1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #if !defined(__clang_major__) && defined(__GNUC__)
// FIXME: the format strings have issues.  Disable this macro definition, compile, and study the resulting warnings for more information.
  27 #define ATTRIBUTE_PRINTF(x,y)
  28 #endif
  29 
  30 #include "precompiled.hpp"
  31 #include "classfile/metadataOnStackMark.hpp"
  32 #include "classfile/stringTable.hpp"
  33 #include "code/codeCache.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  36 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  37 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  38 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  39 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  41 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  42 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  43 #include "gc_implementation/g1/g1EvacFailure.hpp"
  44 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  45 #include "gc_implementation/g1/g1Log.hpp"
  46 #include "gc_implementation/g1/g1MarkSweep.hpp"
  47 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  48 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  50 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  51 #include "gc_implementation/g1/g1StringDedup.hpp"
  52 #include "gc_implementation/g1/g1YCTypes.hpp"
  53 #include "gc_implementation/g1/heapRegion.inline.hpp"
  54 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  55 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  56 #include "gc_implementation/g1/vm_operations_g1.hpp"
  57 #include "gc_implementation/shared/gcHeapSummary.hpp"
  58 #include "gc_implementation/shared/gcTimer.hpp"
  59 #include "gc_implementation/shared/gcTrace.hpp"
  60 #include "gc_implementation/shared/gcTraceTime.hpp"
  61 #include "gc_implementation/shared/isGCActiveMark.hpp"
  62 #include "memory/allocation.hpp"
  63 #include "memory/gcLocker.inline.hpp"
  64 #include "memory/generationSpec.hpp"
  65 #include "memory/iterator.hpp"
  66 #include "memory/referenceProcessor.hpp"
  67 #include "oops/oop.inline.hpp"
  68 #include "oops/oop.pcgc.inline.hpp"
  69 #include "runtime/atomic.inline.hpp"
  70 #include "runtime/orderAccess.inline.hpp"
  71 #include "runtime/vmThread.hpp"
  72 #include "utilities/globalDefinitions.hpp"
  73 
  74 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  75 
// Turn this on so that the contents of the young list (scan-only /
// to-be-collected) are printed at "strategic" points before, during,
// and after the collection --- this is useful for debugging.
  79 #define YOUNG_LIST_VERBOSE 0
  80 // CURRENT STATUS
  81 // This file is under construction.  Search for "FIXME".
  82 
  83 // INVARIANTS/NOTES
  84 //
  85 // All allocation activity covered by the G1CollectedHeap interface is
  86 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  87 // and allocate_new_tlab, which are the "entry" points to the
  88 // allocation code from the rest of the JVM.  (Note that this does not
  89 // apply to TLAB allocation, which is not part of this interface: it
  90 // is done by clients of this interface.)
  91 
  92 // Notes on implementation of parallelism in different tasks.
  93 //
  94 // G1ParVerifyTask uses heap_region_par_iterate() for parallelism.
  95 // The number of GC workers is passed to heap_region_par_iterate().
// It uses run_task(), which sets _n_workers in the task.
// G1ParTask executes g1_process_roots() ->
// SharedHeap::process_roots(), which eventually calls
// CardTableModRefBS::par_non_clean_card_iterate_work(), which uses
 100 // SequentialSubTasksDone.  SharedHeap::process_roots() also
 101 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
 102 //
 103 
 104 // Local to this file.
 105 
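// Closure applied to dirty card entries during refinement: each card is
// refined via G1RemSet::refine_card() and, when running in concurrent mode,
// do_card_ptr() returns false so that the caller can yield to a pending
// safepoint request.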
 106 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 107   bool _concurrent;
 108 public:
 109   RefineCardTableEntryClosure() : _concurrent(true) { }
 110 
 111   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 112     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 113     // This path is executed by the concurrent refine or mutator threads,
 114     // concurrently, and so we do not care if card_ptr contains references
 115     // that point into the collection set.
 116     assert(!oops_into_cset, "should be");
 117 
 118     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 119       // Caller will actually yield.
 120       return false;
 121     }
 122     // Otherwise, we finished successfully; return true.
 123     return true;
 124   }
 125 
 126   void set_concurrent(bool b) { _concurrent = b; }
 127 };
 128 
 129 
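// Closure that marks each card it is given as dirty again and counts the
// number of cards processed; used to re-dirty logged card table entries.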
 130 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 131  private:
 132   size_t _num_processed;
 133 
 134  public:
 135   RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
 136 
 137   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 138     *card_ptr = CardTableModRefBS::dirty_card_val();
 139     _num_processed++;
 140     return true;
 141   }
 142 
 143   size_t num_processed() const { return _num_processed; }
 144 };
 145 
 146 YoungList::YoungList(G1CollectedHeap* g1h) :
 147     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 148     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 149   guarantee(check_list_empty(false), "just making sure...");
 150 }
 151 
 152 void YoungList::push_region(HeapRegion *hr) {
 153   assert(!hr->is_young(), "should not already be young");
 154   assert(hr->get_next_young_region() == NULL, "cause it should!");
 155 
 156   hr->set_next_young_region(_head);
 157   _head = hr;
 158 
 159   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 160   ++_length;
 161 }
 162 
 163 void YoungList::add_survivor_region(HeapRegion* hr) {
 164   assert(hr->is_survivor(), "should be flagged as survivor region");
 165   assert(hr->get_next_young_region() == NULL, "cause it should!");
 166 
 167   hr->set_next_young_region(_survivor_head);
 168   if (_survivor_head == NULL) {
 169     _survivor_tail = hr;
 170   }
 171   _survivor_head = hr;
 172   ++_survivor_length;
 173 }
 174 
 175 void YoungList::empty_list(HeapRegion* list) {
 176   while (list != NULL) {
 177     HeapRegion* next = list->get_next_young_region();
 178     list->set_next_young_region(NULL);
 179     list->uninstall_surv_rate_group();
 180     // This is called before a Full GC and all the non-empty /
 181     // non-humongous regions at the end of the Full GC will end up as
 182     // old anyway.
 183     list->set_old();
 184     list = next;
 185   }
 186 }
 187 
 188 void YoungList::empty_list() {
 189   assert(check_list_well_formed(), "young list should be well formed");
 190 
 191   empty_list(_head);
 192   _head = NULL;
 193   _length = 0;
 194 
 195   empty_list(_survivor_head);
 196   _survivor_head = NULL;
 197   _survivor_tail = NULL;
 198   _survivor_length = 0;
 199 
 200   _last_sampled_rs_lengths = 0;
 201 
 202   assert(check_list_empty(false), "just making sure...");
 203 }
 204 
 205 bool YoungList::check_list_well_formed() {
 206   bool ret = true;
 207 
 208   uint length = 0;
 209   HeapRegion* curr = _head;
 210   HeapRegion* last = NULL;
 211   while (curr != NULL) {
 212     if (!curr->is_young()) {
 213       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
 214                              "incorrectly tagged (y: %d, surv: %d)",
 215                              curr->bottom(), curr->end(),
 216                              curr->is_young(), curr->is_survivor());
 217       ret = false;
 218     }
 219     ++length;
 220     last = curr;
 221     curr = curr->get_next_young_region();
 222   }
 223   ret = ret && (length == _length);
 224 
 225   if (!ret) {
 226     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 227     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 228                            length, _length);
 229   }
 230 
 231   return ret;
 232 }
 233 
 234 bool YoungList::check_list_empty(bool check_sample) {
 235   bool ret = true;
 236 
 237   if (_length != 0) {
 238     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
 239                   _length);
 240     ret = false;
 241   }
 242   if (check_sample && _last_sampled_rs_lengths != 0) {
 243     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
 244     ret = false;
 245   }
 246   if (_head != NULL) {
 247     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
 248     ret = false;
 249   }
 250   if (!ret) {
 251     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
 252   }
 253 
 254   return ret;
 255 }
 256 
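// Remembered set length sampling: rs_length_sampling_init() starts a walk
// over the young list, rs_length_sampling_more() reports whether any regions
// remain, and rs_length_sampling_next() adds the occupied remembered set size
// of the current region to the running total (also updating the incremental
// collection set information for regions already in it). The total is
// published in _last_sampled_rs_lengths once the walk completes.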
 257 void
 258 YoungList::rs_length_sampling_init() {
 259   _sampled_rs_lengths = 0;
 260   _curr               = _head;
 261 }
 262 
 263 bool
 264 YoungList::rs_length_sampling_more() {
 265   return _curr != NULL;
 266 }
 267 
 268 void
 269 YoungList::rs_length_sampling_next() {
 270   assert( _curr != NULL, "invariant" );
 271   size_t rs_length = _curr->rem_set()->occupied();
 272 
 273   _sampled_rs_lengths += rs_length;
 274 
 275   // The current region may not yet have been added to the
 276   // incremental collection set (it gets added when it is
 277   // retired as the current allocation region).
 278   if (_curr->in_collection_set()) {
 279     // Update the collection set policy information for this region
 280     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
 281   }
 282 
 283   _curr = _curr->get_next_young_region();
 284   if (_curr == NULL) {
 285     _last_sampled_rs_lengths = _sampled_rs_lengths;
 286     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
 287   }
 288 }
 289 
 290 void
 291 YoungList::reset_auxilary_lists() {
 292   guarantee( is_empty(), "young list should be empty" );
 293   assert(check_list_well_formed(), "young list should be well formed");
 294 
 295   // Add survivor regions to SurvRateGroup.
 296   _g1h->g1_policy()->note_start_adding_survivor_regions();
 297   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
 298 
 299   int young_index_in_cset = 0;
 300   for (HeapRegion* curr = _survivor_head;
 301        curr != NULL;
 302        curr = curr->get_next_young_region()) {
 303     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
 304 
 305     // The region is a non-empty survivor so let's add it to
 306     // the incremental collection set for the next evacuation
 307     // pause.
 308     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
 309     young_index_in_cset += 1;
 310   }
 311   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
 312   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 313 
 314   _head   = _survivor_head;
 315   _length = _survivor_length;
 316   if (_survivor_head != NULL) {
 317     assert(_survivor_tail != NULL, "cause it shouldn't be");
 318     assert(_survivor_length > 0, "invariant");
 319     _survivor_tail->set_next_young_region(NULL);
 320   }
 321 
 322   // Don't clear the survivor list handles until the start of
 323   // the next evacuation pause - we need it in order to re-tag
 324   // the survivor regions from this evacuation pause as 'young'
 325   // at the start of the next.
 326 
 327   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 328 
 329   assert(check_list_well_formed(), "young list should be well formed");
 330 }
 331 
 332 void YoungList::print() {
 333   HeapRegion* lists[] = {_head,   _survivor_head};
 334   const char* names[] = {"YOUNG", "SURVIVOR"};
 335 
 336   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 337     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 338     HeapRegion *curr = lists[list];
 339     if (curr == NULL)
 340       gclog_or_tty->print_cr("  empty");
 341     while (curr != NULL) {
 342       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
 343                              HR_FORMAT_PARAMS(curr),
 344                              curr->prev_top_at_mark_start(),
 345                              curr->next_top_at_mark_start(),
 346                              curr->age_in_surv_rate_group_cond());
 347       curr = curr->get_next_young_region();
 348     }
 349   }
 350 
 351   gclog_or_tty->cr();
 352 }
 353 
 354 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 355   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 356 }
 357 
 358 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 359   // The from card cache is not the memory that is actually committed. So we cannot
 360   // take advantage of the zero_filled parameter.
 361   reset_from_card_cache(start_idx, num_regions);
 362 }
 363 
 364 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 365 {
 366   // Claim the right to put the region on the dirty cards region list
 367   // by installing a self pointer.
 368   HeapRegion* next = hr->get_next_dirty_cards_region();
 369   if (next == NULL) {
 370     HeapRegion* res = (HeapRegion*)
 371       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 372                           NULL);
 373     if (res == NULL) {
 374       HeapRegion* head;
 375       do {
 376         // Put the region to the dirty cards region list.
 377         head = _dirty_cards_region_list;
 378         next = (HeapRegion*)
 379           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 380         if (next == head) {
 381           assert(hr->get_next_dirty_cards_region() == hr,
 382                  "hr->get_next_dirty_cards_region() != hr");
 383           if (next == NULL) {
 384             // The last region in the list points to itself.
 385             hr->set_next_dirty_cards_region(hr);
 386           } else {
 387             hr->set_next_dirty_cards_region(next);
 388           }
 389         }
 390       } while (next != head);
 391     }
 392   }
 393 }
 394 
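// Pops the head of the dirty cards region list using a CAS loop. The list is
// terminated by a region that points to itself, so popping the last element
// leaves the list empty. Returns NULL if the list is already empty.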
 395 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 396 {
 397   HeapRegion* head;
 398   HeapRegion* hr;
 399   do {
 400     head = _dirty_cards_region_list;
 401     if (head == NULL) {
 402       return NULL;
 403     }
 404     HeapRegion* new_head = head->get_next_dirty_cards_region();
 405     if (head == new_head) {
 406       // The last region.
 407       new_head = NULL;
 408     }
 409     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
 410                                           head);
 411   } while (hr != head);
 412   assert(hr != NULL, "invariant");
 413   hr->set_next_dirty_cards_region(NULL);
 414   return hr;
 415 }
 416 
 417 #ifdef ASSERT
 418 // A region is added to the collection set as it is retired
 419 // so an address p can point to a region which will be in the
 420 // collection set but has not yet been retired.  This method
 421 // therefore is only accurate during a GC pause after all
 422 // regions have been retired.  It is used for debugging
 423 // to check if an nmethod has references to objects that can
// be moved during a partial collection.  Though it can be
 425 // inaccurate, it is sufficient for G1 because the conservative
 426 // implementation of is_scavengable() for G1 will indicate that
 427 // all nmethods must be scanned during a partial collection.
 428 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 429   if (p == NULL) {
 430     return false;
 431   }
 432   return heap_region_containing(p)->in_collection_set();
 433 }
 434 #endif
 435 
 436 // Returns true if the reference points to an object that
 437 // can move in an incremental collection.
 438 bool G1CollectedHeap::is_scavengable(const void* p) {
 439   HeapRegion* hr = heap_region_containing(p);
 440   return !hr->is_humongous();
 441 }
 442 
 443 // Private class members.
 444 
 445 G1CollectedHeap* G1CollectedHeap::_g1h;
 446 
 447 // Private methods.
 448 
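// Tries to satisfy a region allocation request from the secondary free list,
// waiting on SecondaryFreeList_lock while regions are still being freed
// concurrently (free_regions_coming()). Returns NULL if no region becomes
// available.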
 449 HeapRegion*
 450 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 451   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 452   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 453     if (!_secondary_free_list.is_empty()) {
 454       if (G1ConcRegionFreeingVerbose) {
 455         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 456                                "secondary_free_list has %u entries",
 457                                _secondary_free_list.length());
 458       }
 459       // It looks as if there are free regions available on the
 460       // secondary_free_list. Let's move them to the free_list and try
 461       // again to allocate from it.
 462       append_secondary_free_list();
 463 
 464       assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
 465              "empty we should have moved at least one entry to the free_list");
 466       HeapRegion* res = _hrm.allocate_free_region(is_old);
 467       if (G1ConcRegionFreeingVerbose) {
 468         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 469                                "allocated "HR_FORMAT" from secondary_free_list",
 470                                HR_FORMAT_PARAMS(res));
 471       }
 472       return res;
 473     }
 474 
 475     // Wait here until we get notified either when (a) there are no
 476     // more free regions coming or (b) some regions have been moved on
 477     // the secondary_free_list.
 478     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 479   }
 480 
 481   if (G1ConcRegionFreeingVerbose) {
 482     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 483                            "could not allocate from secondary_free_list");
 484   }
 485   return NULL;
 486 }
 487 
 488 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
 489   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 490          "the only time we use this to allocate a humongous region is "
 491          "when we are allocating a single humongous region");
 492 
 493   HeapRegion* res;
 494   if (G1StressConcRegionFreeing) {
 495     if (!_secondary_free_list.is_empty()) {
 496       if (G1ConcRegionFreeingVerbose) {
 497         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 498                                "forced to look at the secondary_free_list");
 499       }
 500       res = new_region_try_secondary_free_list(is_old);
 501       if (res != NULL) {
 502         return res;
 503       }
 504     }
 505   }
 506 
 507   res = _hrm.allocate_free_region(is_old);
 508 
 509   if (res == NULL) {
 510     if (G1ConcRegionFreeingVerbose) {
 511       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 512                              "res == NULL, trying the secondary_free_list");
 513     }
 514     res = new_region_try_secondary_free_list(is_old);
 515   }
 516   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 517     // Currently, only attempts to allocate GC alloc regions set
 518     // do_expand to true. So, we should only reach here during a
 519     // safepoint. If this assumption changes we might have to
 520     // reconsider the use of _expand_heap_after_alloc_failure.
 521     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 522 
 523     ergo_verbose1(ErgoHeapSizing,
 524                   "attempt heap expansion",
 525                   ergo_format_reason("region allocation request failed")
 526                   ergo_format_byte("allocation request"),
 527                   word_size * HeapWordSize);
 528     if (expand(word_size * HeapWordSize)) {
 529       // Given that expand() succeeded in expanding the heap, and we
 530       // always expand the heap by an amount aligned to the heap
 531       // region size, the free list should in theory not be empty.
 532       // In either case allocate_free_region() will check for NULL.
 533       res = _hrm.allocate_free_region(is_old);
 534     } else {
 535       _expand_heap_after_alloc_failure = false;
 536     }
 537   }
 538   return res;
 539 }
 540 
 541 HeapWord*
 542 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 543                                                            uint num_regions,
 544                                                            size_t word_size,
 545                                                            AllocationContext_t context) {
 546   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 547   assert(is_humongous(word_size), "word_size should be humongous");
 548   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 549 
 550   // Index of last region in the series + 1.
 551   uint last = first + num_regions;
 552 
 553   // We need to initialize the region(s) we just discovered. This is
 554   // a bit tricky given that it can happen concurrently with
 555   // refinement threads refining cards on these regions and
 556   // potentially wanting to refine the BOT as they are scanning
 557   // those cards (this can happen shortly after a cleanup; see CR
 558   // 6991377). So we have to set up the region(s) carefully and in
 559   // a specific order.
 560 
 561   // The word size sum of all the regions we will allocate.
 562   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 563   assert(word_size <= word_size_sum, "sanity");
 564 
 565   // This will be the "starts humongous" region.
 566   HeapRegion* first_hr = region_at(first);
 567   // The header of the new object will be placed at the bottom of
 568   // the first region.
 569   HeapWord* new_obj = first_hr->bottom();
 570   // This will be the new end of the first region in the series that
 571   // should also match the end of the last region in the series.
 572   HeapWord* new_end = new_obj + word_size_sum;
 573   // This will be the new top of the first region that will reflect
 574   // this allocation.
 575   HeapWord* new_top = new_obj + word_size;
 576 
 577   // First, we need to zero the header of the space that we will be
 578   // allocating. When we update top further down, some refinement
 579   // threads might try to scan the region. By zeroing the header we
 580   // ensure that any thread that will try to scan the region will
 581   // come across the zero klass word and bail out.
 582   //
 583   // NOTE: It would not have been correct to have used
 584   // CollectedHeap::fill_with_object() and make the space look like
 585   // an int array. The thread that is doing the allocation will
 586   // later update the object header to a potentially different array
 587   // type and, for a very short period of time, the klass and length
 588   // fields will be inconsistent. This could cause a refinement
 589   // thread to calculate the object size incorrectly.
 590   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 591 
 592   // We will set up the first region as "starts humongous". This
 593   // will also update the BOT covering all the regions to reflect
 594   // that there is a single object that starts at the bottom of the
 595   // first region.
 596   first_hr->set_starts_humongous(new_top, new_end);
 597   first_hr->set_allocation_context(context);
 598   // Then, if there are any, we will set up the "continues
 599   // humongous" regions.
 600   HeapRegion* hr = NULL;
 601   for (uint i = first + 1; i < last; ++i) {
 602     hr = region_at(i);
 603     hr->set_continues_humongous(first_hr);
 604     hr->set_allocation_context(context);
 605   }
 606   // If we have "continues humongous" regions (hr != NULL), then the
 607   // end of the last one should match new_end.
 608   assert(hr == NULL || hr->end() == new_end, "sanity");
 609 
 610   // Up to this point no concurrent thread would have been able to
 611   // do any scanning on any region in this series. All the top
 612   // fields still point to bottom, so the intersection between
 613   // [bottom,top] and [card_start,card_end] will be empty. Before we
 614   // update the top fields, we'll do a storestore to make sure that
 615   // no thread sees the update to top before the zeroing of the
 616   // object header and the BOT initialization.
 617   OrderAccess::storestore();
 618 
 619   // Now that the BOT and the object header have been initialized,
 620   // we can update top of the "starts humongous" region.
 621   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
 622          "new_top should be in this region");
 623   first_hr->set_top(new_top);
 624   if (_hr_printer.is_active()) {
 625     HeapWord* bottom = first_hr->bottom();
 626     HeapWord* end = first_hr->orig_end();
 627     if ((first + 1) == last) {
 628       // the series has a single humongous region
 629       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
 630     } else {
      // the series has more than one humongous region
 632       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
 633     }
 634   }
 635 
 636   // Now, we will update the top fields of the "continues humongous"
 637   // regions. The reason we need to do this is that, otherwise,
 638   // these regions would look empty and this will confuse parts of
 639   // G1. For example, the code that looks for a consecutive number
 640   // of empty regions will consider them empty and try to
 641   // re-allocate them. We can extend is_empty() to also include
 642   // !is_continues_humongous(), but it is easier to just update the top
 643   // fields here. The way we set top for all regions (i.e., top ==
 644   // end for all regions but the last one, top == new_top for the
 645   // last one) is actually used when we will free up the humongous
 646   // region in free_humongous_region().
 647   hr = NULL;
 648   for (uint i = first + 1; i < last; ++i) {
 649     hr = region_at(i);
 650     if ((i + 1) == last) {
 651       // last continues humongous region
 652       assert(hr->bottom() < new_top && new_top <= hr->end(),
 653              "new_top should fall on this region");
 654       hr->set_top(new_top);
 655       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
 656     } else {
 657       // not last one
 658       assert(new_top > hr->end(), "new_top should be above this region");
 659       hr->set_top(hr->end());
 660       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 661     }
 662   }
  // If we have "continues humongous" regions (hr != NULL), then the
 664   // end of the last one should match new_end and its top should
 665   // match new_top.
 666   assert(hr == NULL ||
 667          (hr->end() == new_end && hr->top() == new_top), "sanity");
 668   check_bitmaps("Humongous Region Allocation", first_hr);
 669 
 670   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 671   _allocator->increase_used(first_hr->used());
 672   _humongous_set.add(first_hr);
 673 
 674   return new_obj;
 675 }
 676 
// If the allocation could fit into the free regions without expansion, try that.
// Otherwise, if we can expand the heap, do so.
 679 // Otherwise, if using ex regions might help, try with ex given back.
 680 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
 681   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 682 
 683   verify_region_sets_optional();
 684 
 685   uint first = G1_NO_HRM_INDEX;
 686   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 687 
 688   if (obj_regions == 1) {
 689     // Only one region to allocate, try to use a fast path by directly allocating
 690     // from the free lists. Do not try to expand here, we will potentially do that
 691     // later.
 692     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 693     if (hr != NULL) {
 694       first = hr->hrm_index();
 695     }
 696   } else {
    // We can't allocate humongous objects spanning more than one region while
    // cleanupComplete() is running, since some of the regions we find to be
    // empty might not yet be added to the free list. It is not straightforward
    // to know which list they are on, so we cannot easily remove them. We only
    // need to do this if we need to allocate more than one region to satisfy the
    // current humongous allocation request. If we are only allocating one region
    // we use the single-region allocation code (see above), which already
    // potentially waits for regions from the secondary free list.
 705     wait_while_free_regions_coming();
 706     append_secondary_free_list_if_not_empty_with_lock();
 707 
    // Policy: Try only empty (i.e. already committed) regions first. Maybe we
 709     // are lucky enough to find some.
 710     first = _hrm.find_contiguous_only_empty(obj_regions);
 711     if (first != G1_NO_HRM_INDEX) {
 712       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 713     }
 714   }
 715 
 716   if (first == G1_NO_HRM_INDEX) {
 717     // Policy: We could not find enough regions for the humongous object in the
 718     // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If we find such a mix, try expansion.
 720     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 721     if (first != G1_NO_HRM_INDEX) {
 722       // We found something. Make sure these regions are committed, i.e. expand
 723       // the heap. Alternatively we could do a defragmentation GC.
 724       ergo_verbose1(ErgoHeapSizing,
 725                     "attempt heap expansion",
 726                     ergo_format_reason("humongous allocation request failed")
 727                     ergo_format_byte("allocation request"),
 728                     word_size * HeapWordSize);
 729 
 730       _hrm.expand_at(first, obj_regions);
 731       g1_policy()->record_new_heap_size(num_regions());
 732 
 733 #ifdef ASSERT
 734       for (uint i = first; i < first + obj_regions; ++i) {
 735         HeapRegion* hr = region_at(i);
 736         assert(hr->is_free(), "sanity");
 737         assert(hr->is_empty(), "sanity");
 738         assert(is_on_master_free_list(hr), "sanity");
 739       }
 740 #endif
 741       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 742     } else {
 743       // Policy: Potentially trigger a defragmentation GC.
 744     }
 745   }
 746 
 747   HeapWord* result = NULL;
 748   if (first != G1_NO_HRM_INDEX) {
 749     result = humongous_obj_allocate_initialize_regions(first, obj_regions,
 750                                                        word_size, context);
 751     assert(result != NULL, "it should always return a valid result");
 752 
 753     // A successful humongous object allocation changes the used space
 754     // information of the old generation so we need to recalculate the
 755     // sizes and update the jstat counters here.
 756     g1mm()->update_sizes();
 757   }
 758 
 759   verify_region_sets_optional();
 760 
 761   return result;
 762 }
 763 
 764 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 765   assert_heap_not_locked_and_not_at_safepoint();
 766   assert(!is_humongous(word_size), "we do not allow humongous TLABs");
 767 
 768   unsigned int dummy_gc_count_before;
 769   int dummy_gclocker_retry_count = 0;
 770   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 771 }
 772 
 773 HeapWord*
 774 G1CollectedHeap::mem_allocate(size_t word_size,
 775                               bool*  gc_overhead_limit_was_exceeded) {
 776   assert_heap_not_locked_and_not_at_safepoint();
 777 
 778   // Loop until the allocation is satisfied, or unsatisfied after GC.
 779   for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 780     unsigned int gc_count_before;
 781 
 782     HeapWord* result = NULL;
 783     if (!is_humongous(word_size)) {
 784       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
 785     } else {
 786       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
 787     }
 788     if (result != NULL) {
 789       return result;
 790     }
 791 
 792     // Create the garbage collection operation...
 793     VM_G1CollectForAllocation op(gc_count_before, word_size);
 794     op.set_allocation_context(AllocationContext::current());
 795 
 796     // ...and get the VM thread to execute it.
 797     VMThread::execute(&op);
 798 
 799     if (op.prologue_succeeded() && op.pause_succeeded()) {
 800       // If the operation was successful we'll return the result even
 801       // if it is NULL. If the allocation attempt failed immediately
 802       // after a Full GC, it's unlikely we'll be able to allocate now.
 803       HeapWord* result = op.result();
 804       if (result != NULL && !is_humongous(word_size)) {
 805         // Allocations that take place on VM operations do not do any
 806         // card dirtying and we have to do it here. We only have to do
 807         // this for non-humongous allocations, though.
 808         dirty_young_block(result, word_size);
 809       }
 810       return result;
 811     } else {
 812       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 813         return NULL;
 814       }
 815       assert(op.result() == NULL,
 816              "the result should be NULL if the VM op did not succeed");
 817     }
 818 
 819     // Give a warning if we seem to be looping forever.
 820     if ((QueuedAllocationWarningCount > 0) &&
 821         (try_count % QueuedAllocationWarningCount == 0)) {
 822       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 823     }
 824   }
 825 
 826   ShouldNotReachHere();
 827   return NULL;
 828 }
 829 
 830 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 831                                                    AllocationContext_t context,
 832                                                    unsigned int *gc_count_before_ret,
 833                                                    int* gclocker_retry_count_ret) {
 834   // Make sure you read the note in attempt_allocation_humongous().
 835 
 836   assert_heap_not_locked_and_not_at_safepoint();
 837   assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
 838          "be called for humongous allocation requests");
 839 
 840   // We should only get here after the first-level allocation attempt
 841   // (attempt_allocation()) failed to allocate.
 842 
 843   // We will loop until a) we manage to successfully perform the
 844   // allocation or b) we successfully schedule a collection which
 845   // fails to perform the allocation. b) is the only case when we'll
 846   // return NULL.
 847   HeapWord* result = NULL;
 848   for (int try_count = 1; /* we'll return */; try_count += 1) {
 849     bool should_try_gc;
 850     unsigned int gc_count_before;
 851 
 852     {
 853       MutexLockerEx x(Heap_lock);
 854       result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
 855                                                                                     false /* bot_updates */);
 856       if (result != NULL) {
 857         return result;
 858       }
 859 
 860       // If we reach here, attempt_allocation_locked() above failed to
 861       // allocate a new region. So the mutator alloc region should be NULL.
 862       assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
 863 
 864       if (GC_locker::is_active_and_needs_gc()) {
 865         if (g1_policy()->can_expand_young_list()) {
 866           // No need for an ergo verbose message here,
 867           // can_expand_young_list() does this when it returns true.
 868           result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
 869                                                                                        false /* bot_updates */);
 870           if (result != NULL) {
 871             return result;
 872           }
 873         }
 874         should_try_gc = false;
 875       } else {
 876         // The GCLocker may not be active but the GCLocker initiated
 877         // GC may not yet have been performed (GCLocker::needs_gc()
 878         // returns true). In this case we do not try this GC and
 879         // wait until the GCLocker initiated GC is performed, and
 880         // then retry the allocation.
 881         if (GC_locker::needs_gc()) {
 882           should_try_gc = false;
 883         } else {
 884           // Read the GC count while still holding the Heap_lock.
 885           gc_count_before = total_collections();
 886           should_try_gc = true;
 887         }
 888       }
 889     }
 890 
 891     if (should_try_gc) {
 892       bool succeeded;
 893       result = do_collection_pause(word_size, gc_count_before, &succeeded,
 894           GCCause::_g1_inc_collection_pause);
 895       if (result != NULL) {
 896         assert(succeeded, "only way to get back a non-NULL result");
 897         return result;
 898       }
 899 
 900       if (succeeded) {
 901         // If we get here we successfully scheduled a collection which
 902         // failed to allocate. No point in trying to allocate
 903         // further. We'll just return NULL.
 904         MutexLockerEx x(Heap_lock);
 905         *gc_count_before_ret = total_collections();
 906         return NULL;
 907       }
 908     } else {
 909       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 910         MutexLockerEx x(Heap_lock);
 911         *gc_count_before_ret = total_collections();
 912         return NULL;
 913       }
 914       // The GCLocker is either active or the GCLocker initiated
 915       // GC has not yet been performed. Stall until it is and
 916       // then retry the allocation.
 917       GC_locker::stall_until_clear();
 918       (*gclocker_retry_count_ret) += 1;
 919     }
 920 
 921     // We can reach here if we were unsuccessful in scheduling a
 922     // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
 924     // allocation attempt in case another thread successfully
 925     // performed a collection and reclaimed enough space. We do the
 926     // first attempt (without holding the Heap_lock) here and the
 927     // follow-on attempt will be at the start of the next loop
 928     // iteration (after taking the Heap_lock).
 929     result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
 930                                                                            false /* bot_updates */);
 931     if (result != NULL) {
 932       return result;
 933     }
 934 
 935     // Give a warning if we seem to be looping forever.
 936     if ((QueuedAllocationWarningCount > 0) &&
 937         (try_count % QueuedAllocationWarningCount == 0)) {
 938       warning("G1CollectedHeap::attempt_allocation_slow() "
 939               "retries %d times", try_count);
 940     }
 941   }
 942 
 943   ShouldNotReachHere();
 944   return NULL;
 945 }
 946 
 947 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 948                                                         unsigned int * gc_count_before_ret,
 949                                                         int* gclocker_retry_count_ret) {
 950   // The structure of this method has a lot of similarities to
 951   // attempt_allocation_slow(). The reason these two were not merged
 952   // into a single one is that such a method would require several "if
 953   // allocation is not humongous do this, otherwise do that"
 954   // conditional paths which would obscure its flow. In fact, an early
 955   // version of this code did use a unified method which was harder to
 956   // follow and, as a result, it had subtle bugs that were hard to
 957   // track down. So keeping these two methods separate allows each to
 958   // be more readable. It will be good to keep these two in sync as
 959   // much as possible.
 960 
 961   assert_heap_not_locked_and_not_at_safepoint();
 962   assert(is_humongous(word_size), "attempt_allocation_humongous() "
 963          "should only be called for humongous allocations");
 964 
 965   // Humongous objects can exhaust the heap quickly, so we should check if we
 966   // need to start a marking cycle at each humongous object allocation. We do
 967   // the check before we do the actual allocation. The reason for doing it
 968   // before the allocation is that we avoid having to keep track of the newly
 969   // allocated memory while we do a GC.
 970   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
 971                                            word_size)) {
 972     collect(GCCause::_g1_humongous_allocation);
 973   }
 974 
 975   // We will loop until a) we manage to successfully perform the
 976   // allocation or b) we successfully schedule a collection which
 977   // fails to perform the allocation. b) is the only case when we'll
 978   // return NULL.
 979   HeapWord* result = NULL;
 980   for (int try_count = 1; /* we'll return */; try_count += 1) {
 981     bool should_try_gc;
 982     unsigned int gc_count_before;
 983 
 984     {
 985       MutexLockerEx x(Heap_lock);
 986 
 987       // Given that humongous objects are not allocated in young
 988       // regions, we'll first try to do the allocation without doing a
 989       // collection hoping that there's enough space in the heap.
 990       result = humongous_obj_allocate(word_size, AllocationContext::current());
 991       if (result != NULL) {
 992         return result;
 993       }
 994 
 995       if (GC_locker::is_active_and_needs_gc()) {
 996         should_try_gc = false;
 997       } else {
        // The GCLocker may not be active but the GCLocker initiated
 999         // GC may not yet have been performed (GCLocker::needs_gc()
1000         // returns true). In this case we do not try this GC and
1001         // wait until the GCLocker initiated GC is performed, and
1002         // then retry the allocation.
1003         if (GC_locker::needs_gc()) {
1004           should_try_gc = false;
1005         } else {
1006           // Read the GC count while still holding the Heap_lock.
1007           gc_count_before = total_collections();
1008           should_try_gc = true;
1009         }
1010       }
1011     }
1012 
1013     if (should_try_gc) {
1014       // If we failed to allocate the humongous object, we should try to
1015       // do a collection pause (if we're allowed) in case it reclaims
1016       // enough space for the allocation to succeed after the pause.
1017 
1018       bool succeeded;
1019       result = do_collection_pause(word_size, gc_count_before, &succeeded,
1020           GCCause::_g1_humongous_allocation);
1021       if (result != NULL) {
1022         assert(succeeded, "only way to get back a non-NULL result");
1023         return result;
1024       }
1025 
1026       if (succeeded) {
1027         // If we get here we successfully scheduled a collection which
1028         // failed to allocate. No point in trying to allocate
1029         // further. We'll just return NULL.
1030         MutexLockerEx x(Heap_lock);
1031         *gc_count_before_ret = total_collections();
1032         return NULL;
1033       }
1034     } else {
1035       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1036         MutexLockerEx x(Heap_lock);
1037         *gc_count_before_ret = total_collections();
1038         return NULL;
1039       }
1040       // The GCLocker is either active or the GCLocker initiated
1041       // GC has not yet been performed. Stall until it is and
1042       // then retry the allocation.
1043       GC_locker::stall_until_clear();
1044       (*gclocker_retry_count_ret) += 1;
1045     }
1046 
1047     // We can reach here if we were unsuccessful in scheduling a
1048     // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
1050     // allocation attempt in case another thread successfully
1051     // performed a collection and reclaimed enough space.  Give a
1052     // warning if we seem to be looping forever.
1053 
1054     if ((QueuedAllocationWarningCount > 0) &&
1055         (try_count % QueuedAllocationWarningCount == 0)) {
1056       warning("G1CollectedHeap::attempt_allocation_humongous() "
1057               "retries %d times", try_count);
1058     }
1059   }
1060 
1061   ShouldNotReachHere();
1062   return NULL;
1063 }
1064 
1065 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1066                                                            AllocationContext_t context,
1067                                                            bool expect_null_mutator_alloc_region) {
1068   assert_at_safepoint(true /* should_be_vm_thread */);
1069   assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1070                                              !expect_null_mutator_alloc_region,
1071          "the current alloc region was unexpectedly found to be non-NULL");
1072 
1073   if (!is_humongous(word_size)) {
1074     return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1075                                                       false /* bot_updates */);
1076   } else {
1077     HeapWord* result = humongous_obj_allocate(word_size, context);
1078     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1079       g1_policy()->set_initiate_conc_mark_if_possible();
1080     }
1081     return result;
1082   }
1083 
1084   ShouldNotReachHere();
1085 }
1086 
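// After a full compaction every object may have moved, so this closure clears
// each region's remembered set and the corresponding card table range; the
// remembered sets are rebuilt afterwards (see ParRebuildRSTask below).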
1087 class PostMCRemSetClearClosure: public HeapRegionClosure {
1088   G1CollectedHeap* _g1h;
1089   ModRefBarrierSet* _mr_bs;
1090 public:
1091   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1092     _g1h(g1h), _mr_bs(mr_bs) {}
1093 
1094   bool doHeapRegion(HeapRegion* r) {
1095     HeapRegionRemSet* hrrs = r->rem_set();
1096 
1097     if (r->is_continues_humongous()) {
      // We'll assert that the strong code root list and RSet are empty
1099       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1100       assert(hrrs->occupied() == 0, "RSet should be empty");
1101       return false;
1102     }
1103 
1104     _g1h->reset_gc_time_stamps(r);
1105     hrrs->clear();
1106     // You might think here that we could clear just the cards
1107     // corresponding to the used region.  But no: if we leave a dirty card
1108     // in a region we might allocate into, then it would prevent that card
1109     // from being enqueued, and cause it to be missed.
1110     // Re: the performance cost: we shouldn't be doing full GC anyway!
1111     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1112 
1113     return false;
1114   }
1115 };
1116 
1117 void G1CollectedHeap::clear_rsets_post_compaction() {
1118   PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1119   heap_region_iterate(&rs_clear);
1120 }
1121 
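// Rebuilds remembered sets by iterating over all oops in each region (other
// than "continues humongous" ones) and feeding them to an UpdateRSOopClosure.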
1122 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1123   G1CollectedHeap*   _g1h;
1124   UpdateRSOopClosure _cl;
1125   int                _worker_i;
1126 public:
1127   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1128     _cl(g1->g1_rem_set(), worker_i),
1129     _worker_i(worker_i),
1130     _g1h(g1)
1131   { }
1132 
1133   bool doHeapRegion(HeapRegion* r) {
1134     if (!r->is_continues_humongous()) {
1135       _cl.set_from(r);
1136       r->oop_iterate(&_cl);
1137     }
1138     return false;
1139   }
1140 };
1141 
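// Parallel wrapper around RebuildRSOutOfRegionClosure: each GC worker claims
// regions through the HeapRegionClaimer and rebuilds their remembered sets.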
1142 class ParRebuildRSTask: public AbstractGangTask {
1143   G1CollectedHeap* _g1;
1144   HeapRegionClaimer _hrclaimer;
1145 
1146 public:
1147   ParRebuildRSTask(G1CollectedHeap* g1) :
1148       AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
1149 
1150   void work(uint worker_id) {
1151     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1152     _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
1153   }
1154 };
1155 
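// Reports the post-compaction state of each non-free region to the
// G1HRPrinter; used by print_hrm_post_compaction() below.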
1156 class PostCompactionPrinterClosure: public HeapRegionClosure {
1157 private:
1158   G1HRPrinter* _hr_printer;
1159 public:
1160   bool doHeapRegion(HeapRegion* hr) {
1161     assert(!hr->is_young(), "not expecting to find young regions");
1162     if (hr->is_free()) {
1163       // We only generate output for non-empty regions.
1164     } else if (hr->is_starts_humongous()) {
1165       if (hr->region_num() == 1) {
1166         // single humongous region
1167         _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1168       } else {
1169         _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1170       }
1171     } else if (hr->is_continues_humongous()) {
1172       _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1173     } else if (hr->is_old()) {
1174       _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1175     } else {
1176       ShouldNotReachHere();
1177     }
1178     return false;
1179   }
1180 
1181   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1182     : _hr_printer(hr_printer) { }
1183 };
1184 
1185 void G1CollectedHeap::print_hrm_post_compaction() {
1186   PostCompactionPrinterClosure cl(hr_printer());
1187   heap_region_iterate(&cl);
1188 }
1189 
1190 bool G1CollectedHeap::do_collection(bool explicit_gc,
1191                                     bool clear_all_soft_refs,
1192                                     size_t word_size) {
1193   assert_at_safepoint(true /* should_be_vm_thread */);
1194 
1195   if (GC_locker::check_active_before_gc()) {
1196     return false;
1197   }
1198 
1199   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1200   gc_timer->register_gc_start();
1201 
1202   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1203   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1204 
1205   SvcGCMarker sgcm(SvcGCMarker::FULL);
1206   ResourceMark rm;
1207 
1208   print_heap_before_gc();
1209   trace_heap_before_gc(gc_tracer);
1210 
1211   size_t metadata_prev_used = MetaspaceAux::used_bytes();
1212 
1213   verify_region_sets_optional();
1214 
1215   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1216                            collector_policy()->should_clear_all_soft_refs();
1217 
1218   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1219 
1220   {
1221     IsGCActiveMark x;
1222 
1223     // Timing
1224     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1225     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1226 
1227     {
1228       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id());
1229       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1230       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1231 
1232       g1_policy()->record_full_collection_start();
1233 
1234       // Note: When we have a more flexible GC logging framework that
1235       // allows us to add optional attributes to a GC log record we
1236       // could consider timing and reporting how long we wait in the
1237       // following two methods.
1238       wait_while_free_regions_coming();
1239       // If we start the compaction before the CM threads finish
1240       // scanning the root regions we might trip them over as we'll
1241       // be moving objects / updating references. So let's wait until
1242       // they are done. By telling them to abort, they should complete
1243       // early.
1244       _cm->root_regions()->abort();
1245       _cm->root_regions()->wait_until_scan_finished();
1246       append_secondary_free_list_if_not_empty_with_lock();
1247 
1248       gc_prologue(true);
1249       increment_total_collections(true /* full gc */);
1250       increment_old_marking_cycles_started();
1251 
1252       assert(used() == recalculate_used(), "Should be equal");
1253 
1254       verify_before_gc();
1255 
1256       check_bitmaps("Full GC Start");
1257       pre_full_gc_dump(gc_timer);
1258 
1259       COMPILER2_PRESENT(DerivedPointerTable::clear());
1260 
1261       // Disable discovery and empty the discovered lists
1262       // for the CM ref processor.
1263       ref_processor_cm()->disable_discovery();
1264       ref_processor_cm()->abandon_partial_discovery();
1265       ref_processor_cm()->verify_no_references_recorded();
1266 
1267       // Abandon current iterations of concurrent marking and concurrent
1268       // refinement, if any are in progress. We have to do this before
1269       // wait_until_scan_finished() below.
1270       concurrent_mark()->abort();
1271 
1272       // Make sure we'll choose a new allocation region afterwards.
1273       _allocator->release_mutator_alloc_region();
1274       _allocator->abandon_gc_alloc_regions();
1275       g1_rem_set()->cleanupHRRS();
1276 
1277       // We should call this after we retire any currently active alloc
1278       // regions so that all the ALLOC / RETIRE events are generated
1279       // before the start GC event.
1280       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1281 
1282       // We may have added regions to the current incremental collection
1283       // set between the last GC or pause and now. We need to clear the
1284       // incremental collection set and then start rebuilding it afresh
1285       // after this full GC.
1286       abandon_collection_set(g1_policy()->inc_cset_head());
1287       g1_policy()->clear_incremental_cset();
1288       g1_policy()->stop_incremental_cset_building();
1289 
1290       tear_down_region_sets(false /* free_list_only */);
1291       g1_policy()->set_gcs_are_young(true);
1292 
1293       // See the comments in g1CollectedHeap.hpp and
1294       // G1CollectedHeap::ref_processing_init() about
1295       // how reference processing currently works in G1.
1296 
1297       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1298       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1299 
1300       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1301       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1302 
1303       ref_processor_stw()->enable_discovery();
1304       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1305 
1306       // Do collection work
1307       {
1308         HandleMark hm;  // Discard invalid handles created during gc
1309         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1310       }
1311 
1312       assert(num_free_regions() == 0, "we should not have added any free regions");
1313       rebuild_region_sets(false /* free_list_only */);
1314 
1315       // Enqueue any discovered reference objects that have
1316       // not been removed from the discovered lists.
1317       ref_processor_stw()->enqueue_discovered_references();
1318 
1319       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1320 
1321       MemoryService::track_memory_usage();
1322 
1323       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1324       ref_processor_stw()->verify_no_references_recorded();
1325 
1326       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1327       ClassLoaderDataGraph::purge();
1328       MetaspaceAux::verify_metrics();
1329 
1330       // Note: since we've just done a full GC, concurrent
1331       // marking is no longer active. Therefore we need not
1332       // re-enable reference discovery for the CM ref processor.
1333       // That will be done at the start of the next marking cycle.
1334       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1335       ref_processor_cm()->verify_no_references_recorded();
1336 
1337       reset_gc_time_stamp();
1338       // Since everything potentially moved, we will clear all remembered
1339       // sets, and clear all cards.  Later we will rebuild remembered
1340       // sets. We will also reset the GC time stamps of the regions.
1341       clear_rsets_post_compaction();
1342       check_gc_time_stamps();
1343 
1344       // Resize the heap if necessary.
1345       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1346 
1347       if (_hr_printer.is_active()) {
1348         // We should do this after we potentially resize the heap so
1349         // that all the COMMIT / UNCOMMIT events are generated before
1350         // the end GC event.
1351 
1352         print_hrm_post_compaction();
1353         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1354       }
1355 
1356       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1357       if (hot_card_cache->use_cache()) {
1358         hot_card_cache->reset_card_counts();
1359         hot_card_cache->reset_hot_cache();
1360       }
1361 
1362       // Rebuild remembered sets of all regions.
1363       uint n_workers =
1364         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1365                                                 workers()->active_workers(),
1366                                                 Threads::number_of_non_daemon_threads());
1367       assert(UseDynamicNumberOfGCThreads ||
1368              n_workers == workers()->total_workers(),
1369              "If not dynamic should be using all the workers");
1370       workers()->set_active_workers(n_workers);
1371       // Set parallel threads in the heap (_n_par_threads) only
1372       // before a parallel phase and always reset it to 0 after
1373       // the phase so that the number of parallel threads does
1374       // not get carried forward to a serial phase where there
1375       // may be code that is "possibly_parallel".
1376       set_par_threads(n_workers);
1377 
1378       ParRebuildRSTask rebuild_rs_task(this);
1379       assert(UseDynamicNumberOfGCThreads ||
1380              workers()->active_workers() == workers()->total_workers(),
1381              "Unless dynamic should use total workers");
1382       // Use the most recent number of active workers
1383       assert(workers()->active_workers() > 0,
1384              "Active workers not properly set");
1385       set_par_threads(workers()->active_workers());
1386       workers()->run_task(&rebuild_rs_task);
1387       set_par_threads(0);
1388 
1389       // Rebuild the strong code root lists for each region
1390       rebuild_strong_code_roots();
1391 
1392       if (true) { // FIXME
1393         MetaspaceGC::compute_new_size();
1394       }
1395 
1396 #ifdef TRACESPINNING
1397       ParallelTaskTerminator::print_termination_counts();
1398 #endif
1399 
1400       // Discard all rset updates
1401       JavaThread::dirty_card_queue_set().abandon_logs();
1402       assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1403 
1404       _young_list->reset_sampled_info();
1405       // At this point there should be no regions in the
1406       // entire heap tagged as young.
1407       assert(check_young_list_empty(true /* check_heap */),
1408              "young list should be empty at this point");
1409 
1410       // Update the number of full collections that have been completed.
1411       increment_old_marking_cycles_completed(false /* concurrent */);
1412 
1413       _hrm.verify_optional();
1414       verify_region_sets_optional();
1415 
1416       verify_after_gc();
1417 
1418       // Clear the previous marking bitmap, if needed for bitmap verification.
1419       // Note we cannot do this when we clear the next marking bitmap in
1420       // ConcurrentMark::abort() above since VerifyDuringGC verifies the
1421       // objects marked during a full GC against the previous bitmap.
1422       // But we need to clear it before calling check_bitmaps below since
1423       // the full GC has compacted objects and updated TAMS but not updated
1424       // the prev bitmap.
1425       if (G1VerifyBitmaps) {
1426         ((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
1427       }
1428       check_bitmaps("Full GC End");
1429 
1430       // Start a new incremental collection set for the next pause
1431       assert(g1_policy()->collection_set() == NULL, "must be");
1432       g1_policy()->start_incremental_cset_building();
1433 
1434       clear_cset_fast_test();
1435 
1436       _allocator->init_mutator_alloc_region();
1437 
1438       g1_policy()->record_full_collection_end();
1439 
1440       if (G1Log::fine()) {
1441         g1_policy()->print_heap_transition();
1442       }
1443 
1444       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1445       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1446       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1447       // before any GC notifications are raised.
1448       g1mm()->update_sizes();
1449 
1450       gc_epilogue(true);
1451     }
1452 
1453     if (G1Log::finer()) {
1454       g1_policy()->print_detailed_heap_transition(true /* full */);
1455     }
1456 
1457     print_heap_after_gc();
1458     trace_heap_after_gc(gc_tracer);
1459 
1460     post_full_gc_dump(gc_timer);
1461 
1462     gc_timer->register_gc_end();
1463     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1464   }
1465 
1466   return true;
1467 }
1468 
1469 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1470   // do_collection() will return whether it succeeded in performing
1471   // the GC. Currently, there is no facility on the
1472   // do_full_collection() API to notify the caller that the collection
1473   // did not succeed (e.g., because it was locked out by the GC
1474   // locker). So, right now, we'll ignore the return value.
1475   bool dummy = do_collection(true,                /* explicit_gc */
1476                              clear_all_soft_refs,
1477                              0                    /* word_size */);
1478 }
1479 
1480 // This code is mostly copied from TenuredGeneration.
1481 void
1482 G1CollectedHeap::
1483 resize_if_necessary_after_full_collection(size_t word_size) {
1484   // Include the current allocation, if any, and bytes that will be
1485   // pre-allocated to support collections, as "used".
1486   const size_t used_after_gc = used();
1487   const size_t capacity_after_gc = capacity();
1488   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1489 
1490   // This is enforced in arguments.cpp.
1491   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1492          "otherwise the code below doesn't make sense");
1493 
1494   // We don't have floating point command-line arguments
1495   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1496   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1497   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1498   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
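  // A worked example (values assumed, for illustration only): with
  // MinHeapFreeRatio = 40 the maximum used percentage is 0.60, so a post-GC
  // occupancy of 600 MB implies a minimum desired capacity of
  // 600 MB / 0.60 = 1000 MB; MaxHeapFreeRatio = 70 similarly gives a
  // maximum desired capacity of 600 MB / 0.30 = 2000 MB.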
1499 
1500   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1501   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1502 
1503   // We have to be careful here as these two calculations can overflow
1504   // 32-bit size_t's.
1505   double used_after_gc_d = (double) used_after_gc;
1506   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1507   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
1508 
1509   // Let's make sure that they are both under the max heap size, which
1510   // by default will make them fit into a size_t.
1511   double desired_capacity_upper_bound = (double) max_heap_size;
1512   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1513                                     desired_capacity_upper_bound);
1514   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1515                                     desired_capacity_upper_bound);
1516 
1517   // We can now safely turn them into size_t's.
1518   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1519   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1520 
1521   // This assert only makes sense here, before we adjust them
1522   // with respect to the min and max heap size.
1523   assert(minimum_desired_capacity <= maximum_desired_capacity,
1524          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
1525                  "maximum_desired_capacity = "SIZE_FORMAT,
1526                  minimum_desired_capacity, maximum_desired_capacity));
1527 
1528   // Should not be greater than the heap max size. No need to adjust
1529   // it with respect to the heap min size as it's a lower bound (i.e.,
1530   // we'll try to make the capacity larger than it, not smaller).
1531   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1532   // Should not be less than the heap min size. No need to adjust it
1533   // with respect to the heap max size as it's an upper bound (i.e.,
1534   // we'll try to make the capacity smaller than it, not greater).
1535   maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1536 
1537   if (capacity_after_gc < minimum_desired_capacity) {
1538     // Don't expand unless it's significant
1539     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1540     ergo_verbose4(ErgoHeapSizing,
1541                   "attempt heap expansion",
1542                   ergo_format_reason("capacity lower than "
1543                                      "min desired capacity after Full GC")
1544                   ergo_format_byte("capacity")
1545                   ergo_format_byte("occupancy")
1546                   ergo_format_byte_perc("min desired capacity"),
1547                   capacity_after_gc, used_after_gc,
1548                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1549     expand(expand_bytes);
1550 
1551     // No expansion, now see if we want to shrink
1552   } else if (capacity_after_gc > maximum_desired_capacity) {
1553     // Capacity too large, compute shrinking size
1554     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1555     ergo_verbose4(ErgoHeapSizing,
1556                   "attempt heap shrinking",
1557                   ergo_format_reason("capacity higher than "
1558                                      "max desired capacity after Full GC")
1559                   ergo_format_byte("capacity")
1560                   ergo_format_byte("occupancy")
1561                   ergo_format_byte_perc("max desired capacity"),
1562                   capacity_after_gc, used_after_gc,
1563                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1564     shrink(shrink_bytes);
1565   }
1566 }
1567 
1568 
1569 HeapWord*
1570 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1571                                            AllocationContext_t context,
1572                                            bool* succeeded) {
1573   assert_at_safepoint(true /* should_be_vm_thread */);
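  // The sequence below escalates gradually: retry the allocation, then try
  // expanding the heap, then do a Full GC, then a Full GC that also clears
  // soft references, retrying the allocation after each step.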
1574 
1575   *succeeded = true;
1576   // Let's attempt the allocation first.
1577   HeapWord* result =
1578     attempt_allocation_at_safepoint(word_size,
1579                                     context,
1580                                     false /* expect_null_mutator_alloc_region */);
1581   if (result != NULL) {
1582     assert(*succeeded, "sanity");
1583     return result;
1584   }
1585 
1586   // In a G1 heap, we're supposed to keep allocation from failing by
1587   // incremental pauses.  Therefore, at least for now, we'll favor
1588   // expansion over collection.  (This might change in the future if we can
1589   // do something smarter than full collection to satisfy a failed alloc.)
1590   result = expand_and_allocate(word_size, context);
1591   if (result != NULL) {
1592     assert(*succeeded, "sanity");
1593     return result;
1594   }
1595 
1596   // Expansion didn't work, we'll try to do a Full GC.
1597   bool gc_succeeded = do_collection(false, /* explicit_gc */
1598                                     false, /* clear_all_soft_refs */
1599                                     word_size);
1600   if (!gc_succeeded) {
1601     *succeeded = false;
1602     return NULL;
1603   }
1604 
1605   // Retry the allocation
1606   result = attempt_allocation_at_safepoint(word_size,
1607                                            context,
1608                                            true /* expect_null_mutator_alloc_region */);
1609   if (result != NULL) {
1610     assert(*succeeded, "sanity");
1611     return result;
1612   }
1613 
1614   // Then, try a Full GC that will collect all soft references.
1615   gc_succeeded = do_collection(false, /* explicit_gc */
1616                                true,  /* clear_all_soft_refs */
1617                                word_size);
1618   if (!gc_succeeded) {
1619     *succeeded = false;
1620     return NULL;
1621   }
1622 
1623   // Retry the allocation once more
1624   result = attempt_allocation_at_safepoint(word_size,
1625                                            context,
1626                                            true /* expect_null_mutator_alloc_region */);
1627   if (result != NULL) {
1628     assert(*succeeded, "sanity");
1629     return result;
1630   }
1631 
1632   assert(!collector_policy()->should_clear_all_soft_refs(),
1633          "Flag should have been handled and cleared prior to this point");
1634 
1635   // What else?  We might try synchronous finalization later.  If the total
1636   // space available is large enough for the allocation, then a more
1637   // complete compaction phase than we've tried so far might be
1638   // appropriate.
1639   assert(*succeeded, "sanity");
1640   return NULL;
1641 }
1642 
1643 // Attempts to expand the heap sufficiently to support an
1644 // allocation of the given "word_size". If successful, performs
1645 // the allocation and returns the address of the allocated block;
1646 // otherwise returns NULL.
1647 
1648 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1649   assert_at_safepoint(true /* should_be_vm_thread */);
1650 
1651   verify_region_sets_optional();
1652 
1653   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
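  // Expand by at least MinHeapDeltaBytes, even if the failed allocation
  // request itself was smaller than that.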
1654   ergo_verbose1(ErgoHeapSizing,
1655                 "attempt heap expansion",
1656                 ergo_format_reason("allocation request failed")
1657                 ergo_format_byte("allocation request"),
1658                 word_size * HeapWordSize);
1659   if (expand(expand_bytes)) {
1660     _hrm.verify_optional();
1661     verify_region_sets_optional();
1662     return attempt_allocation_at_safepoint(word_size,
1663                                            context,
1664                                            false /* expect_null_mutator_alloc_region */);
1665   }
1666   return NULL;
1667 }
1668 
1669 bool G1CollectedHeap::expand(size_t expand_bytes) {
1670   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1671   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1672                                        HeapRegion::GrainBytes);
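  // For illustration only (region size assumed): with 1 MB regions, a
  // request to expand by 1.5 MB is first page-aligned and then rounded up
  // to HeapRegion::GrainBytes, i.e. 2 MB or two whole regions.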
1673   ergo_verbose2(ErgoHeapSizing,
1674                 "expand the heap",
1675                 ergo_format_byte("requested expansion amount")
1676                 ergo_format_byte("attempted expansion amount"),
1677                 expand_bytes, aligned_expand_bytes);
1678 
1679   if (is_maximal_no_gc()) {
1680     ergo_verbose0(ErgoHeapSizing,
1681                       "did not expand the heap",
1682                       ergo_format_reason("heap already fully expanded"));
1683     return false;
1684   }
1685 
1686   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1687   assert(regions_to_expand > 0, "Must expand by at least one region");
1688 
1689   uint expanded_by = _hrm.expand_by(regions_to_expand);
1690 
1691   if (expanded_by > 0) {
1692     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1693     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1694     g1_policy()->record_new_heap_size(num_regions());
1695   } else {
1696     ergo_verbose0(ErgoHeapSizing,
1697                   "did not expand the heap",
1698                   ergo_format_reason("heap expansion operation failed"));
1699     // The expansion of the virtual storage space was unsuccessful.
1700     // Let's see if it was because we ran out of swap.
1701     if (G1ExitOnExpansionFailure &&
1702         _hrm.available() >= regions_to_expand) {
1703       // We had head room...
1704       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1705     }
1706   }
1707   return regions_to_expand > 0;
1708 }
1709 
1710 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1711   size_t aligned_shrink_bytes =
1712     ReservedSpace::page_align_size_down(shrink_bytes);
1713   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1714                                          HeapRegion::GrainBytes);
1715   uint num_regions_to_remove = (uint)(aligned_shrink_bytes / HeapRegion::GrainBytes);
1716 
1717   uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1718   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1719 
1720   ergo_verbose3(ErgoHeapSizing,
1721                 "shrink the heap",
1722                 ergo_format_byte("requested shrinking amount")
1723                 ergo_format_byte("aligned shrinking amount")
1724                 ergo_format_byte("attempted shrinking amount"),
1725                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1726   if (num_regions_removed > 0) {
1727     g1_policy()->record_new_heap_size(num_regions());
1728   } else {
1729     ergo_verbose0(ErgoHeapSizing,
1730                   "did not shrink the heap",
1731                   ergo_format_reason("heap shrinking operation failed"));
1732   }
1733 }
1734 
1735 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1736   verify_region_sets_optional();
1737 
1738   // We should only reach here at the end of a Full GC which means we
1739   // should not be holding on to any GC alloc regions. The method
1740   // below will make sure of that and do any remaining clean up.
1741   _allocator->abandon_gc_alloc_regions();
1742 
1743   // Instead of tearing down / rebuilding the free lists here, we
1744   // could instead use the remove_all_pending() method on free_list to
1745   // remove only the ones that we need to remove.
1746   tear_down_region_sets(true /* free_list_only */);
1747   shrink_helper(shrink_bytes);
1748   rebuild_region_sets(true /* free_list_only */);
1749 
1750   _hrm.verify_optional();
1751   verify_region_sets_optional();
1752 }
1753 
1754 // Public methods.
1755 
1756 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1757 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1758 #endif // _MSC_VER
1759 
1760 
1761 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1762   SharedHeap(policy_),
1763   _g1_policy(policy_),
1764   _dirty_card_queue_set(false),
1765   _into_cset_dirty_card_queue_set(false),
1766   _is_alive_closure_cm(this),
1767   _is_alive_closure_stw(this),
1768   _ref_processor_cm(NULL),
1769   _ref_processor_stw(NULL),
1770   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1771   _bot_shared(NULL),
1772   _evac_failure_scan_stack(NULL),
1773   _mark_in_progress(false),
1774   _cg1r(NULL),
1775   _g1mm(NULL),
1776   _refine_cte_cl(NULL),
1777   _full_collection(false),
1778   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1779   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1780   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1781   _humongous_is_live(),
1782   _has_humongous_reclaim_candidates(false),
1783   _free_regions_coming(false),
1784   _young_list(new YoungList(this)),
1785   _gc_time_stamp(0),
1786   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1787   _old_plab_stats(OldPLABSize, PLABWeight),
1788   _expand_heap_after_alloc_failure(true),
1789   _surviving_young_words(NULL),
1790   _old_marking_cycles_started(0),
1791   _old_marking_cycles_completed(0),
1792   _concurrent_cycle_started(false),
1793   _heap_summary_sent(false),
1794   _in_cset_fast_test(),
1795   _dirty_cards_region_list(NULL),
1796   _worker_cset_start_region(NULL),
1797   _worker_cset_start_region_time_stamp(NULL),
1798   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1799   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1800   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1801   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1802 
1803   _g1h = this;
1804   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1805     vm_exit_during_initialization("Failed necessary allocation.");
1806   }
1807 
1808   _allocator = G1Allocator::create_allocator(_g1h);
1809   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
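  // Objects that are at least half a region in size (GrainWords / 2 words)
  // take the humongous allocation path; e.g. assuming 1 MB regions for
  // illustration, that is any allocation of 512 KB or more.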
1810 
1811   int n_queues = MAX2((int)ParallelGCThreads, 1);
1812   _task_queues = new RefToScanQueueSet(n_queues);
1813 
1814   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1815   assert(n_rem_sets > 0, "Invariant.");
1816 
1817   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1818   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1819   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1820 
1821   for (int i = 0; i < n_queues; i++) {
1822     RefToScanQueue* q = new RefToScanQueue();
1823     q->initialize();
1824     _task_queues->register_queue(i, q);
1825     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1826   }
1827   clear_cset_start_regions();
1828 
1829   // Initialize the G1EvacuationFailureALot counters and flags.
1830   NOT_PRODUCT(reset_evacuation_should_fail();)
1831 
1832   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1833 }
1834 
1835 jint G1CollectedHeap::initialize() {
1836   CollectedHeap::pre_initialize();
1837   os::enable_vtime();
1838 
1839   G1Log::init();
1840 
1841   // Necessary to satisfy locking discipline assertions.
1842 
1843   MutexLocker x(Heap_lock);
1844 
1845   // We have to initialize the printer before committing the heap, as
1846   // it will be used then.
1847   _hr_printer.set_active(G1PrintHeapRegions);
1848 
1849   // While there are no constraints in the GC code that HeapWordSize
1850   // be any particular value, there are multiple other areas in the
1851   // system which believe this to be true (e.g. oop->object_size in some
1852   // cases incorrectly returns the size in wordSize units rather than
1853   // HeapWordSize).
1854   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1855 
1856   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1857   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1858   size_t heap_alignment = collector_policy()->heap_alignment();
1859 
1860   // Ensure that the sizes are properly aligned.
1861   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1862   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1863   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1864 
1865   _refine_cte_cl = new RefineCardTableEntryClosure();
1866 
1867   _cg1r = new ConcurrentG1Refine(this, _refine_cte_cl);
1868 
1869   // Reserve the maximum.
1870 
1871   // When compressed oops are enabled, the preferred heap base
1872   // is calculated by subtracting the requested size from the
1873   // 32Gb boundary and using the result as the base address for
1874   // heap reservation. If the requested size is not aligned to
1875   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1876   // into the ReservedHeapSpace constructor) then the actual
1877   // base of the reserved heap may end up differing from the
1878   // address that was requested (i.e. the preferred heap base).
1879   // If this happens then we could end up using a non-optimal
1880   // compressed oops mode.
1881 
1882   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1883                                                  heap_alignment);
1884 
1885   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1886 
1887   // Create the barrier set for the entire reserved region.
1888   G1SATBCardTableLoggingModRefBS* bs
1889     = new G1SATBCardTableLoggingModRefBS(reserved_region());
1890   bs->initialize();
1891   assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1892   set_barrier_set(bs);
1893 
1894   // Also create a G1 rem set.
1895   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
1896 
1897   // Carve out the G1 part of the heap.
1898 
1899   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1900   G1RegionToSpaceMapper* heap_storage =
1901     G1RegionToSpaceMapper::create_mapper(g1_rs,
1902                                          UseLargePages ? os::large_page_size() : os::vm_page_size(),
1903                                          HeapRegion::GrainBytes,
1904                                          1,
1905                                          mtJavaHeap);
1906   heap_storage->set_mapping_changed_listener(&_listener);
1907 
1908   // Reserve space for the block offset table. We do not support automatic uncommit
1909   // for the card table at this time. BOT only.
1910   ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
1911   G1RegionToSpaceMapper* bot_storage =
1912     G1RegionToSpaceMapper::create_mapper(bot_rs,
1913                                          os::vm_page_size(),
1914                                          HeapRegion::GrainBytes,
1915                                          G1BlockOffsetSharedArray::N_bytes,
1916                                          mtGC);
1917 
1918   ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
1919   G1RegionToSpaceMapper* cardtable_storage =
1920     G1RegionToSpaceMapper::create_mapper(cardtable_rs,
1921                                          os::vm_page_size(),
1922                                          HeapRegion::GrainBytes,
1923                                          G1BlockOffsetSharedArray::N_bytes,
1924                                          mtGC);
1925 
1926   // Reserve space for the card counts table.
1927   ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
1928   G1RegionToSpaceMapper* card_counts_storage =
1929     G1RegionToSpaceMapper::create_mapper(card_counts_rs,
1930                                          os::vm_page_size(),
1931                                          HeapRegion::GrainBytes,
1932                                          G1BlockOffsetSharedArray::N_bytes,
1933                                          mtGC);
1934 
1935   // Reserve space for prev and next bitmap.
1936   size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
1937 
1938   ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
1939   G1RegionToSpaceMapper* prev_bitmap_storage =
1940     G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
1941                                          os::vm_page_size(),
1942                                          HeapRegion::GrainBytes,
1943                                          CMBitMap::mark_distance(),
1944                                          mtGC);
1945 
1946   ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
1947   G1RegionToSpaceMapper* next_bitmap_storage =
1948     G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
1949                                          os::vm_page_size(),
1950                                          HeapRegion::GrainBytes,
1951                                          CMBitMap::mark_distance(),
1952                                          mtGC);
1953 
1954   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1955   g1_barrier_set()->initialize(cardtable_storage);
1956   // Do later initialization work for concurrent refinement.
1957   _cg1r->init(card_counts_storage);
1958 
1959   // 6843694 - ensure that the maximum region index can fit
1960   // in the remembered set structures.
1961   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1962   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1963 
1964   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1965   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1966   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1967             "too many cards per region");
1968 
1969   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1970 
1971   _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
1972 
1973   _g1h = this;
1974 
1975   _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1976   _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1977 
1978   // Create the ConcurrentMark data structure and thread.
1979   // (Must do this late, so that "max_regions" is defined.)
1980   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1981   if (_cm == NULL || !_cm->completed_initialization()) {
1982     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
1983     return JNI_ENOMEM;
1984   }
1985   _cmThread = _cm->cmThread();
1986 
1987   // Initialize the from_card cache structure of HeapRegionRemSet.
1988   HeapRegionRemSet::init_heap(max_regions());
1989 
1990   // Now expand into the initial heap size.
1991   if (!expand(init_byte_size)) {
1992     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1993     return JNI_ENOMEM;
1994   }
1995 
1996   // Perform any initialization actions delegated to the policy.
1997   g1_policy()->init();
1998 
1999   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2000                                                SATB_Q_FL_lock,
2001                                                G1SATBProcessCompletedThreshold,
2002                                                Shared_SATB_Q_lock);
2003 
2004   JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
2005                                                 DirtyCardQ_CBL_mon,
2006                                                 DirtyCardQ_FL_lock,
2007                                                 concurrent_g1_refine()->yellow_zone(),
2008                                                 concurrent_g1_refine()->red_zone(),
2009                                                 Shared_DirtyCardQ_lock);
2010 
2011   dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
2012                                     DirtyCardQ_CBL_mon,
2013                                     DirtyCardQ_FL_lock,
2014                                     -1, // never trigger processing
2015                                     -1, // no limit on length
2016                                     Shared_DirtyCardQ_lock,
2017                                     &JavaThread::dirty_card_queue_set());
2018 
2019   // Initialize the card queue set used to hold cards containing
2020   // references into the collection set.
2021   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2022                                              DirtyCardQ_CBL_mon,
2023                                              DirtyCardQ_FL_lock,
2024                                              -1, // never trigger processing
2025                                              -1, // no limit on length
2026                                              Shared_DirtyCardQ_lock,
2027                                              &JavaThread::dirty_card_queue_set());
2028 
2029   // In case we're keeping closure specialization stats, initialize those
2030   // counts and that mechanism.
2031   SpecializationStats::clear();
2032 
2033   // Here we allocate the dummy HeapRegion that is required by the
2034   // G1AllocRegion class.
2035   HeapRegion* dummy_region = _hrm.get_dummy_region();
2036 
2037   // We'll re-use the same region whether the alloc region will
2038   // require BOT updates or not and, if it doesn't, then a non-young
2039   // region will complain that it cannot support allocations without
2040   // BOT updates. So we'll tag the dummy region as eden to avoid that.
2041   dummy_region->set_eden();
2042   // Make sure it's full.
2043   dummy_region->set_top(dummy_region->end());
2044   G1AllocRegion::setup(this, dummy_region);
2045 
2046   _allocator->init_mutator_alloc_region();
2047 
2048   // Create the monitoring and management support only now, so that
2049   // the values in the heap have been properly initialized first.
2050   _g1mm = new G1MonitoringSupport(this);
2051 
2052   G1StringDedup::initialize();
2053 
2054   return JNI_OK;
2055 }
2056 
2057 void G1CollectedHeap::stop() {
2058   // Stop all concurrent threads. We do this to make sure these threads
2059   // do not continue to execute and access resources (e.g. gclog_or_tty)
2060   // that are destroyed during shutdown.
2061   _cg1r->stop();
2062   _cmThread->stop();
2063   if (G1StringDedup::is_enabled()) {
2064     G1StringDedup::stop();
2065   }
2066 }
2067 
2068 void G1CollectedHeap::clear_humongous_is_live_table() {
2069   guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
2070   _humongous_is_live.clear();
2071 }
2072 
2073 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2074   return HeapRegion::max_region_size();
2075 }
2076 
2077 void G1CollectedHeap::ref_processing_init() {
2078   // Reference processing in G1 currently works as follows:
2079   //
2080   // * There are two reference processor instances. One is
2081   //   used to record and process discovered references
2082   //   during concurrent marking; the other is used to
2083   //   record and process references during STW pauses
2084   //   (both full and incremental).
2085   // * Both ref processors need to 'span' the entire heap as
2086   //   the regions in the collection set may be dotted around.
2087   //
2088   // * For the concurrent marking ref processor:
2089   //   * Reference discovery is enabled at initial marking.
2090   //   * Reference discovery is disabled and the discovered
2091   //     references processed etc during remarking.
2092   //   * Reference discovery is MT (see below).
2093   //   * Reference discovery requires a barrier (see below).
2094   //   * Reference processing may or may not be MT
2095   //     (depending on the value of ParallelRefProcEnabled
2096   //     and ParallelGCThreads).
2097   //   * A full GC disables reference discovery by the CM
2098   //     ref processor and abandons any entries on its
2099   //     discovered lists.
2100   //
2101   // * For the STW processor:
2102   //   * Non MT discovery is enabled at the start of a full GC.
2103   //   * Processing and enqueueing during a full GC is non-MT.
2104   //   * During a full GC, references are processed after marking.
2105   //
2106   //   * Discovery (may or may not be MT) is enabled at the start
2107   //     of an incremental evacuation pause.
2108   //   * References are processed near the end of a STW evacuation pause.
2109   //   * For both types of GC:
2110   //     * Discovery is atomic - i.e. not concurrent.
2111   //     * Reference discovery will not need a barrier.
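  // The two ReferenceProcessor instances created below reflect this: the CM
  // processor uses non-atomic (concurrent) discovery while the STW processor
  // uses atomic discovery, and both span the whole reserved region.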
2112 
2113   SharedHeap::ref_processing_init();
2114   MemRegion mr = reserved_region();
2115 
2116   // Concurrent Mark ref processor
2117   _ref_processor_cm =
2118     new ReferenceProcessor(mr,    // span
2119                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2120                                 // mt processing
2121                            (int) ParallelGCThreads,
2122                                 // degree of mt processing
2123                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2124                                 // mt discovery
2125                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2126                                 // degree of mt discovery
2127                            false,
2128                                 // Reference discovery is not atomic
2129                            &_is_alive_closure_cm);
2130                                 // is alive closure
2131                                 // (for efficiency/performance)
2132 
2133   // STW ref processor
2134   _ref_processor_stw =
2135     new ReferenceProcessor(mr,    // span
2136                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2137                                 // mt processing
2138                            MAX2((int)ParallelGCThreads, 1),
2139                                 // degree of mt processing
2140                            (ParallelGCThreads > 1),
2141                                 // mt discovery
2142                            MAX2((int)ParallelGCThreads, 1),
2143                                 // degree of mt discovery
2144                            true,
2145                                 // Reference discovery is atomic
2146                            &_is_alive_closure_stw);
2147                                 // is alive closure
2148                                 // (for efficiency/performance)
2149 }
2150 
2151 size_t G1CollectedHeap::capacity() const {
2152   return _hrm.length() * HeapRegion::GrainBytes;
2153 }
2154 
2155 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2156   assert(!hr->is_continues_humongous(), "pre-condition");
2157   hr->reset_gc_time_stamp();
2158   if (hr->is_starts_humongous()) {
2159     uint first_index = hr->hrm_index() + 1;
2160     uint last_index = hr->last_hc_index();
2161     for (uint i = first_index; i < last_index; i += 1) {
2162       HeapRegion* chr = region_at(i);
2163       assert(chr->is_continues_humongous(), "sanity");
2164       chr->reset_gc_time_stamp();
2165     }
2166   }
2167 }
2168 
2169 #ifndef PRODUCT
2170 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2171 private:
2172   unsigned _gc_time_stamp;
2173   bool _failures;
2174 
2175 public:
2176   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2177     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2178 
2179   virtual bool doHeapRegion(HeapRegion* hr) {
2180     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2181     if (_gc_time_stamp != region_gc_time_stamp) {
2182       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
2183                              "expected %d", HR_FORMAT_PARAMS(hr),
2184                              region_gc_time_stamp, _gc_time_stamp);
2185       _failures = true;
2186     }
2187     return false;
2188   }
2189 
2190   bool failures() { return _failures; }
2191 };
2192 
2193 void G1CollectedHeap::check_gc_time_stamps() {
2194   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2195   heap_region_iterate(&cl);
2196   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2197 }
2198 #endif // PRODUCT
2199 
2200 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2201                                                  DirtyCardQueue* into_cset_dcq,
2202                                                  bool concurrent,
2203                                                  uint worker_i) {
2204   // Clean cards in the hot card cache
2205   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2206   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2207 
2208   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2209   int n_completed_buffers = 0;
2210   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2211     n_completed_buffers++;
2212   }
2213   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2214   dcqs.clear_n_completed_buffers();
2215   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2216 }
2217 
2218 
2219 // Computes the sum of the storage used by the various regions.
2220 size_t G1CollectedHeap::used() const {
2221   return _allocator->used();
2222 }
2223 
2224 size_t G1CollectedHeap::used_unlocked() const {
2225   return _allocator->used_unlocked();
2226 }
2227 
2228 class SumUsedClosure: public HeapRegionClosure {
2229   size_t _used;
2230 public:
2231   SumUsedClosure() : _used(0) {}
2232   bool doHeapRegion(HeapRegion* r) {
2233     if (!r->is_continues_humongous()) {
2234       _used += r->used();
2235     }
2236     return false;
2237   }
2238   size_t result() { return _used; }
2239 };
2240 
2241 size_t G1CollectedHeap::recalculate_used() const {
2242   double recalculate_used_start = os::elapsedTime();
2243 
2244   SumUsedClosure blk;
2245   heap_region_iterate(&blk);
2246 
2247   g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2248   return blk.result();
2249 }
2250 
2251 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2252   switch (cause) {
2253     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2254     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2255     case GCCause::_g1_humongous_allocation: return true;
2256     case GCCause::_update_allocation_context_stats_inc: return true;
2257     case GCCause::_wb_conc_mark:            return true;
2258     default:                                return false;
2259   }
2260 }
2261 
2262 #ifndef PRODUCT
2263 void G1CollectedHeap::allocate_dummy_regions() {
2264   // Let's fill up most of the region
2265   size_t word_size = HeapRegion::GrainWords - 1024;
2266   // And as a result the region we'll allocate will be humongous.
2267   guarantee(is_humongous(word_size), "sanity");
2268 
2269   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2270     // Let's use the existing mechanism for the allocation
2271     HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2272                                                  AllocationContext::system());
2273     if (dummy_obj != NULL) {
2274       MemRegion mr(dummy_obj, word_size);
2275       CollectedHeap::fill_with_object(mr);
2276     } else {
2277       // If we can't allocate once, we probably cannot allocate
2278       // again. Let's get out of the loop.
2279       break;
2280     }
2281   }
2282 }
2283 #endif // !PRODUCT
2284 
2285 void G1CollectedHeap::increment_old_marking_cycles_started() {
2286   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2287     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2288     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2289     _old_marking_cycles_started, _old_marking_cycles_completed));
2290 
2291   _old_marking_cycles_started++;
2292 }
2293 
2294 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2295   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2296 
2297   // We assume that if concurrent == true, then the caller is a
2298   // concurrent thread that has joined the Suspendible Thread
2299   // Set. If there's ever a cheap way to check this, we should add an
2300   // assert here.
2301 
2302   // Given that this method is called at the end of a Full GC or of a
2303   // concurrent cycle, and those can be nested (i.e., a Full GC can
2304   // interrupt a concurrent cycle), the number of full collections
2305   // completed should be either one (in the case where there was no
2306   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2307   // behind the number of full collections started.
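  // For example: while a concurrent cycle is running, started == completed + 1.
  // If a Full GC interrupts it, started == completed + 2 until the Full GC
  // calls this method; the interrupted concurrent cycle closes the remaining
  // gap when it eventually calls this method as well.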
2308 
2309   // This is the case for the inner caller, i.e. a Full GC.
2310   assert(concurrent ||
2311          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2312          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2313          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2314                  "is inconsistent with _old_marking_cycles_completed = %u",
2315                  _old_marking_cycles_started, _old_marking_cycles_completed));
2316 
2317   // This is the case for the outer caller, i.e. the concurrent cycle.
2318   assert(!concurrent ||
2319          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2320          err_msg("for outer caller (concurrent cycle): "
2321                  "_old_marking_cycles_started = %u "
2322                  "is inconsistent with _old_marking_cycles_completed = %u",
2323                  _old_marking_cycles_started, _old_marking_cycles_completed));
2324 
2325   _old_marking_cycles_completed += 1;
2326 
2327   // We need to clear the "in_progress" flag in the CM thread before
2328   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2329   // is set) so that if a waiter requests another System.gc() it doesn't
2330   // incorrectly see that a marking cycle is still in progress.
2331   if (concurrent) {
2332     _cmThread->clear_in_progress();
2333   }
2334 
2335   // This notify_all() will ensure that a thread that called
2336   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2337   // and is waiting for a full GC to finish will be woken up. It is
2338   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2339   FullGCCount_lock->notify_all();
2340 }
2341 
2342 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2343   _concurrent_cycle_started = true;
2344   _gc_timer_cm->register_gc_start(start_time);
2345 
2346   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2347   trace_heap_before_gc(_gc_tracer_cm);
2348 }
2349 
2350 void G1CollectedHeap::register_concurrent_cycle_end() {
2351   if (_concurrent_cycle_started) {
2352     if (_cm->has_aborted()) {
2353       _gc_tracer_cm->report_concurrent_mode_failure();
2354     }
2355 
2356     _gc_timer_cm->register_gc_end();
2357     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2358 
2359     // Clear state variables to prepare for the next concurrent cycle.
2360     _concurrent_cycle_started = false;
2361     _heap_summary_sent = false;
2362   }
2363 }
2364 
2365 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2366   if (_concurrent_cycle_started) {
2367     // This function can be called when:
2368     //  * the cleanup pause is run,
2369     //  * the concurrent cycle is aborted before the cleanup pause, or
2370     //  * the concurrent cycle is aborted after the cleanup pause,
2371     //    but before the concurrent cycle end has been registered.
2372     // Make sure that we only send the heap information once.
2373     if (!_heap_summary_sent) {
2374       trace_heap_after_gc(_gc_tracer_cm);
2375       _heap_summary_sent = true;
2376     }
2377   }
2378 }
2379 
2380 G1YCType G1CollectedHeap::yc_type() {
2381   bool is_young = g1_policy()->gcs_are_young();
2382   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2383   bool is_during_mark = mark_in_progress();
2384 
2385   if (is_initial_mark) {
2386     return InitialMark;
2387   } else if (is_during_mark) {
2388     return DuringMark;
2389   } else if (is_young) {
2390     return Normal;
2391   } else {
2392     return Mixed;
2393   }
2394 }
2395 
2396 void G1CollectedHeap::collect(GCCause::Cause cause) {
2397   assert_heap_not_locked();
2398 
2399   unsigned int gc_count_before;
2400   unsigned int old_marking_count_before;
2401   unsigned int full_gc_count_before;
2402   bool retry_gc;
2403 
2404   do {
2405     retry_gc = false;
2406 
2407     {
2408       MutexLocker ml(Heap_lock);
2409 
2410       // Read the GC count while holding the Heap_lock
2411       gc_count_before = total_collections();
2412       full_gc_count_before = total_full_collections();
2413       old_marking_count_before = _old_marking_cycles_started;
2414     }
2415 
2416     if (should_do_concurrent_full_gc(cause)) {
2417       // Schedule an initial-mark evacuation pause that will start a
2418       // concurrent cycle. We're setting word_size to 0 which means that
2419       // we are not requesting a post-GC allocation.
2420       VM_G1IncCollectionPause op(gc_count_before,
2421                                  0,     /* word_size */
2422                                  true,  /* should_initiate_conc_mark */
2423                                  g1_policy()->max_pause_time_ms(),
2424                                  cause);
2425       op.set_allocation_context(AllocationContext::current());
2426 
2427       VMThread::execute(&op);
2428       if (!op.pause_succeeded()) {
2429         if (old_marking_count_before == _old_marking_cycles_started) {
2430           retry_gc = op.should_retry_gc();
2431         } else {
2432           // A Full GC happened while we were trying to schedule the
2433           // initial-mark GC. No point in starting a new cycle given
2434           // that the whole heap was collected anyway.
2435         }
2436 
2437         if (retry_gc) {
2438           if (GC_locker::is_active_and_needs_gc()) {
2439             GC_locker::stall_until_clear();
2440           }
2441         }
2442       }
2443     } else {
2444       if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2445           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2446 
2447         // Schedule a standard evacuation pause. We're setting word_size
2448         // to 0 which means that we are not requesting a post-GC allocation.
2449         VM_G1IncCollectionPause op(gc_count_before,
2450                                    0,     /* word_size */
2451                                    false, /* should_initiate_conc_mark */
2452                                    g1_policy()->max_pause_time_ms(),
2453                                    cause);
2454         VMThread::execute(&op);
2455       } else {
2456         // Schedule a Full GC.
2457         VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2458         VMThread::execute(&op);
2459       }
2460     }
2461   } while (retry_gc);
2462 }
2463 
2464 bool G1CollectedHeap::is_in(const void* p) const {
2465   if (_hrm.reserved().contains(p)) {
2466     // Given that we know that p is in the reserved space,
2467     // heap_region_containing_raw() should successfully
2468     // return the containing region.
2469     HeapRegion* hr = heap_region_containing_raw(p);
2470     return hr->is_in(p);
2471   } else {
2472     return false;
2473   }
2474 }
2475 
2476 #ifdef ASSERT
2477 bool G1CollectedHeap::is_in_exact(const void* p) const {
2478   bool contains = reserved_region().contains(p);
2479   bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2480   if (contains && available) {
2481     return true;
2482   } else {
2483     return false;
2484   }
2485 }
2486 #endif
2487 
2488 // Iteration functions.
2489 
2490 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2491 
2492 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2493   ExtendedOopClosure* _cl;
2494 public:
2495   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2496   bool doHeapRegion(HeapRegion* r) {
2497     if (!r->is_continues_humongous()) {
2498       r->oop_iterate(_cl);
2499     }
2500     return false;
2501   }
2502 };
2503 
2504 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2505   IterateOopClosureRegionClosure blk(cl);
2506   heap_region_iterate(&blk);
2507 }
2508 
2509 // Iterates an ObjectClosure over all objects within a HeapRegion.
2510 
2511 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2512   ObjectClosure* _cl;
2513 public:
2514   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2515   bool doHeapRegion(HeapRegion* r) {
2516     if (!r->is_continues_humongous()) {
2517       r->object_iterate(_cl);
2518     }
2519     return false;
2520   }
2521 };
2522 
2523 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2524   IterateObjectClosureRegionClosure blk(cl);
2525   heap_region_iterate(&blk);
2526 }
2527 
2528 // Calls a SpaceClosure on a HeapRegion.
2529 
2530 class SpaceClosureRegionClosure: public HeapRegionClosure {
2531   SpaceClosure* _cl;
2532 public:
2533   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2534   bool doHeapRegion(HeapRegion* r) {
2535     _cl->do_space(r);
2536     return false;
2537   }
2538 };
2539 
2540 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2541   SpaceClosureRegionClosure blk(cl);
2542   heap_region_iterate(&blk);
2543 }
2544 
2545 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2546   _hrm.iterate(cl);
2547 }
2548 
2549 void
2550 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2551                                          uint worker_id,
2552                                          HeapRegionClaimer *hrclaimer,
2553                                          bool concurrent) const {
2554   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2555 }
2556 
2557 // Clear the cached CSet starting regions and (more importantly)
2558 // the time stamps. Called when we reset the GC time stamp.
2559 void G1CollectedHeap::clear_cset_start_regions() {
2560   assert(_worker_cset_start_region != NULL, "sanity");
2561   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2562 
2563   int n_queues = MAX2((int)ParallelGCThreads, 1);
2564   for (int i = 0; i < n_queues; i++) {
2565     _worker_cset_start_region[i] = NULL;
2566     _worker_cset_start_region_time_stamp[i] = 0;
2567   }
2568 }
2569 
2570 // Given the id of a worker, obtain or calculate a suitable
2571 // starting region for iterating over the current collection set.
2572 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
2573   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2574 
2575   HeapRegion* result = NULL;
2576   unsigned gc_time_stamp = get_gc_time_stamp();
2577 
2578   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2579     // Cached starting region for current worker was set
2580     // during the current pause - so it's valid.
2581     // Note: the cached starting heap region may be NULL
2582     // (when the collection set is empty).
2583     result = _worker_cset_start_region[worker_i];
2584     assert(result == NULL || result->in_collection_set(), "sanity");
2585     return result;
2586   }
2587 
2588   // The cached entry was not valid so let's calculate
2589   // a suitable starting heap region for this worker.
2590 
2591   // We want the parallel threads to start their collection
2592   // set iteration at different collection set regions to
2593   // avoid contention.
2594   // If we have:
2595   //          n collection set regions
2596   //          p threads
2597   // Then thread t will start at region floor ((t * n) / p)
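  // (Worked example, purely illustrative: with n = 8 CSet regions and
  //  p = 3 threads, the starting indices are floor(0 / 3) = 0,
  //  floor(8 / 3) = 2 and floor(16 / 3) = 5, so the threads begin at
  //  regions 0, 2 and 5 respectively.)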
2598 
2599   result = g1_policy()->collection_set();
2600   uint cs_size = g1_policy()->cset_region_length();
2601   uint active_workers = workers()->active_workers();
2602   assert(UseDynamicNumberOfGCThreads ||
2603            active_workers == workers()->total_workers(),
2604            "Unless dynamic should use total workers");
2605 
2606   uint end_ind   = (cs_size * worker_i) / active_workers;
2607   uint start_ind = 0;
2608 
2609   if (worker_i > 0 &&
2610       _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2611     // The previous worker's starting region is valid,
2612     // so let's iterate from there.
2613     start_ind = (cs_size * (worker_i - 1)) / active_workers;
2614     result = _worker_cset_start_region[worker_i - 1];
2615   }
2616 
2617   for (uint i = start_ind; i < end_ind; i++) {
2618     result = result->next_in_collection_set();
2619   }
2620 
2621   // Note: the calculated starting heap region may be NULL
2622   // (when the collection set is empty).
2623   assert(result == NULL || result->in_collection_set(), "sanity");
2624   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2625          "should be updated only once per pause");
2626   _worker_cset_start_region[worker_i] = result;
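  // (The storestore barrier below orders the write of the cached starting
  //  region before the write of its time stamp: a worker that observes a
  //  matching time stamp for this slot, as worker_i + 1 may do above, is
  //  then guaranteed to also observe the region pointer that goes with it.)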
2627   OrderAccess::storestore();
2628   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2629   return result;
2630 }
2631 
2632 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2633   HeapRegion* r = g1_policy()->collection_set();
2634   while (r != NULL) {
2635     HeapRegion* next = r->next_in_collection_set();
2636     if (cl->doHeapRegion(r)) {
2637       cl->incomplete();
2638       return;
2639     }
2640     r = next;
2641   }
2642 }
2643 
2644 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2645                                                   HeapRegionClosure *cl) {
2646   if (r == NULL) {
2647     // The CSet is empty so there's nothing to do.
2648     return;
2649   }
2650 
2651   assert(r->in_collection_set(),
2652          "Start region must be a member of the collection set.");
2653   HeapRegion* cur = r;
2654   while (cur != NULL) {
2655     HeapRegion* next = cur->next_in_collection_set();
2656     if (cl->doHeapRegion(cur) && false) {
2657       cl->incomplete();
2658       return;
2659     }
2660     cur = next;
2661   }
2662   cur = g1_policy()->collection_set();
2663   while (cur != r) {
2664     HeapRegion* next = cur->next_in_collection_set();
2665     if (cl->doHeapRegion(cur) && false) {
2666       cl->incomplete();
2667       return;
2668     }
2669     cur = next;
2670   }
2671 }
2672 
2673 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2674   HeapRegion* result = _hrm.next_region_in_heap(from);
2675   while (result != NULL && result->is_humongous()) {
2676     result = _hrm.next_region_in_heap(result);
2677   }
2678   return result;
2679 }
2680 
2681 Space* G1CollectedHeap::space_containing(const void* addr) const {
2682   return heap_region_containing(addr);
2683 }
2684 
2685 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2686   Space* sp = space_containing(addr);
2687   return sp->block_start(addr);
2688 }
2689 
2690 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2691   Space* sp = space_containing(addr);
2692   return sp->block_size(addr);
2693 }
2694 
2695 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2696   Space* sp = space_containing(addr);
2697   return sp->block_is_obj(addr);
2698 }
2699 
2700 bool G1CollectedHeap::supports_tlab_allocation() const {
2701   return true;
2702 }
2703 
2704 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2705   return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
2706 }
2707 
2708 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2709   return young_list()->eden_used_bytes();
2710 }
2711 
2712 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2713 // must be smaller than the humongous object limit.
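// (For scale, and purely illustrative: G1 treats an object as humongous once
//  it is at least half a region, i.e. _humongous_object_threshold_in_words is
//  HeapRegion::GrainWords / 2. With 1 MB regions on a 64-bit VM that is
//  65536 words, so the value returned below would be 65535 words after
//  alignment.)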
2714 size_t G1CollectedHeap::max_tlab_size() const {
2715   return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
2716 }
2717 
2718 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2719   // Return the remaining space in the cur alloc region, but not less than
2720   // the min TLAB size.
2721 
2722   // Also, this value can be at most the humongous object threshold,
2723   // since we can't allow tlabs to grow big enough to accommodate
2724   // humongous objects.
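  // (Illustrative arithmetic for the clamp below: if the current region has
  //  3000 bytes free, MinTLABSize is 2048 bytes and max_tlab is about 512 KB,
  //  the reported value is MIN2(MAX2(3000, 2048), max_tlab) = 3000 bytes.)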
2725 
2726   HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
2727   size_t max_tlab = max_tlab_size() * wordSize;
2728   if (hr == NULL) {
2729     return max_tlab;
2730   } else {
2731     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
2732   }
2733 }
2734 
2735 size_t G1CollectedHeap::max_capacity() const {
2736   return _hrm.reserved().byte_size();
2737 }
2738 
2739 jlong G1CollectedHeap::millis_since_last_gc() {
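  // Not implemented for G1 yet (see the disabled assert below); callers
  // currently always get 0.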
2740   // assert(false, "NYI");
2741   return 0;
2742 }
2743 
2744 void G1CollectedHeap::prepare_for_verify() {
2745   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2746     ensure_parsability(false);
2747   }
2748   g1_rem_set()->prepare_for_verify();
2749 }
2750 
2751 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2752                                               VerifyOption vo) {
2753   switch (vo) {
2754   case VerifyOption_G1UsePrevMarking:
2755     return hr->obj_allocated_since_prev_marking(obj);
2756   case VerifyOption_G1UseNextMarking:
2757     return hr->obj_allocated_since_next_marking(obj);
2758   case VerifyOption_G1UseMarkWord:
2759     return false;
2760   default:
2761     ShouldNotReachHere();
2762   }
2763   return false; // keep some compilers happy
2764 }
2765 
2766 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
2767   switch (vo) {
2768   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
2769   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
2770   case VerifyOption_G1UseMarkWord:    return NULL;
2771   default:                            ShouldNotReachHere();
2772   }
2773   return NULL; // keep some compilers happy
2774 }
2775 
2776 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
2777   switch (vo) {
2778   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
2779   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
2780   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
2781   default:                            ShouldNotReachHere();
2782   }
2783   return false; // keep some compilers happy
2784 }
2785 
2786 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
2787   switch (vo) {
2788   case VerifyOption_G1UsePrevMarking: return "PTAMS";
2789   case VerifyOption_G1UseNextMarking: return "NTAMS";
2790   case VerifyOption_G1UseMarkWord:    return "NONE";
2791   default:                            ShouldNotReachHere();
2792   }
2793   return NULL; // keep some compilers happy
2794 }
2795 
2796 class VerifyRootsClosure: public OopClosure {
2797 private:
2798   G1CollectedHeap* _g1h;
2799   VerifyOption     _vo;
2800   bool             _failures;
2801 public:
2802   // _vo == UsePrevMarking -> use "prev" marking information,
2803   // _vo == UseNextMarking -> use "next" marking information,
2804   // _vo == UseMarkWord    -> use mark word from object header.
2805   VerifyRootsClosure(VerifyOption vo) :
2806     _g1h(G1CollectedHeap::heap()),
2807     _vo(vo),
2808     _failures(false) { }
2809 
2810   bool failures() { return _failures; }
2811 
2812   template <class T> void do_oop_nv(T* p) {
2813     T heap_oop = oopDesc::load_heap_oop(p);
2814     if (!oopDesc::is_null(heap_oop)) {
2815       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2816       if (_g1h->is_obj_dead_cond(obj, _vo)) {
2817         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
2818                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
2819         if (_vo == VerifyOption_G1UseMarkWord) {
2820           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
2821         }
2822         obj->print_on(gclog_or_tty);
2823         _failures = true;
2824       }
2825     }
2826   }
2827 
2828   void do_oop(oop* p)       { do_oop_nv(p); }
2829   void do_oop(narrowOop* p) { do_oop_nv(p); }
2830 };
2831 
2832 class G1VerifyCodeRootOopClosure: public OopClosure {
2833   G1CollectedHeap* _g1h;
2834   OopClosure* _root_cl;
2835   nmethod* _nm;
2836   VerifyOption _vo;
2837   bool _failures;
2838 
2839   template <class T> void do_oop_work(T* p) {
2840     // First verify that this root is live
2841     _root_cl->do_oop(p);
2842 
2843     if (!G1VerifyHeapRegionCodeRoots) {
2844       // We're not verifying the code roots attached to heap regions.
2845       return;
2846     }
2847 
2848     // Don't check the code roots during marking verification in a full GC
2849     if (_vo == VerifyOption_G1UseMarkWord) {
2850       return;
2851     }
2852 
2853     // Now verify that the current nmethod (which contains p) is
2854     // in the code root list of the heap region containing the
2855     // object referenced by p.
2856 
2857     T heap_oop = oopDesc::load_heap_oop(p);
2858     if (!oopDesc::is_null(heap_oop)) {
2859       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2860 
2861       // Now fetch the region containing the object
2862       HeapRegion* hr = _g1h->heap_region_containing(obj);
2863       HeapRegionRemSet* hrrs = hr->rem_set();
2864       // Verify that the strong code root list for this region
2865       // contains the nmethod
2866       if (!hrrs->strong_code_roots_list_contains(_nm)) {
2867         gclog_or_tty->print_cr("Code root location "PTR_FORMAT" "
2868                               "from nmethod "PTR_FORMAT" not in strong "
2869                               "code roots for region ["PTR_FORMAT","PTR_FORMAT")",
2870                               p, _nm, hr->bottom(), hr->end());
2871         _failures = true;
2872       }
2873     }
2874   }
2875 
2876 public:
2877   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
2878     _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
2879 
2880   void do_oop(oop* p) { do_oop_work(p); }
2881   void do_oop(narrowOop* p) { do_oop_work(p); }
2882 
2883   void set_nmethod(nmethod* nm) { _nm = nm; }
2884   bool failures() { return _failures; }
2885 };
2886 
2887 class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
2888   G1VerifyCodeRootOopClosure* _oop_cl;
2889 
2890 public:
2891   G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
2892     _oop_cl(oop_cl) {}
2893 
2894   void do_code_blob(CodeBlob* cb) {
2895     nmethod* nm = cb->as_nmethod_or_null();
2896     if (nm != NULL) {
2897       _oop_cl->set_nmethod(nm);
2898       nm->oops_do(_oop_cl);
2899     }
2900   }
2901 };
2902 
2903 class YoungRefCounterClosure : public OopClosure {
2904   G1CollectedHeap* _g1h;
2905   int              _count;
2906  public:
2907   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
2908   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
2909   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2910 
2911   int count() { return _count; }
2912   void reset_count() { _count = 0; };
2913 };
2914 
2915 class VerifyKlassClosure: public KlassClosure {
2916   YoungRefCounterClosure _young_ref_counter_closure;
2917   OopClosure *_oop_closure;
2918  public:
2919   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
2920   void do_klass(Klass* k) {
2921     k->oops_do(_oop_closure);
2922 
2923     _young_ref_counter_closure.reset_count();
2924     k->oops_do(&_young_ref_counter_closure);
2925     if (_young_ref_counter_closure.count() > 0) {
2926       guarantee(k->has_modified_oops(), err_msg("Klass " PTR_FORMAT ", has young refs but is not dirty.", k));
2927     }
2928   }
2929 };
2930 
2931 class VerifyLivenessOopClosure: public OopClosure {
2932   G1CollectedHeap* _g1h;
2933   VerifyOption _vo;
2934 public:
2935   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
2936     _g1h(g1h), _vo(vo)
2937   { }
2938   void do_oop(narrowOop *p) { do_oop_work(p); }
2939   void do_oop(      oop *p) { do_oop_work(p); }
2940 
2941   template <class T> void do_oop_work(T *p) {
2942     oop obj = oopDesc::load_decode_heap_oop(p);
2943     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
2944               "Dead object referenced by a live object");
2945   }
2946 };
2947 
2948 class VerifyObjsInRegionClosure: public ObjectClosure {
2949 private:
2950   G1CollectedHeap* _g1h;
2951   size_t _live_bytes;
2952   HeapRegion *_hr;
2953   VerifyOption _vo;
2954 public:
2955   // _vo == UsePrevMarking -> use "prev" marking information,
2956   // _vo == UseNextMarking -> use "next" marking information,
2957   // _vo == UseMarkWord    -> use mark word from object header.
2958   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
2959     : _live_bytes(0), _hr(hr), _vo(vo) {
2960     _g1h = G1CollectedHeap::heap();
2961   }
2962   void do_object(oop o) {
2963     VerifyLivenessOopClosure isLive(_g1h, _vo);
2964     assert(o != NULL, "Huh?");
2965     if (!_g1h->is_obj_dead_cond(o, _vo)) {
2966       // If the object is alive according to the mark word,
2967       // then verify that the marking information agrees.
2968       // Note we can't verify the contra-positive of the
2969       // above: if the object is dead (according to the mark
2970       // word), it may not be marked, or may have been marked
2971       // but has since become dead, or may have been allocated
2972       // since the last marking.
2973       if (_vo == VerifyOption_G1UseMarkWord) {
2974         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
2975       }
2976 
2977       o->oop_iterate_no_header(&isLive);
2978       if (!_hr->obj_allocated_since_prev_marking(o)) {
2979         size_t obj_size = o->size();    // Make sure we don't overflow
2980         _live_bytes += (obj_size * HeapWordSize);
2981       }
2982     }
2983   }
2984   size_t live_bytes() { return _live_bytes; }
2985 };
2986 
2987 class PrintObjsInRegionClosure : public ObjectClosure {
2988   HeapRegion *_hr;
2989   G1CollectedHeap *_g1;
2990 public:
2991   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
2992     _g1 = G1CollectedHeap::heap();
2993   };
2994 
2995   void do_object(oop o) {
2996     if (o != NULL) {
2997       HeapWord *start = (HeapWord *) o;
2998       size_t word_sz = o->size();
2999       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
3000                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3001                           (void*) o, word_sz,
3002                           _g1->isMarkedPrev(o),
3003                           _g1->isMarkedNext(o),
3004                           _hr->obj_allocated_since_prev_marking(o));
3005       HeapWord *end = start + word_sz;
3006       HeapWord *cur;
3007       int *val;
3008       for (cur = start; cur < end; cur++) {
3009         val = (int *) cur;
3010         gclog_or_tty->print("\t "PTR_FORMAT":%d\n", val, *val);
3011       }
3012     }
3013   }
3014 };
3015 
3016 class VerifyRegionClosure: public HeapRegionClosure {
3017 private:
3018   bool             _par;
3019   VerifyOption     _vo;
3020   bool             _failures;
3021 public:
3022   // _vo == UsePrevMarking -> use "prev" marking information,
3023   // _vo == UseNextMarking -> use "next" marking information,
3024   // _vo == UseMarkWord    -> use mark word from object header.
3025   VerifyRegionClosure(bool par, VerifyOption vo)
3026     : _par(par),
3027       _vo(vo),
3028       _failures(false) {}
3029 
3030   bool failures() {
3031     return _failures;
3032   }
3033 
3034   bool doHeapRegion(HeapRegion* r) {
3035     if (!r->is_continues_humongous()) {
3036       bool failures = false;
3037       r->verify(_vo, &failures);
3038       if (failures) {
3039         _failures = true;
3040       } else {
3041         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3042         r->object_iterate(&not_dead_yet_cl);
3043         if (_vo != VerifyOption_G1UseNextMarking) {
3044           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3045             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3046                                    "max_live_bytes "SIZE_FORMAT" "
3047                                    "< calculated "SIZE_FORMAT,
3048                                    r->bottom(), r->end(),
3049                                    r->max_live_bytes(),
3050                                  not_dead_yet_cl.live_bytes());
3051             _failures = true;
3052           }
3053         } else {
3054           // When vo == UseNextMarking we cannot currently do a sanity
3055           // check on the live bytes as the calculation has not been
3056           // finalized yet.
3057         }
3058       }
3059     }
3060     return false; // keep iterating even after a failure so that every region gets verified
3061   }
3062 };
3063 
3064 // This is the task used for parallel verification of the heap regions
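// Each worker repeatedly claims regions through the shared HeapRegionClaimer
// in work() below, so every region is verified by exactly one worker; any
// per-worker failure is folded into the task-wide _failures flag.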
3065 
3066 class G1ParVerifyTask: public AbstractGangTask {
3067 private:
3068   G1CollectedHeap*  _g1h;
3069   VerifyOption      _vo;
3070   bool              _failures;
3071   HeapRegionClaimer _hrclaimer;
3072 
3073 public:
3074   // _vo == UsePrevMarking -> use "prev" marking information,
3075   // _vo == UseNextMarking -> use "next" marking information,
3076   // _vo == UseMarkWord    -> use mark word from object header.
3077   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3078       AbstractGangTask("Parallel verify task"),
3079       _g1h(g1h),
3080       _vo(vo),
3081       _failures(false),
3082       _hrclaimer(g1h->workers()->active_workers()) {}
3083 
3084   bool failures() {
3085     return _failures;
3086   }
3087 
3088   void work(uint worker_id) {
3089     HandleMark hm;
3090     VerifyRegionClosure blk(true, _vo);
3091     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
3092     if (blk.failures()) {
3093       _failures = true;
3094     }
3095   }
3096 };
3097 
3098 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3099   if (SafepointSynchronize::is_at_safepoint()) {
3100     assert(Thread::current()->is_VM_thread(),
3101            "Expected to be executed serially by the VM thread at this point");
3102 
3103     if (!silent) { gclog_or_tty->print("Roots "); }
3104     VerifyRootsClosure rootsCl(vo);
3105     VerifyKlassClosure klassCl(this, &rootsCl);
3106     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3107 
3108     // We apply the relevant closures to all the oops in the
3109     // system dictionary, class loader data graph, the string table
3110     // and the nmethods in the code cache.
3111     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3112     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3113 
3114     process_all_roots(true,            // activate StrongRootsScope
3115                       SO_AllCodeCache, // roots scanning options
3116                       &rootsCl,
3117                       &cldCl,
3118                       &blobsCl);
3119 
3120     bool failures = rootsCl.failures() || codeRootsCl.failures();
3121 
3122     if (vo != VerifyOption_G1UseMarkWord) {
3123       // If we're verifying during a full GC then the region sets
3124       // will have been torn down at the start of the GC. Therefore
3125       // verifying the region sets will fail. So we only verify
3126       // the region sets when not in a full GC.
3127       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3128       verify_region_sets();
3129     }
3130 
3131     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3132     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3133 
3134       G1ParVerifyTask task(this, vo);
3135       assert(UseDynamicNumberOfGCThreads ||
3136         workers()->active_workers() == workers()->total_workers(),
3137         "If not dynamic should be using all the workers");
3138       int n_workers = workers()->active_workers();
3139       set_par_threads(n_workers);
3140       workers()->run_task(&task);
3141       set_par_threads(0);
3142       if (task.failures()) {
3143         failures = true;
3144       }
3145 
3146     } else {
3147       VerifyRegionClosure blk(false, vo);
3148       heap_region_iterate(&blk);
3149       if (blk.failures()) {
3150         failures = true;
3151       }
3152     }
3153 
3154     if (G1StringDedup::is_enabled()) {
3155       if (!silent) gclog_or_tty->print("StrDedup ");
3156       G1StringDedup::verify();
3157     }
3158 
3159     if (failures) {
3160       gclog_or_tty->print_cr("Heap:");
3161       // It helps to have the per-region information in the output to
3162       // help us track down what went wrong. This is why we call
3163       // print_extended_on() instead of print_on().
3164       print_extended_on(gclog_or_tty);
3165       gclog_or_tty->cr();
3166 #ifndef PRODUCT
3167       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3168         concurrent_mark()->print_reachable("at-verification-failure",
3169                                            vo, false /* all */);
3170       }
3171 #endif
3172       gclog_or_tty->flush();
3173     }
3174     guarantee(!failures, "there should not have been any failures");
3175   } else {
3176     if (!silent) {
3177       gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet");
3178       if (G1StringDedup::is_enabled()) {
3179         gclog_or_tty->print(", StrDedup");
3180       }
3181       gclog_or_tty->print(") ");
3182     }
3183   }
3184 }
3185 
3186 void G1CollectedHeap::verify(bool silent) {
3187   verify(silent, VerifyOption_G1UsePrevMarking);
3188 }
3189 
3190 double G1CollectedHeap::verify(bool guard, const char* msg) {
3191   double verify_time_ms = 0.0;
3192 
3193   if (guard && total_collections() >= VerifyGCStartAt) {
3194     double verify_start = os::elapsedTime();
3195     HandleMark hm;  // Discard invalid handles created during verification
3196     prepare_for_verify();
3197     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
3198     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
3199   }
3200 
3201   return verify_time_ms;
3202 }
3203 
3204 void G1CollectedHeap::verify_before_gc() {
3205   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
3206   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
3207 }
3208 
3209 void G1CollectedHeap::verify_after_gc() {
3210   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3211   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3212 }
3213 
3214 class PrintRegionClosure: public HeapRegionClosure {
3215   outputStream* _st;
3216 public:
3217   PrintRegionClosure(outputStream* st) : _st(st) {}
3218   bool doHeapRegion(HeapRegion* r) {
3219     r->print_on(_st);
3220     return false;
3221   }
3222 };
3223 
3224 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3225                                        const HeapRegion* hr,
3226                                        const VerifyOption vo) const {
3227   switch (vo) {
3228   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3229   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3230   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
3231   default:                            ShouldNotReachHere();
3232   }
3233   return false; // keep some compilers happy
3234 }
3235 
3236 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3237                                        const VerifyOption vo) const {
3238   switch (vo) {
3239   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3240   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3241   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
3242   default:                            ShouldNotReachHere();
3243   }
3244   return false; // keep some compilers happy
3245 }
3246 
3247 void G1CollectedHeap::print_on(outputStream* st) const {
3248   st->print(" %-20s", "garbage-first heap");
3249   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3250             capacity()/K, used_unlocked()/K);
3251   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3252             _hrm.reserved().start(),
3253             _hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords,
3254             _hrm.reserved().end());
3255   st->cr();
3256   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3257   uint young_regions = _young_list->length();
3258   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3259             (size_t) young_regions * HeapRegion::GrainBytes / K);
3260   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3261   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3262             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3263   st->cr();
3264   MetaspaceAux::print_on(st);
3265 }
3266 
3267 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3268   print_on(st);
3269 
3270   // Print the per-region information.
3271   st->cr();
3272   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3273                "HS=humongous(starts), HC=humongous(continues), "
3274                "CS=collection set, F=free, TS=gc time stamp, "
3275                "PTAMS=previous top-at-mark-start, "
3276                "NTAMS=next top-at-mark-start)");
3277   PrintRegionClosure blk(st);
3278   heap_region_iterate(&blk);
3279 }
3280 
3281 void G1CollectedHeap::print_on_error(outputStream* st) const {
3282   this->CollectedHeap::print_on_error(st);
3283 
3284   if (_cm != NULL) {
3285     st->cr();
3286     _cm->print_on_error(st);
3287   }
3288 }
3289 
3290 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3291   workers()->print_worker_threads_on(st);
3292   _cmThread->print_on(st);
3293   st->cr();
3294   _cm->print_worker_threads_on(st);
3295   _cg1r->print_worker_threads_on(st);
3296   if (G1StringDedup::is_enabled()) {
3297     G1StringDedup::print_worker_threads_on(st);
3298   }
3299 }
3300 
3301 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3302   workers()->threads_do(tc);
3303   tc->do_thread(_cmThread);
3304   _cg1r->threads_do(tc);
3305   if (G1StringDedup::is_enabled()) {
3306     G1StringDedup::threads_do(tc);
3307   }
3308 }
3309 
3310 void G1CollectedHeap::print_tracing_info() const {
3311   // We'll overload this to mean "trace GC pause statistics."
3312   if (TraceYoungGenTime || TraceOldGenTime) {
3313     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3314     // to that.
3315     g1_policy()->print_tracing_info();
3316   }
3317   if (G1SummarizeRSetStats) {
3318     g1_rem_set()->print_summary_info();
3319   }
3320   if (G1SummarizeConcMark) {
3321     concurrent_mark()->print_summary_info();
3322   }
3323   g1_policy()->print_yg_surv_rate_info();
3324   SpecializationStats::print();
3325 }
3326 
3327 #ifndef PRODUCT
3328 // Helpful for debugging RSet issues.
3329 
3330 class PrintRSetsClosure : public HeapRegionClosure {
3331 private:
3332   const char* _msg;
3333   size_t _occupied_sum;
3334 
3335 public:
3336   bool doHeapRegion(HeapRegion* r) {
3337     HeapRegionRemSet* hrrs = r->rem_set();
3338     size_t occupied = hrrs->occupied();
3339     _occupied_sum += occupied;
3340 
3341     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
3342                            HR_FORMAT_PARAMS(r));
3343     if (occupied == 0) {
3344       gclog_or_tty->print_cr("  RSet is empty");
3345     } else {
3346       hrrs->print();
3347     }
3348     gclog_or_tty->print_cr("----------");
3349     return false;
3350   }
3351 
3352   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3353     gclog_or_tty->cr();
3354     gclog_or_tty->print_cr("========================================");
3355     gclog_or_tty->print_cr("%s", msg);
3356     gclog_or_tty->cr();
3357   }
3358 
3359   ~PrintRSetsClosure() {
3360     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3361     gclog_or_tty->print_cr("========================================");
3362     gclog_or_tty->cr();
3363   }
3364 };
3365 
3366 void G1CollectedHeap::print_cset_rsets() {
3367   PrintRSetsClosure cl("Printing CSet RSets");
3368   collection_set_iterate(&cl);
3369 }
3370 
3371 void G1CollectedHeap::print_all_rsets() {
3372   PrintRSetsClosure cl("Printing All RSets");
3373   heap_region_iterate(&cl);
3374 }
3375 #endif // PRODUCT
3376 
3377 G1CollectedHeap* G1CollectedHeap::heap() {
3378   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3379          "not a garbage-first heap");
3380   return _g1h;
3381 }
3382 
3383 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3384   // always_do_update_barrier = false;
3385   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3386   // Fill TLAB's and such
3387   accumulate_statistics_all_tlabs();
3388   ensure_parsability(true);
3389 
3390   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
3391       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
3392     g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
3393   }
3394 }
3395 
3396 void G1CollectedHeap::gc_epilogue(bool full) {
3397 
3398   if (G1SummarizeRSetStats &&
3399       (G1SummarizeRSetStatsPeriod > 0) &&
3400       // We are at the end of the GC; the total collections counter has already been increased.
3401       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3402     g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3403   }
3404 
3405   // FIXME: what is this about?
3406   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3407   // is set.
3408   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3409                         "derived pointer present"));
3410   // always_do_update_barrier = true;
3411 
3412   resize_all_tlabs();
3413   allocation_context_stats().update(full);
3414 
3415   // We have just completed a GC. Update the soft reference
3416   // policy with the new heap occupancy
3417   Universe::update_heap_info_at_gc();
3418 }
3419 
3420 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3421                                                unsigned int gc_count_before,
3422                                                bool* succeeded,
3423                                                GCCause::Cause gc_cause) {
3424   assert_heap_not_locked_and_not_at_safepoint();
3425   g1_policy()->record_stop_world_start();
3426   VM_G1IncCollectionPause op(gc_count_before,
3427                              word_size,
3428                              false, /* should_initiate_conc_mark */
3429                              g1_policy()->max_pause_time_ms(),
3430                              gc_cause);
3431 
3432   op.set_allocation_context(AllocationContext::current());
3433   VMThread::execute(&op);
3434 
3435   HeapWord* result = op.result();
3436   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3437   assert(result == NULL || ret_succeeded,
3438          "the result should be NULL if the VM did not succeed");
3439   *succeeded = ret_succeeded;
3440 
3441   assert_heap_not_locked();
3442   return result;
3443 }
3444 
3445 void
3446 G1CollectedHeap::doConcurrentMark() {
3447   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3448   if (!_cmThread->in_progress()) {
3449     _cmThread->set_started();
3450     CGC_lock->notify();
3451   }
3452 }
3453 
3454 size_t G1CollectedHeap::pending_card_num() {
3455   size_t extra_cards = 0;
3456   JavaThread *curr = Threads::first();
3457   while (curr != NULL) {
3458     DirtyCardQueue& dcq = curr->dirty_card_queue();
3459     extra_cards += dcq.size();
3460     curr = curr->next();
3461   }
3462   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3463   size_t buffer_size = dcqs.buffer_size();
3464   size_t buffer_num = dcqs.completed_buffers_num();
3465 
3466   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3467   // in bytes - not the number of 'entries'. We need to convert
3468   // into a number of cards.
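  // (Illustrative figures only: on a 64-bit VM oopSize is 8, so one completely
  //  filled 2048-byte buffer accounts for 256 card entries; the actual buffer
  //  size is a runtime setting.)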
3469   return (buffer_size * buffer_num + extra_cards) / oopSize;
3470 }
3471 
3472 size_t G1CollectedHeap::cards_scanned() {
3473   return g1_rem_set()->cardsScanned();
3474 }
3475 
3476 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3477   HeapRegion* region = region_at(index);
3478   assert(region->is_starts_humongous(), "Must start a humongous object");
3479   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3480 }
3481 
3482 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3483  private:
3484   size_t _total_humongous;
3485   size_t _candidate_humongous;
3486 
3487   DirtyCardQueue _dcq;
3488 
3489   bool humongous_region_is_candidate(uint index) {
3490     HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
3491     assert(region->is_starts_humongous(), "Must start a humongous object");
3492     HeapRegionRemSet* const rset = region->rem_set();
3493     bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs;
3494     return !oop(region->bottom())->is_objArray() &&
3495            ((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
3496             (!allow_stale_refs && rset->is_empty()));
3497   }
3498 
3499  public:
3500   RegisterHumongousWithInCSetFastTestClosure()
3501   : _total_humongous(0),
3502     _candidate_humongous(0),
3503     _dcq(&JavaThread::dirty_card_queue_set()) {
3504   }
3505 
3506   virtual bool doHeapRegion(HeapRegion* r) {
3507     if (!r->is_starts_humongous()) {
3508       return false;
3509     }
3510     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3511 
3512     uint region_idx = r->hrm_index();
3513     bool is_candidate = humongous_region_is_candidate(region_idx);
3514     // Is_candidate already filters out humongous objects with large remembered sets.
3515     // If we have a humongous object with only a few remembered set entries, we simply
3516     // flush these remembered set entries into the DCQS. That will result in automatic
3517     // re-evaluation of those entries during the following evacuation
3518     // phase.
3519     if (is_candidate) {
3520       if (!r->rem_set()->is_empty()) {
3521         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3522                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3523         G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3524         HeapRegionRemSetIterator hrrs(r->rem_set());
3525         size_t card_index;
3526         while (hrrs.has_next(card_index)) {
3527           jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3528           if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
3529             *card_ptr = CardTableModRefBS::dirty_card_val();
3530             _dcq.enqueue(card_ptr);
3531           }
3532         }
3533         r->rem_set()->clear_locked();
3534       }
3535       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3536       g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3537       _candidate_humongous++;
3538     }
3539     _total_humongous++;
3540 
3541     return false;
3542   }
3543 
3544   size_t total_humongous() const { return _total_humongous; }
3545   size_t candidate_humongous() const { return _candidate_humongous; }
3546 
3547   void flush_rem_set_entries() { _dcq.flush(); }
3548 };
3549 
3550 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3551   if (!G1EagerReclaimHumongousObjects) {
3552     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3553     return;
3554   }
3555   double time = os::elapsed_counter();
3556 
3557   RegisterHumongousWithInCSetFastTestClosure cl;
3558   heap_region_iterate(&cl);
3559 
3560   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3561   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3562                                                                   cl.total_humongous(),
3563                                                                   cl.candidate_humongous());
3564   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3565 
3566   if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
3567     clear_humongous_is_live_table();
3568   }
3569 
3570   // Finally flush all remembered set entries to re-check into the global DCQS.
3571   cl.flush_rem_set_entries();
3572 }
3573 
3574 void
3575 G1CollectedHeap::setup_surviving_young_words() {
3576   assert(_surviving_young_words == NULL, "pre-condition");
3577   uint array_length = g1_policy()->young_cset_region_length();
3578   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3579   if (_surviving_young_words == NULL) {
3580     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3581                           "Not enough space for young surv words summary.");
3582   }
3583   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3584 #ifdef ASSERT
3585   for (uint i = 0;  i < array_length; ++i) {
3586     assert( _surviving_young_words[i] == 0, "memset above" );
3587   }
3588 #endif // ASSERT
3589 }
3590 
3591 void
3592 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3593   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3594   uint array_length = g1_policy()->young_cset_region_length();
3595   for (uint i = 0; i < array_length; ++i) {
3596     _surviving_young_words[i] += surv_young_words[i];
3597   }
3598 }
3599 
3600 void
3601 G1CollectedHeap::cleanup_surviving_young_words() {
3602   guarantee( _surviving_young_words != NULL, "pre-condition" );
3603   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words);
3604   _surviving_young_words = NULL;
3605 }
3606 
3607 #ifdef ASSERT
3608 class VerifyCSetClosure: public HeapRegionClosure {
3609 public:
3610   bool doHeapRegion(HeapRegion* hr) {
3611     // Here we check that the CSet region's RSet is ready for parallel
3612     // iteration. The fields that we'll verify are only manipulated
3613     // when the region is part of a CSet and is collected. Afterwards,
3614     // we reset these fields when we clear the region's RSet (when the
3615     // region is freed) so they are ready when the region is
3616     // re-allocated. The only exception to this is if there's an
3617     // evacuation failure and instead of freeing the region we leave
3618     // it in the heap. In that case, we reset these fields during
3619     // evacuation failure handling.
3620     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3621 
3622     // Here's a good place to add any other checks we'd like to
3623     // perform on CSet regions.
3624     return false;
3625   }
3626 };
3627 #endif // ASSERT
3628 
3629 #if TASKQUEUE_STATS
3630 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3631   st->print_raw_cr("GC Task Stats");
3632   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3633   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3634 }
3635 
3636 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3637   print_taskqueue_stats_hdr(st);
3638 
3639   TaskQueueStats totals;
3640   const int n = workers()->total_workers();
3641   for (int i = 0; i < n; ++i) {
3642     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3643     totals += task_queue(i)->stats;
3644   }
3645   st->print_raw("tot "); totals.print(st); st->cr();
3646 
3647   DEBUG_ONLY(totals.verify());
3648 }
3649 
3650 void G1CollectedHeap::reset_taskqueue_stats() {
3651   const int n = workers()->total_workers();
3652   for (int i = 0; i < n; ++i) {
3653     task_queue(i)->stats.reset();
3654   }
3655 }
3656 #endif // TASKQUEUE_STATS
3657 
3658 void G1CollectedHeap::log_gc_header() {
3659   if (!G1Log::fine()) {
3660     return;
3661   }
3662 
3663   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3664 
3665   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3666     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3667     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
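  // (For example, a typical resulting header is
  //  "[GC pause (G1 Evacuation Pause) (young)" or, for an initial-mark pause,
  //  "[GC pause (G1 Evacuation Pause) (young) (initial-mark)"; the exact cause
  //  string depends on gc_cause().)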
3668 
3669   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3670 }
3671 
3672 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3673   if (!G1Log::fine()) {
3674     return;
3675   }
3676 
3677   if (G1Log::finer()) {
3678     if (evacuation_failed()) {
3679       gclog_or_tty->print(" (to-space exhausted)");
3680     }
3681     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3682     g1_policy()->phase_times()->note_gc_end();
3683     g1_policy()->phase_times()->print(pause_time_sec);
3684     g1_policy()->print_detailed_heap_transition();
3685   } else {
3686     if (evacuation_failed()) {
3687       gclog_or_tty->print("--");
3688     }
3689     g1_policy()->print_heap_transition();
3690     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3691   }
3692   gclog_or_tty->flush();
3693 }
3694 
3695 bool
3696 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3697   assert_at_safepoint(true /* should_be_vm_thread */);
3698   guarantee(!is_gc_active(), "collection is not reentrant");
3699 
3700   if (GC_locker::check_active_before_gc()) {
3701     return false;
3702   }
3703 
3704   _gc_timer_stw->register_gc_start();
3705 
3706   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3707 
3708   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3709   ResourceMark rm;
3710 
3711   print_heap_before_gc();
3712   trace_heap_before_gc(_gc_tracer_stw);
3713 
3714   verify_region_sets_optional();
3715   verify_dirty_young_regions();
3716 
3717   // This call will decide whether this pause is an initial-mark
3718   // pause. If it is, during_initial_mark_pause() will return true
3719   // for the duration of this pause.
3720   g1_policy()->decide_on_conc_mark_initiation();
3721 
3722   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3723   assert(!g1_policy()->during_initial_mark_pause() ||
3724           g1_policy()->gcs_are_young(), "sanity");
3725 
3726   // We also do not allow mixed GCs during marking.
3727   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3728 
3729   // Record whether this pause is an initial mark. When the current
3730   // thread has completed its logging output and it's safe to signal
3731   // the CM thread, the flag's value in the policy has been reset.
3732   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3733 
3734   // Inner scope for scope based logging, timers, and stats collection
3735   {
3736     EvacuationInfo evacuation_info;
3737 
3738     if (g1_policy()->during_initial_mark_pause()) {
3739       // We are about to start a marking cycle, so we increment the
3740       // full collection counter.
3741       increment_old_marking_cycles_started();
3742       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3743     }
3744 
3745     _gc_tracer_stw->report_yc_type(yc_type());
3746 
3747     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3748 
3749     int active_workers = workers()->active_workers();
3750     double pause_start_sec = os::elapsedTime();
3751     g1_policy()->phase_times()->note_gc_start(active_workers);
3752     log_gc_header();
3753 
3754     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3755     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3756 
3757     // If the secondary_free_list is not empty, append it to the
3758     // free_list. No need to wait for the cleanup operation to finish;
3759     // the region allocation code will check the secondary_free_list
3760     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3761     // set, skip this step so that the region allocation code has to
3762     // get entries from the secondary_free_list.
3763     if (!G1StressConcRegionFreeing) {
3764       append_secondary_free_list_if_not_empty_with_lock();
3765     }
3766 
3767     assert(check_young_list_well_formed(), "young list should be well formed");
3768 
3769     // Don't dynamically change the number of GC threads this early.  A value of
3770     // 0 is used to indicate serial work.  When parallel work is done,
3771     // it will be set.
3772 
3773     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3774       IsGCActiveMark x;
3775 
3776       gc_prologue(false);
3777       increment_total_collections(false /* full gc */);
3778       increment_gc_time_stamp();
3779 
3780       verify_before_gc();
3781 
3782       check_bitmaps("GC Start");
3783 
3784       COMPILER2_PRESENT(DerivedPointerTable::clear());
3785 
3786       // Please see comment in g1CollectedHeap.hpp and
3787       // G1CollectedHeap::ref_processing_init() to see how
3788       // reference processing currently works in G1.
3789 
3790       // Enable discovery in the STW reference processor
3791       ref_processor_stw()->enable_discovery();
3792 
3793       {
3794         // We want to temporarily turn off discovery by the
3795         // CM ref processor, if necessary, and turn it back
3796         // on again later if we do. Using a scoped
3797         // NoRefDiscovery object will do this.
3798         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3799 
3800         // Forget the current alloc region (we might even choose it to be part
3801         // of the collection set!).
3802         _allocator->release_mutator_alloc_region();
3803 
3804         // We should call this after we retire the mutator alloc
3805         // region(s) so that all the ALLOC / RETIRE events are generated
3806         // before the start GC event.
3807         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3808 
3809         // This timing is only used by the ergonomics to handle our pause target.
3810         // It is unclear why this should not include the full pause. We will
3811         // investigate this in CR 7178365.
3812         //
3813         // Preserving the old comment here if that helps the investigation:
3814         //
3815         // The elapsed time induced by the start time below deliberately elides
3816         // the possible verification above.
3817         double sample_start_time_sec = os::elapsedTime();
3818 
3819 #if YOUNG_LIST_VERBOSE
3820         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3821         _young_list->print();
3822         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3823 #endif // YOUNG_LIST_VERBOSE
3824 
3825         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3826 
3827         double scan_wait_start = os::elapsedTime();
3828         // We have to wait until the CM threads finish scanning the
3829         // root regions as it's the only way to ensure that all the
3830         // objects on them have been correctly scanned before we start
3831         // moving them during the GC.
3832         bool waited = _cm->root_regions()->wait_until_scan_finished();
3833         double wait_time_ms = 0.0;
3834         if (waited) {
3835           double scan_wait_end = os::elapsedTime();
3836           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3837         }
3838         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3839 
3840 #if YOUNG_LIST_VERBOSE
3841         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3842         _young_list->print();
3843 #endif // YOUNG_LIST_VERBOSE
3844 
3845         if (g1_policy()->during_initial_mark_pause()) {
3846           concurrent_mark()->checkpointRootsInitialPre();
3847         }
3848 
3849 #if YOUNG_LIST_VERBOSE
3850         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3851         _young_list->print();
3852         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3853 #endif // YOUNG_LIST_VERBOSE
3854 
3855         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3856 
3857         register_humongous_regions_with_in_cset_fast_test();
3858 
3859         assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3860 
3861         _cm->note_start_of_gc();
3862         // We should not verify the per-thread SATB buffers given that
3863         // we have not filtered them yet (we'll do so during the
3864         // GC). We also call this after finalize_cset() to
3865         // ensure that the CSet has been finalized.
3866         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3867                                  true  /* verify_enqueued_buffers */,
3868                                  false /* verify_thread_buffers */,
3869                                  true  /* verify_fingers */);
3870 
3871         if (_hr_printer.is_active()) {
3872           HeapRegion* hr = g1_policy()->collection_set();
3873           while (hr != NULL) {
3874             _hr_printer.cset(hr);
3875             hr = hr->next_in_collection_set();
3876           }
3877         }
3878 
3879 #ifdef ASSERT
3880         VerifyCSetClosure cl;
3881         collection_set_iterate(&cl);
3882 #endif // ASSERT
3883 
3884         setup_surviving_young_words();
3885 
3886         // Initialize the GC alloc regions.
3887         _allocator->init_gc_alloc_regions(evacuation_info);
3888 
3889         // Actually do the work...
3890         evacuate_collection_set(evacuation_info);
3891 
3892         // We do this to mainly verify the per-thread SATB buffers
3893         // (which have been filtered by now) since we didn't verify
3894         // them earlier. No point in re-checking the stacks / enqueued
3895         // buffers given that the CSet has not changed since last time
3896         // we checked.
3897         _cm->verify_no_cset_oops(false /* verify_stacks */,
3898                                  false /* verify_enqueued_buffers */,
3899                                  true  /* verify_thread_buffers */,
3900                                  true  /* verify_fingers */);
3901 
3902         free_collection_set(g1_policy()->collection_set(), evacuation_info);
3903 
3904         eagerly_reclaim_humongous_regions();
3905 
3906         g1_policy()->clear_collection_set();
3907 
3908         cleanup_surviving_young_words();
3909 
3910         // Start a new incremental collection set for the next pause.
3911         g1_policy()->start_incremental_cset_building();
3912 
3913         clear_cset_fast_test();
3914 
3915         _young_list->reset_sampled_info();
3916 
3917         // Don't check the whole heap at this point as the
3918         // GC alloc regions from this pause have been tagged
3919         // as survivors and moved on to the survivor list.
3920         // Survivor regions will fail the !is_young() check.
3921         assert(check_young_list_empty(false /* check_heap */),
3922           "young list should be empty");
3923 
3924 #if YOUNG_LIST_VERBOSE
3925         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3926         _young_list->print();
3927 #endif // YOUNG_LIST_VERBOSE
3928 
3929         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3930                                              _young_list->first_survivor_region(),
3931                                              _young_list->last_survivor_region());
3932 
3933         _young_list->reset_auxilary_lists();
3934 
3935         if (evacuation_failed()) {
3936           _allocator->set_used(recalculate_used());
3937           uint n_queues = MAX2((int)ParallelGCThreads, 1);
3938           for (uint i = 0; i < n_queues; i++) {
3939             if (_evacuation_failed_info_array[i].has_failed()) {
3940               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3941             }
3942           }
3943         } else {
3944           // The "used" of the collection set regions has already been
3945           // subtracted when they were freed.  Add in the bytes evacuated.
3946           _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
3947         }
3948 
3949         if (g1_policy()->during_initial_mark_pause()) {
3950           // We have to do this before we notify the CM threads that
3951           // they can start working to make sure that all the
3952           // appropriate initialization is done on the CM object.
3953           concurrent_mark()->checkpointRootsInitialPost();
3954           set_marking_started();
3955           // Note that we don't actually trigger the CM thread at
3956           // this point. We do that later when we're sure that
3957           // the current thread has completed its logging output.
3958         }
3959 
3960         allocate_dummy_regions();
3961 
3962 #if YOUNG_LIST_VERBOSE
3963         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3964         _young_list->print();
3965         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3966 #endif // YOUNG_LIST_VERBOSE
3967 
3968         _allocator->init_mutator_alloc_region();
3969 
3970         {
3971           size_t expand_bytes = g1_policy()->expansion_amount();
3972           if (expand_bytes > 0) {
3973             size_t bytes_before = capacity();
3974             // No need for an ergo verbose message here,
3975             // expansion_amount() does this when it returns a value > 0.
3976             if (!expand(expand_bytes)) {
3977               // We failed to expand the heap. Cannot do anything about it.
3978             }
3979           }
3980         }
3981 
3982         // We redo the verification, but now with respect to the new CSet which
3983         // has just got initialized after the previous CSet was freed.
3984         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3985                                  true  /* verify_enqueued_buffers */,
3986                                  true  /* verify_thread_buffers */,
3987                                  true  /* verify_fingers */);
3988         _cm->note_end_of_gc();
3989 
3990         // This timing is only used by the ergonomics to handle our pause target.
3991         // It is unclear why this should not include the full pause. We will
3992         // investigate this in CR 7178365.
3993         double sample_end_time_sec = os::elapsedTime();
3994         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3995         g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
3996 
3997         MemoryService::track_memory_usage();
3998 
3999         // In prepare_for_verify() below we'll need to scan the deferred
4000         // update buffers to bring the RSets up-to-date if
4001         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4002         // the update buffers we'll probably need to scan cards on the
4003         // regions we just allocated to (i.e., the GC alloc
4004         // regions). However, during the last GC we called
4005         // set_saved_mark() on all the GC alloc regions, so card
4006         // scanning might skip the [saved_mark_word()...top()] area of
4007         // those regions (i.e., the area we allocated objects into
4008         // during the last GC). But it shouldn't. Given that
4009         // saved_mark_word() is conditional on whether the GC time stamp
4010         // on the region is current or not, by incrementing the GC time
4011         // stamp here we invalidate all the GC time stamps on all the
4012         // regions and saved_mark_word() will simply return top() for
4013         // all the regions. This is a nicer way of ensuring this rather
4014         // than iterating over the regions and fixing them. In fact, the
4015         // GC time stamp increment here also ensures that
4016         // saved_mark_word() will return top() between pauses, i.e.,
4017         // during concurrent refinement. So we don't need the
4018         // is_gc_active() check to decide which top to use when
4019         // scanning cards (see CR 7039627).
4020         increment_gc_time_stamp();
4021 
4022         verify_after_gc();
4023         check_bitmaps("GC End");
4024 
4025         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4026         ref_processor_stw()->verify_no_references_recorded();
4027 
4028         // CM reference discovery will be re-enabled if necessary.
4029       }
4030 
4031       // We should do this after we potentially expand the heap so
4032       // that all the COMMIT events are generated before the end GC
4033       // event, and after we retire the GC alloc regions so that all
4034       // RETIRE events are generated before the end GC event.
4035       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4036 
4037 #ifdef TRACESPINNING
4038       ParallelTaskTerminator::print_termination_counts();
4039 #endif
4040 
4041       gc_epilogue(false);
4042     }
4043 
4044     // Print the remainder of the GC log output.
4045     log_gc_footer(os::elapsedTime() - pause_start_sec);
4046 
4047     // It is not yet safe to tell the concurrent mark thread to
4048     // start as we have some optional output below. We don't want the
4049     // output from the concurrent mark thread interfering with this
4050     // logging output either.
4051 
4052     _hrm.verify_optional();
4053     verify_region_sets_optional();
4054 
4055     TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats());
4056     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4057 
4058     print_heap_after_gc();
4059     trace_heap_after_gc(_gc_tracer_stw);
4060 
4061     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4062     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4063     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4064     // before any GC notifications are raised.
4065     g1mm()->update_sizes();
4066 
4067     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4068     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4069     _gc_timer_stw->register_gc_end();
4070     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4071   }
4072   // It should now be safe to tell the concurrent mark thread to start
4073   // without its logging output interfering with the logging output
4074   // that came from the pause.
4075 
4076   if (should_start_conc_mark) {
4077     // CAUTION: after the doConcurrentMark() call below,
4078     // the concurrent marking thread(s) could be running
4079     // concurrently with us. Make sure that anything after
4080     // this point does not assume that we are the only GC thread
4081     // running. Note: of course, the actual marking work will
4082     // not start until the safepoint itself is released in
4083     // SuspendibleThreadSet::desynchronize().
4084     doConcurrentMark();
4085   }
4086 
4087   return true;
4088 }
4089 
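     // Set up the per-pause evacuation-failure state: remember the closure
     // used to process self-forwarded objects and allocate the scan stack
     // that will hold objects whose evacuation failed.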
4090 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4091   _drain_in_progress = false;
4092   set_evac_failure_closure(cl);
4093   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4094 }
4095 
4096 void G1CollectedHeap::finalize_for_evac_failure() {
4097   assert(_evac_failure_scan_stack != NULL &&
4098          _evac_failure_scan_stack->length() == 0,
4099          "Postcondition");
4100   assert(!_drain_in_progress, "Postcondition");
4101   delete _evac_failure_scan_stack;
4102   _evac_failure_scan_stack = NULL;
4103 }
4104 
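     // After an evacuation failure, run a parallel task that removes the
     // self-forwarding pointers installed during the pause and then restore
     // any object marks that were preserved.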
4105 void G1CollectedHeap::remove_self_forwarding_pointers() {
4106   double remove_self_forwards_start = os::elapsedTime();
4107 
4108   set_par_threads();
4109   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4110   workers()->run_task(&rsfp_task);
4111   set_par_threads(0);
4112 
4113   // Now restore saved marks, if any.
4114   assert(_objs_with_preserved_marks.size() ==
4115             _preserved_marks_of_objs.size(), "Both or none.");
4116   while (!_objs_with_preserved_marks.is_empty()) {
4117     oop obj = _objs_with_preserved_marks.pop();
4118     markOop m = _preserved_marks_of_objs.pop();
4119     obj->set_mark(m);
4120   }
4121   _objs_with_preserved_marks.clear(true);
4122   _preserved_marks_of_objs.clear(true);
4123 
4124   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
4125 }
4126 
4127 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4128   _evac_failure_scan_stack->push(obj);
4129 }
4130 
4131 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4132   assert(_evac_failure_scan_stack != NULL, "precondition");
4133 
4134   while (_evac_failure_scan_stack->length() > 0) {
4135      oop obj = _evac_failure_scan_stack->pop();
4136      _evac_failure_closure->set_region(heap_region_containing(obj));
4137      obj->oop_iterate_backwards(_evac_failure_closure);
4138   }
4139 }
4140 
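     // Called when an object could not be copied out of the collection set.
     // The object is forwarded to itself with a CAS; only the thread that
     // wins the race records the failure and handles the object.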
4141 oop
4142 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4143                                                oop old) {
4144   assert(obj_in_cs(old),
4145          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4146                  (HeapWord*) old));
4147   markOop m = old->mark();
4148   oop forward_ptr = old->forward_to_atomic(old);
4149   if (forward_ptr == NULL) {
4150     // Forward-to-self succeeded.
4151     assert(_par_scan_state != NULL, "par scan state");
4152     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4153     uint queue_num = _par_scan_state->queue_num();
4154 
4155     _evacuation_failed = true;
4156     _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
4157     if (_evac_failure_closure != cl) {
4158       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4159       assert(!_drain_in_progress,
4160              "Should only be true while someone holds the lock.");
4161       // Set the global evac-failure closure to the current thread's.
4162       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4163       set_evac_failure_closure(cl);
4164       // Now do the common part.
4165       handle_evacuation_failure_common(old, m);
4166       // Reset to NULL.
4167       set_evac_failure_closure(NULL);
4168     } else {
4169       // The lock is already held, and this is recursive.
4170       assert(_drain_in_progress, "This should only be the recursive case.");
4171       handle_evacuation_failure_common(old, m);
4172     }
4173     return old;
4174   } else {
4175     // Forward-to-self failed. Either someone else managed to allocate
4176     // space for this object (old != forward_ptr) or they beat us in
4177     // self-forwarding it (old == forward_ptr).
4178     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4179            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4180                    "should not be in the CSet",
4181                    (HeapWord*) old, (HeapWord*) forward_ptr));
4182     return forward_ptr;
4183   }
4184 }
4185 
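     // Common evacuation-failure handling: preserve the object's mark if
     // necessary, tag the region as having failed evacuation and process
     // the object (and anything it pushes) via the evac-failure scan stack.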
4186 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4187   preserve_mark_if_necessary(old, m);
4188 
4189   HeapRegion* r = heap_region_containing(old);
4190   if (!r->evacuation_failed()) {
4191     r->set_evacuation_failed(true);
4192     _hr_printer.evac_failure(r);
4193   }
4194 
4195   push_on_evac_failure_scan_stack(old);
4196 
4197   if (!_drain_in_progress) {
4198     // prevent recursion in copy_to_survivor_space()
4199     _drain_in_progress = true;
4200     drain_evac_failure_scan_stack();
4201     _drain_in_progress = false;
4202   }
4203 }
4204 
4205 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4206   assert(evacuation_failed(), "Oversaving!");
4207   // We want to call the "for_promotion_failure" version only in the
4208   // case of a promotion failure.
4209   if (m->must_be_preserved_for_promotion_failure(obj)) {
4210     _objs_with_preserved_marks.push(obj);
4211     _preserved_marks_of_objs.push(m);
4212   }
4213 }
4214 
4215 void G1ParCopyHelper::mark_object(oop obj) {
4216   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
4217 
4218   // We know that the object is not moving so it's safe to read its size.
4219   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4220 }
4221 
4222 void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
4223   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4224   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4225   assert(from_obj != to_obj, "should not be self-forwarded");
4226 
4227   assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
4228   assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
4229 
4230   // The object might be in the process of being copied by another
4231   // worker so we cannot trust that its to-space image is
4232   // well-formed. So we have to read its size from its from-space
4233   // image which we know should not be changing.
4234   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4235 }
4236 
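     // Klass barrier: if the evacuated object ended up in the young
     // generation, record that the klass currently being scanned has oops
     // pointing into young.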
4237 template <class T>
4238 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4239   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4240     _scanned_klass->record_modified_oops();
4241   }
4242 }
4243 
4244 template <G1Barrier barrier, G1Mark do_mark_object>
4245 template <class T>
4246 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4247   T heap_oop = oopDesc::load_heap_oop(p);
4248 
4249   if (oopDesc::is_null(heap_oop)) {
4250     return;
4251   }
4252 
4253   oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4254 
4255   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4256 
4257   const InCSetState state = _g1->in_cset_state(obj);
4258   if (state.is_in_cset()) {
4259     oop forwardee;
4260     markOop m = obj->mark();
4261     if (m->is_marked()) {
4262       forwardee = (oop) m->decode_pointer();
4263     } else {
4264       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
4265     }
4266     assert(forwardee != NULL, "forwardee should not be NULL");
4267     oopDesc::encode_store_heap_oop(p, forwardee);
4268     if (do_mark_object != G1MarkNone && forwardee != obj) {
4269       // If the object is self-forwarded we don't need to explicitly
4270       // mark it, the evacuation failure protocol will do so.
4271       mark_forwarded_object(obj, forwardee);
4272     }
4273 
4274     if (barrier == G1BarrierKlass) {
4275       do_klass_barrier(p, forwardee);
4276     }
4277   } else {
4278     if (state.is_humongous()) {
4279       _g1->set_humongous_is_live(obj);
4280     }
4281     // The object is not in collection set. If we're a root scanning
4282     // closure during an initial mark pause then attempt to mark the object.
4283     if (do_mark_object == G1MarkFromRoot) {
4284       mark_object(obj);
4285     }
4286   }
4287 
4288   if (barrier == G1BarrierEvac) {
4289     _par_scan_state->update_rs(_from, p, _worker_id);
4290   }
4291 }
4292 
4293 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4294 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4295 
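     // Drains the worker's own task queue and steals from the other queues
     // until the terminator decides that all workers are out of work.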
4296 class G1ParEvacuateFollowersClosure : public VoidClosure {
4297 protected:
4298   G1CollectedHeap*              _g1h;
4299   G1ParScanThreadState*         _par_scan_state;
4300   RefToScanQueueSet*            _queues;
4301   ParallelTaskTerminator*       _terminator;
4302 
4303   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4304   RefToScanQueueSet*      queues()         { return _queues; }
4305   ParallelTaskTerminator* terminator()     { return _terminator; }
4306 
4307 public:
4308   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4309                                 G1ParScanThreadState* par_scan_state,
4310                                 RefToScanQueueSet* queues,
4311                                 ParallelTaskTerminator* terminator)
4312     : _g1h(g1h), _par_scan_state(par_scan_state),
4313       _queues(queues), _terminator(terminator) {}
4314 
4315   void do_void();
4316 
4317 private:
4318   inline bool offer_termination();
4319 };
4320 
4321 bool G1ParEvacuateFollowersClosure::offer_termination() {
4322   G1ParScanThreadState* const pss = par_scan_state();
4323   pss->start_term_time();
4324   const bool res = terminator()->offer_termination();
4325   pss->end_term_time();
4326   return res;
4327 }
4328 
4329 void G1ParEvacuateFollowersClosure::do_void() {
4330   G1ParScanThreadState* const pss = par_scan_state();
4331   pss->trim_queue();
4332   do {
4333     pss->steal_and_trim_queue(queues());
4334   } while (!offer_termination());
4335 }
4336 
4337 class G1KlassScanClosure : public KlassClosure {
4338  G1ParCopyHelper* _closure;
4339  bool             _process_only_dirty;
4340  int              _count;
4341  public:
4342   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4343       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4344   void do_klass(Klass* klass) {
4345     // If the klass has not been dirtied we know that there are
4346     // no references into the young gen and we can skip it.
4347     if (!_process_only_dirty || klass->has_modified_oops()) {
4348       // Clean the klass since we're going to scavenge all the metadata.
4349       klass->clear_modified_oops();
4350 
4351       // Tell the closure that this klass is the Klass to scavenge
4352       // and is the one to dirty if oops are left pointing into the young gen.
4353       _closure->set_scanned_klass(klass);
4354 
4355       klass->oops_do(_closure);
4356 
4357       _closure->set_scanned_klass(NULL);
4358     }
4359     _count++;
4360   }
4361 };
4362 
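     // Applies a closure to the oops embedded in an nmethod and registers
     // the nmethod as a strong code root of every region those oops point
     // into.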
4363 class G1CodeBlobClosure : public CodeBlobClosure {
4364   class HeapRegionGatheringOopClosure : public OopClosure {
4365     G1CollectedHeap* _g1h;
4366     OopClosure* _work;
4367     nmethod* _nm;
4368 
4369     template <typename T>
4370     void do_oop_work(T* p) {
4371       _work->do_oop(p);
4372       T oop_or_narrowoop = oopDesc::load_heap_oop(p);
4373       if (!oopDesc::is_null(oop_or_narrowoop)) {
4374         oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
4375         HeapRegion* hr = _g1h->heap_region_containing_raw(o);
4376         assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
4377         hr->add_strong_code_root(_nm);
4378       }
4379     }
4380 
4381   public:
4382     HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
4383 
4384     void do_oop(oop* o) {
4385       do_oop_work(o);
4386     }
4387 
4388     void do_oop(narrowOop* o) {
4389       do_oop_work(o);
4390     }
4391 
4392     void set_nm(nmethod* nm) {
4393       _nm = nm;
4394     }
4395   };
4396 
4397   HeapRegionGatheringOopClosure _oc;
4398 public:
4399   G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
4400 
4401   void do_code_blob(CodeBlob* cb) {
4402     nmethod* nm = cb->as_nmethod_or_null();
4403     if (nm != NULL) {
4404       if (!nm->test_set_oops_do_mark()) {
4405         _oc.set_nm(nm);
4406         nm->oops_do(&_oc);
4407         nm->fix_oop_relocations();
4408       }
4409     }
4410   }
4411 };
4412 
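     // The main parallel evacuation task: each worker scans its share of
     // the roots, evacuates the objects reachable from them and then helps
     // drain the remaining work queues.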
4413 class G1ParTask : public AbstractGangTask {
4414 protected:
4415   G1CollectedHeap*       _g1h;
4416   RefToScanQueueSet      *_queues;
4417   ParallelTaskTerminator _terminator;
4418   uint _n_workers;
4419 
4420   Mutex _stats_lock;
4421   Mutex* stats_lock() { return &_stats_lock; }
4422 
4423 public:
4424   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
4425     : AbstractGangTask("G1 collection"),
4426       _g1h(g1h),
4427       _queues(task_queues),
4428       _terminator(0, _queues),
4429       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4430   {}
4431 
4432   RefToScanQueueSet* queues() { return _queues; }
4433 
4434   RefToScanQueue *work_queue(int i) {
4435     return queues()->queue(i);
4436   }
4437 
4438   ParallelTaskTerminator* terminator() { return &_terminator; }
4439 
4440   virtual void set_for_termination(int active_workers) {
4441     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4442     // in the young space (_par_seq_tasks) in the G1 heap
4443     // for SequentialSubTasksDone.
4444     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4445     // both of which need setting by set_n_termination().
4446     _g1h->SharedHeap::set_n_termination(active_workers);
4447     _g1h->set_n_termination(active_workers);
4448     terminator()->reset_for_reuse(active_workers);
4449     _n_workers = active_workers;
4450   }
4451 
4452   // Helps out with CLD processing.
4453   //
4454   // During InitialMark we need to:
4455   // 1) Scavenge all CLDs for the young GC.
4456   // 2) Mark all objects directly reachable from strong CLDs.
4457   template <G1Mark do_mark_object>
4458   class G1CLDClosure : public CLDClosure {
4459     G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
4460     G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
4461     G1KlassScanClosure                                _klass_in_cld_closure;
4462     bool                                              _claim;
4463 
4464    public:
4465     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4466                  bool only_young, bool claim)
4467         : _oop_closure(oop_closure),
4468           _oop_in_klass_closure(oop_closure->g1(),
4469                                 oop_closure->pss(),
4470                                 oop_closure->rp()),
4471           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4472           _claim(claim) {
4473 
4474     }
4475 
4476     void do_cld(ClassLoaderData* cld) {
4477       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4478     }
4479   };
4480 
4481   void work(uint worker_id) {
4482     if (worker_id >= _n_workers) return;  // no work needed this round
4483 
4484     double start_time_ms = os::elapsedTime() * 1000.0;
4485     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4486 
4487     {
4488       ResourceMark rm;
4489       HandleMark   hm;
4490 
4491       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4492 
4493       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4494       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4495 
4496       pss.set_evac_failure_closure(&evac_failure_cl);
4497 
4498       bool only_young = _g1h->g1_policy()->gcs_are_young();
4499 
4500       // Non-IM young GC.
4501       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4502       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4503                                                                                only_young, // Only process dirty klasses.
4504                                                                                false);     // No need to claim CLDs.
4505       // IM young GC.
4506       //    Strong roots closures.
4507       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4508       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4509                                                                                false, // Process all klasses.
4510                                                                                true); // Need to claim CLDs.
4511       //    Weak roots closures.
4512       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4513       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4514                                                                                     false, // Process all klasses.
4515                                                                                     true); // Need to claim CLDs.
4516 
4517       G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4518       G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4519       // IM Weak code roots are handled later.
4520 
4521       OopClosure* strong_root_cl;
4522       OopClosure* weak_root_cl;
4523       CLDClosure* strong_cld_cl;
4524       CLDClosure* weak_cld_cl;
4525       CodeBlobClosure* strong_code_cl;
4526 
4527       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4528         // We also need to mark copied objects.
4529         strong_root_cl = &scan_mark_root_cl;
4530         strong_cld_cl  = &scan_mark_cld_cl;
4531         strong_code_cl = &scan_mark_code_cl;
4532         if (ClassUnloadingWithConcurrentMark) {
4533           weak_root_cl = &scan_mark_weak_root_cl;
4534           weak_cld_cl  = &scan_mark_weak_cld_cl;
4535         } else {
4536           weak_root_cl = &scan_mark_root_cl;
4537           weak_cld_cl  = &scan_mark_cld_cl;
4538         }
4539       } else {
4540         strong_root_cl = &scan_only_root_cl;
4541         weak_root_cl   = &scan_only_root_cl;
4542         strong_cld_cl  = &scan_only_cld_cl;
4543         weak_cld_cl    = &scan_only_cld_cl;
4544         strong_code_cl = &scan_only_code_cl;
4545       }
4546 
4547 
4548       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4549 
4550       pss.start_strong_roots();
4551       _g1h->g1_process_roots(strong_root_cl,
4552                              weak_root_cl,
4553                              &push_heap_rs_cl,
4554                              strong_cld_cl,
4555                              weak_cld_cl,
4556                              strong_code_cl,
4557                              worker_id);
4558 
4559       pss.end_strong_roots();
4560 
4561       {
4562         double start = os::elapsedTime();
4563         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4564         evac.do_void();
4565         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4566         double term_ms = pss.term_time()*1000.0;
4567         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4568         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4569       }
4570       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4571       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4572 
4573       if (PrintTerminationStats) {
4574         MutexLocker x(stats_lock());
4575         pss.print_termination_stats(worker_id);
4576       }
4577 
4578       assert(pss.queue_is_empty(), "should be empty");
4579 
4580       // Close the inner scope so that the ResourceMark and HandleMark
4581       // destructors are executed here and are included as part of the
4582       // "GC Worker Time".
4583     }
4584 
4585     double end_time_ms = os::elapsedTime() * 1000.0;
4586     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4587   }
4588 };
4589 
4590 // *** Common G1 Evacuation Stuff
4591 
4592 // This method is run in a GC worker.
4593 
4594 void
4595 G1CollectedHeap::
4596 g1_process_roots(OopClosure* scan_non_heap_roots,
4597                  OopClosure* scan_non_heap_weak_roots,
4598                  G1ParPushHeapRSClosure* scan_rs,
4599                  CLDClosure* scan_strong_clds,
4600                  CLDClosure* scan_weak_clds,
4601                  CodeBlobClosure* scan_strong_code,
4602                  uint worker_i) {
4603 
4604   // First scan the shared roots.
4605   double ext_roots_start = os::elapsedTime();
4606   double closure_app_time_sec = 0.0;
4607 
4608   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4609   bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4610 
4611   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4612   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4613 
4614   process_roots(false, // no scoping; this is parallel code
4615                 SharedHeap::SO_None,
4616                 &buf_scan_non_heap_roots,
4617                 &buf_scan_non_heap_weak_roots,
4618                 scan_strong_clds,
4619                 // Unloading Initial Marks handle the weak CLDs separately.
4620                 (trace_metadata ? NULL : scan_weak_clds),
4621                 scan_strong_code);
4622 
4623   // Now the CM ref_processor roots.
4624   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4625     // We need to treat the discovered reference lists of the
4626     // concurrent mark ref processor as roots and keep entries
4627     // (which are added by the marking threads) on them live
4628     // until they can be processed at the end of marking.
4629     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4630   }
4631 
4632   if (trace_metadata) {
4633     // Barrier to make sure all workers passed
4634     // the strong CLD and strong nmethods phases.
4635     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4636 
4637     // Now take the complement of the strong CLDs.
4638     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4639   }
4640 
4641   // Finish up any enqueued closure apps (attributed as object copy time).
4642   buf_scan_non_heap_roots.done();
4643   buf_scan_non_heap_weak_roots.done();
4644 
4645   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4646       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4647 
4648   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4649 
4650   double ext_root_time_ms =
4651     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4652 
4653   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4654 
4655   // During conc marking we have to filter the per-thread SATB buffers
4656   // to make sure we remove any oops into the CSet (which will show up
4657   // as implicitly live).
4658   double satb_filtering_ms = 0.0;
4659   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4660     if (mark_in_progress()) {
4661       double satb_filter_start = os::elapsedTime();
4662 
4663       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4664 
4665       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4666     }
4667   }
4668   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4669 
4670   // Now scan the complement of the collection set.
4671   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4672 
4673   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4674 
4675   _process_strong_tasks->all_tasks_completed();
4676 }
4677 
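     // Unlinks dead entries from the string and symbol tables in parallel,
     // accumulating the number of entries processed and removed.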
4678 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4679 private:
4680   BoolObjectClosure* _is_alive;
4681   int _initial_string_table_size;
4682   int _initial_symbol_table_size;
4683 
4684   bool  _process_strings;
4685   int _strings_processed;
4686   int _strings_removed;
4687 
4688   bool  _process_symbols;
4689   int _symbols_processed;
4690   int _symbols_removed;
4691 
4692 public:
4693   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4694     AbstractGangTask("String/Symbol Unlinking"),
4695     _is_alive(is_alive),
4696     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4697     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
4698 
4699     _initial_string_table_size = StringTable::the_table()->table_size();
4700     _initial_symbol_table_size = SymbolTable::the_table()->table_size();
4701     if (process_strings) {
4702       StringTable::clear_parallel_claimed_index();
4703     }
4704     if (process_symbols) {
4705       SymbolTable::clear_parallel_claimed_index();
4706     }
4707   }
4708 
4709   ~G1StringSymbolTableUnlinkTask() {
4710     guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
4711               err_msg("claim value %d after unlink less than initial string table size %d",
4712                       StringTable::parallel_claimed_index(), _initial_string_table_size));
4713     guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
4714               err_msg("claim value %d after unlink less than initial symbol table size %d",
4715                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
4716 
4717     if (G1TraceStringSymbolTableScrubbing) {
4718       gclog_or_tty->print_cr("Cleaned string and symbol table, "
4719                              "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
4720                              "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
4721                              strings_processed(), strings_removed(),
4722                              symbols_processed(), symbols_removed());
4723     }
4724   }
4725 
4726   void work(uint worker_id) {
4727     int strings_processed = 0;
4728     int strings_removed = 0;
4729     int symbols_processed = 0;
4730     int symbols_removed = 0;
4731     if (_process_strings) {
4732       StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
4733       Atomic::add(strings_processed, &_strings_processed);
4734       Atomic::add(strings_removed, &_strings_removed);
4735     }
4736     if (_process_symbols) {
4737       SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
4738       Atomic::add(symbols_processed, &_symbols_processed);
4739       Atomic::add(symbols_removed, &_symbols_removed);
4740     }
4741   }
4742 
4743   size_t strings_processed() const { return (size_t)_strings_processed; }
4744   size_t strings_removed()   const { return (size_t)_strings_removed; }
4745 
4746   size_t symbols_processed() const { return (size_t)_symbols_processed; }
4747   size_t symbols_removed()   const { return (size_t)_symbols_removed; }
4748 };
4749 
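     // Parallel cleaning and unloading of nmethods. Workers claim batches of
     // nmethods from a shared iterator; a barrier separates the first
     // cleaning pass from the postponed second pass.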
4750 class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
4751 private:
4752   static Monitor* _lock;
4753 
4754   BoolObjectClosure* const _is_alive;
4755   const bool               _unloading_occurred;
4756   const uint               _num_workers;
4757 
4758   // Variables used to claim nmethods.
4759   nmethod* _first_nmethod;
4760   volatile nmethod* _claimed_nmethod;
4761 
4762   // The list of nmethods that need to be processed by the second pass.
4763   volatile nmethod* _postponed_list;
4764   volatile uint     _num_entered_barrier;
4765 
4766  public:
4767   G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
4768       _is_alive(is_alive),
4769       _unloading_occurred(unloading_occurred),
4770       _num_workers(num_workers),
4771       _first_nmethod(NULL),
4772       _claimed_nmethod(NULL),
4773       _postponed_list(NULL),
4774       _num_entered_barrier(0)
4775   {
4776     nmethod::increase_unloading_clock();
4777     // Get first alive nmethod
4778     NMethodIterator iter = NMethodIterator();
4779     if (iter.next_alive()) {
4780       _first_nmethod = iter.method();
4781     }
4782     _claimed_nmethod = (volatile nmethod*)_first_nmethod;
4783   }
4784 
4785   ~G1CodeCacheUnloadingTask() {
4786     CodeCache::verify_clean_inline_caches();
4787 
4788     CodeCache::set_needs_cache_clean(false);
4789     guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
4790 
4791     CodeCache::verify_icholder_relocations();
4792   }
4793 
4794  private:
4795   void add_to_postponed_list(nmethod* nm) {
4796       nmethod* old;
4797       do {
4798         old = (nmethod*)_postponed_list;
4799         nm->set_unloading_next(old);
4800       } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
4801   }
4802 
4803   void clean_nmethod(nmethod* nm) {
4804     bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
4805 
4806     if (postponed) {
4807       // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
4808       add_to_postponed_list(nm);
4809     }
4810 
4811     // Mark that this thread has been cleaned/unloaded.
4812     // After this call, it will be safe to ask if this nmethod was unloaded or not.
4813     nm->set_unloading_clock(nmethod::global_unloading_clock());
4814   }
4815 
4816   void clean_nmethod_postponed(nmethod* nm) {
4817     nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
4818   }
4819 
4820   static const int MaxClaimNmethods = 16;
4821 
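       // Claim up to MaxClaimNmethods alive nmethods for this worker by
       // advancing the shared claim pointer with a CAS.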
4822   void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
4823     nmethod* first;
4824     NMethodIterator last;
4825 
4826     do {
4827       *num_claimed_nmethods = 0;
4828 
4829       first = (nmethod*)_claimed_nmethod;
4830       last = NMethodIterator(first);
4831 
4832       if (first != NULL) {
4833 
4834         for (int i = 0; i < MaxClaimNmethods; i++) {
4835           if (!last.next_alive()) {
4836             break;
4837           }
4838           claimed_nmethods[i] = last.method();
4839           (*num_claimed_nmethods)++;
4840         }
4841       }
4842 
4843     } while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
4844   }
4845 
4846   nmethod* claim_postponed_nmethod() {
4847     nmethod* claim;
4848     nmethod* next;
4849 
4850     do {
4851       claim = (nmethod*)_postponed_list;
4852       if (claim == NULL) {
4853         return NULL;
4854       }
4855 
4856       next = claim->unloading_next();
4857 
4858     } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
4859 
4860     return claim;
4861   }
4862 
4863  public:
4864   // Mark that we're done with the first pass of nmethod cleaning.
4865   void barrier_mark(uint worker_id) {
4866     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
4867     _num_entered_barrier++;
4868     if (_num_entered_barrier == _num_workers) {
4869       ml.notify_all();
4870     }
4871   }
4872 
4873   // See if we have to wait for the other workers to
4874   // finish their first-pass nmethod cleaning work.
4875   void barrier_wait(uint worker_id) {
4876     if (_num_entered_barrier < _num_workers) {
4877       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
4878       while (_num_entered_barrier < _num_workers) {
4879           ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
4880       }
4881     }
4882   }
4883 
4884   // Cleaning and unloading of nmethods. Some work has to be postponed
4885   // to the second pass, when we know which nmethods survive.
4886   void work_first_pass(uint worker_id) {
4887     // The first nmethod is claimed by the first worker.
4888     if (worker_id == 0 && _first_nmethod != NULL) {
4889       clean_nmethod(_first_nmethod);
4890       _first_nmethod = NULL;
4891     }
4892 
4893     int num_claimed_nmethods;
4894     nmethod* claimed_nmethods[MaxClaimNmethods];
4895 
4896     while (true) {
4897       claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
4898 
4899       if (num_claimed_nmethods == 0) {
4900         break;
4901       }
4902 
4903       for (int i = 0; i < num_claimed_nmethods; i++) {
4904         clean_nmethod(claimed_nmethods[i]);
4905       }
4906     }
4907 
4908     // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
4909     // Need to retire the buffers now that this thread has stopped cleaning nmethods.
4910     MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
4911   }
4912 
4913   void work_second_pass(uint worker_id) {
4914     nmethod* nm;
4915     // Take care of postponed nmethods.
4916     while ((nm = claim_postponed_nmethod()) != NULL) {
4917       clean_nmethod_postponed(nm);
4918     }
4919   }
4920 };
4921 
4922 Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
4923 
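     // Cleans klass metadata (implementor lists, method data, dependent
     // nmethods) in parallel; one worker also cleans the subklass/sibling
     // tree.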
4924 class G1KlassCleaningTask : public StackObj {
4925   BoolObjectClosure*                      _is_alive;
4926   volatile jint                           _clean_klass_tree_claimed;
4927   ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
4928 
4929  public:
4930   G1KlassCleaningTask(BoolObjectClosure* is_alive) :
4931       _is_alive(is_alive),
4932       _clean_klass_tree_claimed(0),
4933       _klass_iterator() {
4934   }
4935 
4936  private:
4937   bool claim_clean_klass_tree_task() {
4938     if (_clean_klass_tree_claimed) {
4939       return false;
4940     }
4941 
4942     return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
4943   }
4944 
4945   InstanceKlass* claim_next_klass() {
4946     Klass* klass;
4947     do {
4948       klass = _klass_iterator.next_klass();
4949     } while (klass != NULL && !klass->oop_is_instance());
4950 
4951     return (InstanceKlass*)klass;
4952   }
4953 
4954 public:
4955 
4956   void clean_klass(InstanceKlass* ik) {
4957     ik->clean_implementors_list(_is_alive);
4958     ik->clean_method_data(_is_alive);
4959 
4960     // G1 specific cleanup work that has
4961     // been moved here to be done in parallel.
4962     ik->clean_dependent_nmethods();
4963     if (JvmtiExport::has_redefined_a_class()) {
4964       InstanceKlass::purge_previous_versions(ik);
4965     }
4966   }
4967 
4968   void work() {
4969     ResourceMark rm;
4970 
4971     // One worker will clean the subklass/sibling klass tree.
4972     if (claim_clean_klass_tree_task()) {
4973       Klass::clean_subklass_tree(_is_alive);
4974     }
4975 
4976     // All workers will help clean the classes.
4977     InstanceKlass* klass;
4978     while ((klass = claim_next_klass()) != NULL) {
4979       clean_klass(klass);
4980     }
4981   }
4982 };
4983 
4984 // To minimize the remark pause times, the tasks below are done in parallel.
4985 class G1ParallelCleaningTask : public AbstractGangTask {
4986 private:
4987   G1StringSymbolTableUnlinkTask _string_symbol_task;
4988   G1CodeCacheUnloadingTask      _code_cache_task;
4989   G1KlassCleaningTask           _klass_cleaning_task;
4990 
4991 public:
4992   // The constructor is run in the VMThread.
4993   G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
4994       AbstractGangTask("Parallel Cleaning"),
4995       _string_symbol_task(is_alive, process_strings, process_symbols),
4996       _code_cache_task(num_workers, is_alive, unloading_occurred),
4997       _klass_cleaning_task(is_alive) {
4998   }
4999 
5000   void pre_work_verification() {
5001     assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
5002   }
5003 
5004   void post_work_verification() {
5005     assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
5006   }
5007 
5008   // The parallel work done by all worker threads.
5009   void work(uint worker_id) {
5010     pre_work_verification();
5011 
5012     // Do first pass of code cache cleaning.
5013     _code_cache_task.work_first_pass(worker_id);
5014 
5015     // Let the threads mark that the first pass is done.
5016     _code_cache_task.barrier_mark(worker_id);
5017 
5018     // Clean the Strings and Symbols.
5019     _string_symbol_task.work(worker_id);
5020 
5021     // Wait for all workers to finish the first code cache cleaning pass.
5022     _code_cache_task.barrier_wait(worker_id);
5023 
5024     // Do the second code cache cleaning pass, which relies on
5025     // the liveness information gathered during the first pass.
5026     _code_cache_task.work_second_pass(worker_id);
5027 
5028     // Clean all klasses that were not unloaded.
5029     _klass_cleaning_task.work();
5030 
5031     post_work_verification();
5032   }
5033 };
5034 
5035 
5036 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
5037                                         bool process_strings,
5038                                         bool process_symbols,
5039                                         bool class_unloading_occurred) {
5040   uint n_workers = workers()->active_workers();
5041 
5042   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
5043                                         n_workers, class_unloading_occurred);
5044   set_par_threads(n_workers);
5045   workers()->run_task(&g1_unlink_task);
5046   set_par_threads(0);
5047 }
5048 
5049 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
5050                                                      bool process_strings, bool process_symbols) {
5051   {
5052     uint n_workers = _g1h->workers()->active_workers();
5053     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5054     set_par_threads(n_workers);
5055     workers()->run_task(&g1_unlink_task);
5056     set_par_threads(0);
5057   }
5058 
5059   if (G1StringDedup::is_enabled()) {
5060     G1StringDedup::unlink(is_alive);
5061   }
5062 }
5063 
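     // Re-dirties the cards recorded in the completed dirty card buffers in
     // parallel, recording the time taken and the number of cards processed
     // per worker.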
5064 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5065  private:
5066   DirtyCardQueueSet* _queue;
5067  public:
5068   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5069 
5070   virtual void work(uint worker_id) {
5071     double start_time = os::elapsedTime();
5072 
5073     RedirtyLoggedCardTableEntryClosure cl;
5074     _queue->par_apply_closure_to_all_completed_buffers(&cl);
5075 
5076     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5077     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5078     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5079   }
5080 };
5081 
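     // Re-dirty the cards logged during the pause and merge the buffers back
     // into the global dirty card queue set so that they will be refined
     // concurrently.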
5082 void G1CollectedHeap::redirty_logged_cards() {
5083   double redirty_logged_cards_start = os::elapsedTime();
5084 
5085   uint n_workers = _g1h->workers()->active_workers();
5086 
5087   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5088   dirty_card_queue_set().reset_for_par_iteration();
5089   set_par_threads(n_workers);
5090   workers()->run_task(&redirty_task);
5091   set_par_threads(0);
5092 
5093   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5094   dcq.merge_bufferlists(&dirty_card_queue_set());
5095   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5096 
5097   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5098 }
5099 
5100 // Weak Reference Processing support
5101 
5102 // An always "is_alive" closure that is used to preserve referents.
5103 // If the object is non-null then it's alive.  Used in the preservation
5104 // of referent objects that are pointed to by reference objects
5105 // discovered by the CM ref processor.
5106 class G1AlwaysAliveClosure: public BoolObjectClosure {
5107   G1CollectedHeap* _g1;
5108 public:
5109   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5110   bool do_object_b(oop p) {
5111     if (p != NULL) {
5112       return true;
5113     }
5114     return false;
5115   }
5116 };
5117 
5118 bool G1STWIsAliveClosure::do_object_b(oop p) {
5119   // An object is reachable if it is outside the collection set,
5120   // or is inside and copied.
5121   return !_g1->obj_in_cs(p) || p->is_forwarded();
5122 }
5123 
5124 // Non Copying Keep Alive closure
5125 class G1KeepAliveClosure: public OopClosure {
5126   G1CollectedHeap* _g1;
5127 public:
5128   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5129   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5130   void do_oop(oop* p) {
5131     oop obj = *p;
5132     assert(obj != NULL, "the caller should have filtered out NULL values");
5133 
5134     const InCSetState cset_state = _g1->in_cset_state(obj);
5135     if (!cset_state.is_in_cset_or_humongous()) {
5136       return;
5137     }
5138     if (cset_state.is_in_cset()) {
5139       assert(obj->is_forwarded(), "invariant");
5140       *p = obj->forwardee();
5141     } else {
5142       assert(!obj->is_forwarded(), "invariant");
5143       assert(cset_state.is_humongous(),
5144              err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
5145       _g1->set_humongous_is_live(obj);
5146     }
5147   }
5148 };
5149 
5150 // Copying Keep Alive closure - can be called from both
5151 // serial and parallel code as long as different worker
5152 // threads utilize different G1ParScanThreadState instances
5153 // and different queues.
5154 
5155 class G1CopyingKeepAliveClosure: public OopClosure {
5156   G1CollectedHeap*         _g1h;
5157   OopClosure*              _copy_non_heap_obj_cl;
5158   G1ParScanThreadState*    _par_scan_state;
5159 
5160 public:
5161   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5162                             OopClosure* non_heap_obj_cl,
5163                             G1ParScanThreadState* pss):
5164     _g1h(g1h),
5165     _copy_non_heap_obj_cl(non_heap_obj_cl),
5166     _par_scan_state(pss)
5167   {}
5168 
5169   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5170   virtual void do_oop(      oop* p) { do_oop_work(p); }
5171 
5172   template <class T> void do_oop_work(T* p) {
5173     oop obj = oopDesc::load_decode_heap_oop(p);
5174 
5175     if (_g1h->is_in_cset_or_humongous(obj)) {
5176       // If the referent object has been forwarded (either copied
5177       // to a new location or to itself in the event of an
5178       // evacuation failure) then we need to update the reference
5179       // field and, if both reference and referent are in the G1
5180       // heap, update the RSet for the referent.
5181       //
5182       // If the referent has not been forwarded then we have to keep
5183       // it alive by policy. Therefore we have to copy the referent.
5184       //
5185       // If the reference field is in the G1 heap then we can push
5186       // on the PSS queue. When the queue is drained (after each
5187       // phase of reference processing) the object and its followers
5188       // will be copied, the reference field set to point to the
5189       // new location, and the RSet updated. Otherwise we need to
5190       // use the non-heap or metadata closures directly to copy
5191       // the referent object and update the pointer, while avoiding
5192       // updating the RSet.
5193 
5194       if (_g1h->is_in_g1_reserved(p)) {
5195         _par_scan_state->push_on_queue(p);
5196       } else {
5197         assert(!Metaspace::contains((const void*)p),
5198                err_msg("Unexpectedly found a pointer from metadata: "
5199                               PTR_FORMAT, p));
5200         _copy_non_heap_obj_cl->do_oop(p);
5201       }
5202     }
5203   }
5204 };
5205 
5206 // Serial drain queue closure. Called as the 'complete_gc'
5207 // closure for each discovered list in some of the
5208 // reference processing phases.
5209 
5210 class G1STWDrainQueueClosure: public VoidClosure {
5211 protected:
5212   G1CollectedHeap* _g1h;
5213   G1ParScanThreadState* _par_scan_state;
5214 
5215   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5216 
5217 public:
5218   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5219     _g1h(g1h),
5220     _par_scan_state(pss)
5221   { }
5222 
5223   void do_void() {
5224     G1ParScanThreadState* const pss = par_scan_state();
5225     pss->trim_queue();
5226   }
5227 };
5228 
5229 // Parallel Reference Processing closures
5230 
5231 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5232 // processing during G1 evacuation pauses.
5233 
5234 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5235 private:
5236   G1CollectedHeap*   _g1h;
5237   RefToScanQueueSet* _queues;
5238   FlexibleWorkGang*  _workers;
5239   int                _active_workers;
5240 
5241 public:
5242   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5243                         FlexibleWorkGang* workers,
5244                         RefToScanQueueSet *task_queues,
5245                         int n_workers) :
5246     _g1h(g1h),
5247     _queues(task_queues),
5248     _workers(workers),
5249     _active_workers(n_workers)
5250   {
5251     assert(n_workers > 0, "shouldn't call this otherwise");
5252   }
5253 
5254   // Executes the given task using the STW worker threads.
5255   virtual void execute(ProcessTask& task);
5256   virtual void execute(EnqueueTask& task);
5257 };
5258 
5259 // Gang task for possibly parallel reference processing
5260 
5261 class G1STWRefProcTaskProxy: public AbstractGangTask {
5262   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5263   ProcessTask&     _proc_task;
5264   G1CollectedHeap* _g1h;
5265   RefToScanQueueSet *_task_queues;
5266   ParallelTaskTerminator* _terminator;
5267 
5268 public:
5269   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5270                      G1CollectedHeap* g1h,
5271                      RefToScanQueueSet *task_queues,
5272                      ParallelTaskTerminator* terminator) :
5273     AbstractGangTask("Process reference objects in parallel"),
5274     _proc_task(proc_task),
5275     _g1h(g1h),
5276     _task_queues(task_queues),
5277     _terminator(terminator)
5278   {}
5279 
5280   virtual void work(uint worker_id) {
5281     // The reference processing task executed by a single worker.
5282     ResourceMark rm;
5283     HandleMark   hm;
5284 
5285     G1STWIsAliveClosure is_alive(_g1h);
5286 
5287     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5288     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5289 
5290     pss.set_evac_failure_closure(&evac_failure_cl);
5291 
5292     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5293 
5294     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5295 
5296     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5297 
5298     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5299       // We also need to mark copied objects.
5300       copy_non_heap_cl = &copy_mark_non_heap_cl;
5301     }
5302 
5303     // Keep alive closure.
5304     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5305 
5306     // Complete GC closure
5307     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5308 
5309     // Call the reference processing task's work routine.
5310     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5311 
5312     // Note we cannot assert that the refs array is empty here as not all
5313     // of the processing tasks (specifically phase2 - pp2_work) execute
5314     // the complete_gc closure (which ordinarily would drain the queue) so
5315     // the queue may not be empty.
5316   }
5317 };
5318 
5319 // Driver routine for parallel reference processing.
5320 // Creates an instance of the ref processing gang
5321 // task and has the worker threads execute it.
5322 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5323   assert(_workers != NULL, "Need parallel worker threads.");
5324 
5325   ParallelTaskTerminator terminator(_active_workers, _queues);
5326   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5327 
5328   _g1h->set_par_threads(_active_workers);
5329   _workers->run_task(&proc_task_proxy);
5330   _g1h->set_par_threads(0);
5331 }
5332 
5333 // Gang task for parallel reference enqueueing.
5334 
5335 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5336   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5337   EnqueueTask& _enq_task;
5338 
5339 public:
5340   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5341     AbstractGangTask("Enqueue reference objects in parallel"),
5342     _enq_task(enq_task)
5343   { }
5344 
5345   virtual void work(uint worker_id) {
5346     _enq_task.work(worker_id);
5347   }
5348 };
5349 
5350 // Driver routine for parallel reference enqueueing.
5351 // Creates an instance of the ref enqueueing gang
5352 // task and has the worker threads execute it.
5353 
5354 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5355   assert(_workers != NULL, "Need parallel worker threads.");
5356 
5357   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5358 
5359   _g1h->set_par_threads(_active_workers);
5360   _workers->run_task(&enq_task_proxy);
5361   _g1h->set_par_threads(0);
5362 }
5363 
5364 // End of weak reference support closures
5365 
5366 // Abstract task used to preserve (i.e. copy) any referent objects
5367 // that are in the collection set and are pointed to by reference
5368 // objects discovered by the CM ref processor.
5369 
5370 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5371 protected:
5372   G1CollectedHeap* _g1h;
5373   RefToScanQueueSet      *_queues;
5374   ParallelTaskTerminator _terminator;
5375   uint _n_workers;
5376 
5377 public:
5378   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
5379     AbstractGangTask("ParPreserveCMReferents"),
5380     _g1h(g1h),
5381     _queues(task_queues),
5382     _terminator(workers, _queues),
5383     _n_workers(workers)
5384   { }
5385 
5386   void work(uint worker_id) {
5387     ResourceMark rm;
5388     HandleMark   hm;
5389 
5390     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5391     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5392 
5393     pss.set_evac_failure_closure(&evac_failure_cl);
5394 
5395     assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5396 
5397     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5398 
5399     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5400 
5401     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5402 
5403     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5404       // We also need to mark copied objects.
5405       copy_non_heap_cl = &copy_mark_non_heap_cl;
5406     }
5407 
5408     // Is alive closure
5409     G1AlwaysAliveClosure always_alive(_g1h);
5410 
5411     // Copying keep alive closure. Applied to referent objects that need
5412     // to be copied.
5413     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5414 
5415     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5416 
5417     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5418     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5419 
5420     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5421     // So this must be true - but assert just in case someone decides to
5422     // change the worker ids.
5423     assert(0 <= worker_id && worker_id < limit, "sanity");
5424     assert(!rp->discovery_is_atomic(), "check this code");
5425 
5426     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5427     for (uint idx = worker_id; idx < limit; idx += stride) {
5428       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5429 
5430       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5431       while (iter.has_next()) {
5432         // Since discovery is not atomic for the CM ref processor, we
5433         // can see some null referent objects.
5434         iter.load_ptrs(DEBUG_ONLY(true));
5435         oop ref = iter.obj();
5436 
5437         // This will filter nulls.
5438         if (iter.is_referent_alive()) {
5439           iter.make_referent_alive();
5440         }
5441         iter.move_to_next();
5442       }
5443     }
5444 
5445     // Drain the queue - which may cause stealing
5446     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5447     drain_queue.do_void();
5448     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5449     assert(pss.queue_is_empty(), "should be");
5450   }
5451 };
5452 
5453 // Weak Reference processing during an evacuation pause (part 1).
5454 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5455   double ref_proc_start = os::elapsedTime();
5456 
5457   ReferenceProcessor* rp = _ref_processor_stw;
5458   assert(rp->discovery_enabled(), "should have been enabled");
5459 
5460   // Any reference objects, in the collection set, that were 'discovered'
5461   // by the CM ref processor should have already been copied (either by
5462   // applying the external root copy closure to the discovered lists, or
5463   // by following an RSet entry).
5464   //
5465   // But some of the referents in the collection set that these
5466   // reference objects point to may not have been copied: the STW ref
5467   // processor would have seen that the reference object had already
5468   // been 'discovered' and would have skipped discovering the reference,
5469   // but would not have treated the reference object as a regular oop.
5470   // As a result the copy closure would not have been applied to the
5471   // referent object.
5472   //
5473   // We need to explicitly copy these referent objects - the references
5474   // will be processed at the end of remarking.
5475   //
5476   // We also need to do this copying before we process the reference
5477   // objects discovered by the STW ref processor in case one of these
5478   // referents points to another object which is also referenced by an
5479   // object discovered by the STW ref processor.
5480 
5481   assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");
5482 
5483   set_par_threads(no_of_gc_workers);
5484   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5485                                                  no_of_gc_workers,
5486                                                  _task_queues);
5487 
5488   workers()->run_task(&keep_cm_referents);
5489 
5490   set_par_threads(0);
5491 
5492   // Closure to test whether a referent is alive.
5493   G1STWIsAliveClosure is_alive(this);
5494 
5495   // Even when parallel reference processing is enabled, the processing
5496   // of JNI refs is always performed serially by the current thread
5497   // rather than by a worker. The following PSS will be used for processing
5498   // JNI refs.
5499 
5500   // Use only a single queue for this PSS.
5501   G1ParScanThreadState            pss(this, 0, NULL);
5502 
5503   // We do not embed a reference processor in the copying/scanning
5504   // closures while we're actually processing the discovered
5505   // reference objects.
5506   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5507 
5508   pss.set_evac_failure_closure(&evac_failure_cl);
5509 
5510   assert(pss.queue_is_empty(), "pre-condition");
5511 
5512   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5513 
5514   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5515 
5516   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5517 
5518   if (g1_policy()->during_initial_mark_pause()) {
5519     // We also need to mark copied objects.
5520     copy_non_heap_cl = &copy_mark_non_heap_cl;
5521   }
5522 
5523   // Keep alive closure.
5524   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5525 
5526   // Serial Complete GC closure
5527   G1STWDrainQueueClosure drain_queue(this, &pss);
5528 
5529   // Setup the soft refs policy...
5530   rp->setup_policy(false);
5531 
5532   ReferenceProcessorStats stats;
5533   if (!rp->processing_is_mt()) {
5534     // Serial reference processing...
5535     stats = rp->process_discovered_references(&is_alive,
5536                                               &keep_alive,
5537                                               &drain_queue,
5538                                               NULL,
5539                                               _gc_timer_stw,
5540                                               _gc_tracer_stw->gc_id());
5541   } else {
5542     // Parallel reference processing
5543     assert(rp->num_q() == no_of_gc_workers, "sanity");
5544     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5545 
5546     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5547     stats = rp->process_discovered_references(&is_alive,
5548                                               &keep_alive,
5549                                               &drain_queue,
5550                                               &par_task_executor,
5551                                               _gc_timer_stw,
5552                                               _gc_tracer_stw->gc_id());
5553   }
5554 
5555   _gc_tracer_stw->report_gc_reference_stats(stats);
5556 
5557   // We have completed copying any necessary live referent objects.
5558   assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5559 
5560   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5561   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5562 }
5563 
5564 // Weak Reference processing during an evacuation pause (part 2).
5565 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5566   double ref_enq_start = os::elapsedTime();
5567 
5568   ReferenceProcessor* rp = _ref_processor_stw;
5569   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5570 
5571   // Now enqueue any references remaining on the discovered lists
5572   // onto the pending list.
5573   if (!rp->processing_is_mt()) {
5574     // Serial reference processing...
5575     rp->enqueue_discovered_references();
5576   } else {
5577     // Parallel reference enqueueing
5578 
5579     assert(no_of_gc_workers == workers()->active_workers(),
5580            "Need to reset active workers");
5581     assert(rp->num_q() == no_of_gc_workers, "sanity");
5582     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5583 
5584     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5585     rp->enqueue_discovered_references(&par_task_executor);
5586   }
5587 
5588   rp->verify_no_references_recorded();
5589   assert(!rp->discovery_enabled(), "should have been disabled");
5590 
5591   // FIXME
5592   // CM's reference processing also cleans up the string and symbol tables.
5593   // Should we do that here also? We could, but it is a serial operation
5594   // and could significantly increase the pause time.
5595 
5596   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5597   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5598 }
5599 
5600 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5601   _expand_heap_after_alloc_failure = true;
5602   _evacuation_failed = false;
5603 
5604   // Should G1EvacuationFailureALot be in effect for this GC?
5605   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5606 
5607   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5608 
5609   // Disable the hot card cache.
5610   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5611   hot_card_cache->reset_hot_cache_claimed_index();
5612   hot_card_cache->set_use_cache(false);
5613 
5614   uint n_workers;
5615   n_workers =
5616     AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5617                                    workers()->active_workers(),
5618                                    Threads::number_of_non_daemon_threads());
5619   assert(UseDynamicNumberOfGCThreads ||
5620          n_workers == workers()->total_workers(),
5621          "If not dynamic should be using all the workers");
5622   workers()->set_active_workers(n_workers);
5623   set_par_threads(n_workers);
5624 
5625   G1ParTask g1_par_task(this, _task_queues);
5626 
5627   init_for_evac_failure(NULL);
5628 
5629   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5630   double start_par_time_sec = os::elapsedTime();
5631   double end_par_time_sec;
5632 
5633   {
5634     StrongRootsScope srs(this);
5635     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5636     if (g1_policy()->during_initial_mark_pause()) {
5637       ClassLoaderDataGraph::clear_claimed_marks();
5638     }
5639 
5640     // The individual threads will set their evac-failure closures.
5641     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5642     // These tasks use SharedHeap::_process_strong_tasks
5643     assert(UseDynamicNumberOfGCThreads ||
5644            workers()->active_workers() == workers()->total_workers(),
5645            "If not dynamic should be using all the workers");
5646     workers()->run_task(&g1_par_task);
5647     end_par_time_sec = os::elapsedTime();
5648 
5649     // Closing the inner scope will execute the destructor
5650     // for the StrongRootsScope object. We record the current
5651     // elapsed time before closing the scope so that time
5652     // taken for the SRS destructor is NOT included in the
5653     // reported parallel time.
5654   }
5655 
5656   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5657   g1_policy()->phase_times()->record_par_time(par_time_ms);
5658 
5659   double code_root_fixup_time_ms =
5660         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5661   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5662 
5663   set_par_threads(0);
5664 
5665   // Process any discovered reference objects - we have
5666   // to do this _before_ we retire the GC alloc regions
5667   // as we may have to copy some 'reachable' referent
5668   // objects (and their reachable sub-graphs) that were
5669   // not copied during the pause.
5670   process_discovered_references(n_workers);
5671 
5672   if (G1StringDedup::is_enabled()) {
5673     G1STWIsAliveClosure is_alive(this);
5674     G1KeepAliveClosure keep_alive(this);
5675     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5676   }
5677 
5678   _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5679   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5680 
5681   // Reset and re-enable the hot card cache.
5682   // Note the counts for the cards in the regions in the
5683   // collection set are reset when the collection set is freed.
5684   hot_card_cache->reset_hot_cache();
5685   hot_card_cache->set_use_cache(true);
5686 
5687   purge_code_root_memory();
5688 
5689   finalize_for_evac_failure();
5690 
5691   if (evacuation_failed()) {
5692     remove_self_forwarding_pointers();
5693 
5694     // Reset the G1EvacuationFailureALot counters and flags
5695     // Note: the values are reset only when an actual
5696     // evacuation failure occurs.
5697     NOT_PRODUCT(reset_evacuation_should_fail();)
5698   }
5699 
5700   // Enqueue any references remaining on the STW
5701   // reference processor's discovered lists. We need to do
5702   // this after the card table is cleaned (and verified) as
5703   // the act of enqueueing entries onto the pending list
5704   // will log these updates (and dirty their associated
5705   // cards). We need these updates logged to update any
5706   // RSets.
5707   enqueue_discovered_references(n_workers);
5708 
5709   redirty_logged_cards();
5710   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5711 }
5712 
5713 void G1CollectedHeap::free_region(HeapRegion* hr,
5714                                   FreeRegionList* free_list,
5715                                   bool par,
5716                                   bool locked) {
5717   assert(!hr->is_free(), "the region should not be free");
5718   assert(!hr->is_empty(), "the region should not be empty");
5719   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5720   assert(free_list != NULL, "pre-condition");
5721 
5722   if (G1VerifyBitmaps) {
5723     MemRegion mr(hr->bottom(), hr->end());
5724     concurrent_mark()->clearRangePrevBitmap(mr);
5725   }
5726 
5727   // Clear the card counts for this region.
5728   // Note: we only need to do this if the region is not young
5729   // (since we don't refine cards in young regions).
5730   if (!hr->is_young()) {
5731     _cg1r->hot_card_cache()->reset_card_counts(hr);
5732   }
5733   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5734   free_list->add_ordered(hr);
5735 }
5736 
5737 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5738                                      FreeRegionList* free_list,
5739                                      bool par) {
5740   assert(hr->is_starts_humongous(), "this is only for starts humongous regions");
5741   assert(free_list != NULL, "pre-condition");
5742 
5743   size_t hr_capacity = hr->capacity();
5744   // We need to read this before we make the region non-humongous,
5745   // otherwise the information will be gone.
5746   uint last_index = hr->last_hc_index();
5747   hr->clear_humongous();
5748   free_region(hr, free_list, par);
5749 
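  // Free the continuation regions that follow the starts humongous region,
  // up to (but not including) last_index.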
5750   uint i = hr->hrm_index() + 1;
5751   while (i < last_index) {
5752     HeapRegion* curr_hr = region_at(i);
5753     assert(curr_hr->is_continues_humongous(), "invariant");
5754     curr_hr->clear_humongous();
5755     free_region(curr_hr, free_list, par);
5756     i += 1;
5757   }
5758 }
5759 
5760 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5761                                        const HeapRegionSetCount& humongous_regions_removed) {
5762   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5763     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5764     _old_set.bulk_remove(old_regions_removed);
5765     _humongous_set.bulk_remove(humongous_regions_removed);
5766   }
5767 
5768 }
5769 
5770 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5771   assert(list != NULL, "list can't be null");
5772   if (!list->is_empty()) {
5773     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5774     _hrm.insert_list_into_free_list(list);
5775   }
5776 }
5777 
5778 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
5779   _allocator->decrease_used(bytes);
5780 }
5781 
5782 class G1ParCleanupCTTask : public AbstractGangTask {
5783   G1SATBCardTableModRefBS* _ct_bs;
5784   G1CollectedHeap* _g1h;
5785   HeapRegion* volatile _su_head;
5786 public:
5787   G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
5788                      G1CollectedHeap* g1h) :
5789     AbstractGangTask("G1 Par Cleanup CT Task"),
5790     _ct_bs(ct_bs), _g1h(g1h) { }
5791 
5792   void work(uint worker_id) {
5793     HeapRegion* r;
5794     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5795       clear_cards(r);
5796     }
5797   }
5798 
5799   void clear_cards(HeapRegion* r) {
5800     // Cards of the survivors should have already been dirtied.
5801     if (!r->is_survivor()) {
5802       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5803     }
5804   }
5805 };
5806 
5807 #ifndef PRODUCT
5808 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5809   G1CollectedHeap* _g1h;
5810   G1SATBCardTableModRefBS* _ct_bs;
5811 public:
5812   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
5813     : _g1h(g1h), _ct_bs(ct_bs) { }
5814   virtual bool doHeapRegion(HeapRegion* r) {
5815     if (r->is_survivor()) {
5816       _g1h->verify_dirty_region(r);
5817     } else {
5818       _g1h->verify_not_dirty_region(r);
5819     }
5820     return false;
5821   }
5822 };
5823 
5824 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5825   // All of the region should be clean.
5826   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5827   MemRegion mr(hr->bottom(), hr->end());
5828   ct_bs->verify_not_dirty_region(mr);
5829 }
5830 
5831 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5832   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
5833   // dirty allocated blocks as they allocate them. The thread that
5834   // retires each region and replaces it with a new one will do a
5835   // maximal allocation to fill in [pre_dummy_top(),end()] but will
5836   // not dirty that area (one less thing to have to do while holding
5837   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5838   // is dirty.
5839   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5840   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5841   if (hr->is_young()) {
5842     ct_bs->verify_g1_young_region(mr);
5843   } else {
5844     ct_bs->verify_dirty_region(mr);
5845   }
5846 }
5847 
5848 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5849   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
5850   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5851     verify_dirty_region(hr);
5852   }
5853 }
5854 
5855 void G1CollectedHeap::verify_dirty_young_regions() {
5856   verify_dirty_young_list(_young_list->first_region());
5857 }
5858 
5859 bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
5860                                                HeapWord* tams, HeapWord* end) {
5861   guarantee(tams <= end,
5862             err_msg("tams: "PTR_FORMAT" end: "PTR_FORMAT, tams, end));
5863   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
5864   if (result < end) {
5865     gclog_or_tty->cr();
5866     gclog_or_tty->print_cr("## wrong marked address on %s bitmap: "PTR_FORMAT,
5867                            bitmap_name, result);
5868     gclog_or_tty->print_cr("## %s tams: "PTR_FORMAT" end: "PTR_FORMAT,
5869                            bitmap_name, tams, end);
5870     return false;
5871   }
5872   return true;
5873 }
5874 
5875 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5876   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5877   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5878 
5879   HeapWord* bottom = hr->bottom();
5880   HeapWord* ptams  = hr->prev_top_at_mark_start();
5881   HeapWord* ntams  = hr->next_top_at_mark_start();
5882   HeapWord* end    = hr->end();
5883 
5884   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5885 
5886   bool res_n = true;
5887   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5888   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5889   // if we happen to be in that state.
5890   if (mark_in_progress() || !_cmThread->in_progress()) {
5891     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5892   }
5893   if (!res_p || !res_n) {
5894     gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
5895                            HR_FORMAT_PARAMS(hr));
5896     gclog_or_tty->print_cr("#### Caller: %s", caller);
5897     return false;
5898   }
5899   return true;
5900 }
5901 
5902 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5903   if (!G1VerifyBitmaps) return;
5904 
5905   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5906 }
5907 
5908 class G1VerifyBitmapClosure : public HeapRegionClosure {
5909 private:
5910   const char* _caller;
5911   G1CollectedHeap* _g1h;
5912   bool _failures;
5913 
5914 public:
5915   G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
5916     _caller(caller), _g1h(g1h), _failures(false) { }
5917 
5918   bool failures() { return _failures; }
5919 
5920   virtual bool doHeapRegion(HeapRegion* hr) {
5921     if (hr->is_continues_humongous()) return false;
5922 
5923     bool result = _g1h->verify_bitmaps(_caller, hr);
5924     if (!result) {
5925       _failures = true;
5926     }
5927     return false;
5928   }
5929 };
5930 
5931 void G1CollectedHeap::check_bitmaps(const char* caller) {
5932   if (!G1VerifyBitmaps) return;
5933 
5934   G1VerifyBitmapClosure cl(caller, this);
5935   heap_region_iterate(&cl);
5936   guarantee(!cl.failures(), "bitmap verification");
5937 }
5938 
5939 class G1CheckCSetFastTableClosure : public HeapRegionClosure {
5940  private:
5941   bool _failures;
5942  public:
5943   G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
5944 
5945   virtual bool doHeapRegion(HeapRegion* hr) {
5946     uint i = hr->hrm_index();
5947     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
5948     if (hr->is_humongous()) {
5949       if (hr->in_collection_set()) {
5950         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
5951         _failures = true;
5952         return true;
5953       }
5954       if (cset_state.is_in_cset()) {
5955         gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
5956         _failures = true;
5957         return true;
5958       }
5959       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
5960         gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
5961         _failures = true;
5962         return true;
5963       }
5964     } else {
5965       if (cset_state.is_humongous()) {
5966         gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
5967         _failures = true;
5968         return true;
5969       }
5970       if (hr->in_collection_set() != cset_state.is_in_cset()) {
5971         gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
5972                                hr->in_collection_set(), cset_state.value(), i);
5973         _failures = true;
5974         return true;
5975       }
5976       if (cset_state.is_in_cset()) {
5977         if (hr->is_young() != (cset_state.is_young())) {
5978           gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
5979                                  hr->is_young(), cset_state.value(), i);
5980           _failures = true;
5981           return true;
5982         }
5983         if (hr->is_old() != (cset_state.is_old())) {
5984           gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
5985                                  hr->is_old(), cset_state.value(), i);
5986           _failures = true;
5987           return true;
5988         }
5989       }
5990     }
5991     return false;
5992   }
5993 
5994   bool failures() const { return _failures; }
5995 };
5996 
5997 bool G1CollectedHeap::check_cset_fast_test() {
5998   G1CheckCSetFastTableClosure cl;
5999   _hrm.iterate(&cl);
6000   return !cl.failures();
6001 }
6002 #endif // PRODUCT
6003 
6004 void G1CollectedHeap::cleanUpCardTable() {
6005   G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
6006   double start = os::elapsedTime();
6007 
6008   {
6009     // Iterate over the dirty cards region list.
6010     G1ParCleanupCTTask cleanup_task(ct_bs, this);
6011 
6012     set_par_threads();
6013     workers()->run_task(&cleanup_task);
6014     set_par_threads(0);
6015 #ifndef PRODUCT
6016     if (G1VerifyCTCleanup || VerifyAfterGC) {
6017       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6018       heap_region_iterate(&cleanup_verifier);
6019     }
6020 #endif
6021   }
6022 
6023   double elapsed = os::elapsedTime() - start;
6024   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6025 }
6026 
6027 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6028   size_t pre_used = 0;
6029   FreeRegionList local_free_list("Local List for CSet Freeing");
6030 
6031   double young_time_ms     = 0.0;
6032   double non_young_time_ms = 0.0;
6033 
6034   // Since the collection set is a superset of the young list,
6035   // all we need to do to clear the young list is clear its
6036   // head and length, and unlink any young regions in the code below.
6037   _young_list->clear();
6038 
6039   G1CollectorPolicy* policy = g1_policy();
6040 
6041   double start_sec = os::elapsedTime();
6042   bool non_young = true;
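  // Track whether we are currently walking young or non-young regions of the
  // collection set, so that elapsed time is attributed to the right bucket
  // (young_time_ms vs. non_young_time_ms).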
6043 
6044   HeapRegion* cur = cs_head;
6046   size_t rs_lengths = 0;
6047 
6048   while (cur != NULL) {
6049     assert(!is_on_master_free_list(cur), "sanity");
6050     if (non_young) {
6051       if (cur->is_young()) {
6052         double end_sec = os::elapsedTime();
6053         double elapsed_ms = (end_sec - start_sec) * 1000.0;
6054         non_young_time_ms += elapsed_ms;
6055 
6056         start_sec = os::elapsedTime();
6057         non_young = false;
6058       }
6059     } else {
6060       if (!cur->is_young()) {
6061         double end_sec = os::elapsedTime();
6062         double elapsed_ms = (end_sec - start_sec) * 1000.0;
6063         young_time_ms += elapsed_ms;
6064 
6065         start_sec = os::elapsedTime();
6066         non_young = true;
6067       }
6068     }
6069 
6070     rs_lengths += cur->rem_set()->occupied_locked();
6071 
6072     HeapRegion* next = cur->next_in_collection_set();
6073     assert(cur->in_collection_set(), "bad CS");
6074     cur->set_next_in_collection_set(NULL);
6075     cur->set_in_collection_set(false);
6076 
6077     if (cur->is_young()) {
6078       int index = cur->young_index_in_cset();
6079       assert(index != -1, "invariant");
6080       assert((uint) index < policy->young_cset_region_length(), "invariant");
6081       size_t words_survived = _surviving_young_words[index];
6082       cur->record_surv_words_in_group(words_survived);
6083 
6084       // At this point we have 'popped' cur from the collection set
6085       // (linked via next_in_collection_set()) but it is still in the
6086       // young list (linked via next_young_region()). Clear the
6087       // _next_young_region field.
6088       cur->set_next_young_region(NULL);
6089     } else {
6090       int index = cur->young_index_in_cset();
6091       assert(index == -1, "invariant");
6092     }
6093 
6094     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
6095             (!cur->is_young() && cur->young_index_in_cset() == -1),
6096             "invariant" );
6097 
6098     if (!cur->evacuation_failed()) {
6099       MemRegion used_mr = cur->used_region();
6100 
6101       // The used region should not be empty.
6102       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6103       pre_used += cur->used();
6104       free_region(cur, &local_free_list, false /* par */, true /* locked */);
6105     } else {
6106       cur->uninstall_surv_rate_group();
6107       if (cur->is_young()) {
6108         cur->set_young_index_in_cset(-1);
6109       }
6110       cur->set_evacuation_failed(false);
6111       // The region is now considered to be old.
6112       cur->set_old();
6113       _old_set.add(cur);
6114       evacuation_info.increment_collectionset_used_after(cur->used());
6115     }
6116     cur = next;
6117   }
6118 
6119   evacuation_info.set_regions_freed(local_free_list.length());
6120   policy->record_max_rs_lengths(rs_lengths);
6121   policy->cset_regions_freed();
6122 
6123   double end_sec = os::elapsedTime();
6124   double elapsed_ms = (end_sec - start_sec) * 1000.0;
6125 
6126   if (non_young) {
6127     non_young_time_ms += elapsed_ms;
6128   } else {
6129     young_time_ms += elapsed_ms;
6130   }
6131 
6132   prepend_to_freelist(&local_free_list);
6133   decrement_summary_bytes(pre_used);
6134   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6135   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6136 }
6137 
6138 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
6139  private:
6140   FreeRegionList* _free_region_list;
6141   HeapRegionSet* _proxy_set;
6142   HeapRegionSetCount _humongous_regions_removed;
6143   size_t _freed_bytes;
6144  public:
6145 
6146   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
6147     _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
6148   }
6149 
6150   virtual bool doHeapRegion(HeapRegion* r) {
6151     if (!r->is_starts_humongous()) {
6152       return false;
6153     }
6154 
6155     G1CollectedHeap* g1h = G1CollectedHeap::heap();
6156 
6157     oop obj = (oop)r->bottom();
6158     CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
6159 
6160     // The following checks for whether the humongous object is live are sufficient.
6161     // The main additional check (in addition to having a reference from the roots
6162     // or the young gen) is whether the humongous object has a remembered set entry.
6163     //
6164     // A humongous object cannot be live if there is no remembered set for it
6165     // because:
6166     // - there can be no references to the object from within humongous starts
6167     // regions, because we never allocate other objects into them.
6168     // (I.e. there are no intra-region references that may be missed by the
6169     // remembered set)
6170     // - as soon as there is a remembered set entry to the humongous starts region
6171     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6172     // until the end of a concurrent mark.
6173     //
6174     // It is not required to check whether the object has been found dead by marking
6175     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
6176     // all objects allocated during that time are considered live.
6177     // SATB marking is even more conservative than the remembered set.
6178     // So if at this point in the collection there is no remembered set entry,
6179     // nobody has a reference to it.
6180     // At the start of collection we flush all refinement logs, and remembered sets
6181     // are completely up-to-date with respect to references to the humongous object.
6182     //
6183     // Other implementation considerations:
6184     // - never consider object arrays at this time because cleaning up their
6185     // remembered sets would require considerable effort. This is
6186     // required because stale remembered sets might reference locations that
6187     // are currently allocated into.
6188     uint region_idx = r->hrm_index();
6189     if (g1h->humongous_is_live(region_idx) ||
6190         g1h->humongous_region_is_always_live(region_idx)) {
6191 
6192       if (G1TraceEagerReclaimHumongousObjects) {
6193         gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6194                                region_idx,
6195                                obj->size()*HeapWordSize,
6196                                r->bottom(),
6197                                r->region_num(),
6198                                r->rem_set()->occupied(),
6199                                r->rem_set()->strong_code_roots_list_length(),
6200                                next_bitmap->isMarked(r->bottom()),
6201                                g1h->humongous_is_live(region_idx),
6202                                obj->is_objArray()
6203                               );
6204       }
6205 
6206       return false;
6207     }
6208 
6209     guarantee(!obj->is_objArray(),
6210               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
6211                       r->bottom()));
6212 
6213     if (G1TraceEagerReclaimHumongousObjects) {
6214       gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6215                              region_idx,
6216                              obj->size()*HeapWordSize,
6217                              r->bottom(),
6218                              r->region_num(),
6219                              r->rem_set()->occupied(),
6220                              r->rem_set()->strong_code_roots_list_length(),
6221                              next_bitmap->isMarked(r->bottom()),
6222                              g1h->humongous_is_live(region_idx),
6223                              obj->is_objArray()
6224                             );
6225     }
6226     // Need to clear mark bit of the humongous object if already set.
6227     if (next_bitmap->isMarked(r->bottom())) {
6228       next_bitmap->clear(r->bottom());
6229     }
6230     _freed_bytes += r->used();
6231     r->set_containing_set(NULL);
6232     _humongous_regions_removed.increment(1u, r->capacity());
6233     g1h->free_humongous_region(r, _free_region_list, false);
6234 
6235     return false;
6236   }
6237 
6238   HeapRegionSetCount& humongous_free_count() {
6239     return _humongous_regions_removed;
6240   }
6241 
6242   size_t bytes_freed() const {
6243     return _freed_bytes;
6244   }
6245 
6246   size_t humongous_reclaimed() const {
6247     return _humongous_regions_removed.length();
6248   }
6249 };
6250 
6251 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
6252   assert_at_safepoint(true);
6253 
6254   if (!G1EagerReclaimHumongousObjects ||
6255       (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
6256     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
6257     return;
6258   }
6259 
6260   double start_time = os::elapsedTime();
6261 
6262   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
6263 
6264   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
6265   heap_region_iterate(&cl);
6266 
6267   HeapRegionSetCount empty_set;
6268   remove_from_old_sets(empty_set, cl.humongous_free_count());
6269 
6270   G1HRPrinter* hr_printer = _g1h->hr_printer();
6271   if (hr_printer->is_active()) {
6272     FreeRegionListIterator iter(&local_cleanup_list);
6273     while (iter.more_available()) {
6274       HeapRegion* hr = iter.get_next();
6275       hr_printer->cleanup(hr);
6276     }
6277   }
6278 
6279   prepend_to_freelist(&local_cleanup_list);
6280   decrement_summary_bytes(cl.bytes_freed());
6281 
6282   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
6283                                                                     cl.humongous_reclaimed());
6284 }
6285 
6286 // This routine is similar to the above but does not record
6287 // any policy statistics or update free lists; we are abandoning
6288 // the current incremental collection set in preparation for a
6289 // full collection. After the full GC we will start to build up
6290 // the incremental collection set again.
6291 // This is only called when we're doing a full collection
6292 // and is immediately followed by the tearing down of the young list.
6293 
6294 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6295   HeapRegion* cur = cs_head;
6296 
6297   while (cur != NULL) {
6298     HeapRegion* next = cur->next_in_collection_set();
6299     assert(cur->in_collection_set(), "bad CS");
6300     cur->set_next_in_collection_set(NULL);
6301     cur->set_in_collection_set(false);
6302     cur->set_young_index_in_cset(-1);
6303     cur = next;
6304   }
6305 }
6306 
6307 void G1CollectedHeap::set_free_regions_coming() {
6308   if (G1ConcRegionFreeingVerbose) {
6309     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6310                            "setting free regions coming");
6311   }
6312 
6313   assert(!free_regions_coming(), "pre-condition");
6314   _free_regions_coming = true;
6315 }
6316 
6317 void G1CollectedHeap::reset_free_regions_coming() {
6318   assert(free_regions_coming(), "pre-condition");
6319 
6320   {
6321     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6322     _free_regions_coming = false;
6323     SecondaryFreeList_lock->notify_all();
6324   }
6325 
6326   if (G1ConcRegionFreeingVerbose) {
6327     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6328                            "reset free regions coming");
6329   }
6330 }
6331 
6332 void G1CollectedHeap::wait_while_free_regions_coming() {
6333   // Most of the time we won't have to wait, so let's do a quick test
6334   // first before we take the lock.
6335   if (!free_regions_coming()) {
6336     return;
6337   }
6338 
6339   if (G1ConcRegionFreeingVerbose) {
6340     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6341                            "waiting for free regions");
6342   }
6343 
6344   {
6345     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6346     while (free_regions_coming()) {
6347       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
6348     }
6349   }
6350 
6351   if (G1ConcRegionFreeingVerbose) {
6352     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6353                            "done waiting for free regions");
6354   }
6355 }
6356 
6357 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6358   assert(heap_lock_held_for_gc(),
6359          "the heap lock should already be held by or for this thread");
6360   _young_list->push_region(hr);
6361 }
6362 
6363 class NoYoungRegionsClosure: public HeapRegionClosure {
6364 private:
6365   bool _success;
6366 public:
6367   NoYoungRegionsClosure() : _success(true) { }
6368   bool doHeapRegion(HeapRegion* r) {
6369     if (r->is_young()) {
6370       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
6371                              r->bottom(), r->end());
6372       _success = false;
6373     }
6374     return false;
6375   }
6376   bool success() { return _success; }
6377 };
6378 
6379 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6380   bool ret = _young_list->check_list_empty(check_sample);
6381 
6382   if (check_heap) {
6383     NoYoungRegionsClosure closure;
6384     heap_region_iterate(&closure);
6385     ret = ret && closure.success();
6386   }
6387 
6388   return ret;
6389 }
6390 
6391 class TearDownRegionSetsClosure : public HeapRegionClosure {
6392 private:
6393   HeapRegionSet *_old_set;
6394 
6395 public:
6396   TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
6397 
6398   bool doHeapRegion(HeapRegion* r) {
6399     if (r->is_old()) {
6400       _old_set->remove(r);
6401     } else {
6402       // We ignore free regions, we'll empty the free list afterwards.
6403       // We ignore young regions, we'll empty the young list afterwards.
6404       // We ignore humongous regions, we're not tearing down the
6405       // humongous regions set.
6406       assert(r->is_free() || r->is_young() || r->is_humongous(),
6407              "it cannot be another type");
6408     }
6409     return false;
6410   }
6411 
6412   ~TearDownRegionSetsClosure() {
6413     assert(_old_set->is_empty(), "post-condition");
6414   }
6415 };
6416 
6417 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6418   assert_at_safepoint(true /* should_be_vm_thread */);
6419 
6420   if (!free_list_only) {
6421     TearDownRegionSetsClosure cl(&_old_set);
6422     heap_region_iterate(&cl);
6423 
6424     // Note that emptying the _young_list is postponed and instead done as
6425     // the first step when rebuilding the regions sets again. The reason for
6426     // this is that during a full GC string deduplication needs to know if
6427     // a collected region was young or old when the full GC was initiated.
6428   }
6429   _hrm.remove_all_free_regions();
6430 }
6431 
6432 class RebuildRegionSetsClosure : public HeapRegionClosure {
6433 private:
6434   bool            _free_list_only;
6435   HeapRegionSet*   _old_set;
6436   HeapRegionManager*   _hrm;
6437   size_t          _total_used;
6438 
6439 public:
6440   RebuildRegionSetsClosure(bool free_list_only,
6441                            HeapRegionSet* old_set, HeapRegionManager* hrm) :
6442     _free_list_only(free_list_only),
6443     _old_set(old_set), _hrm(hrm), _total_used(0) {
6444     assert(_hrm->num_free_regions() == 0, "pre-condition");
6445     if (!free_list_only) {
6446       assert(_old_set->is_empty(), "pre-condition");
6447     }
6448   }
6449 
6450   bool doHeapRegion(HeapRegion* r) {
6451     if (r->is_continues_humongous()) {
6452       return false;
6453     }
6454 
6455     if (r->is_empty()) {
6456       // Add free regions to the free list
6457       r->set_free();
6458       r->set_allocation_context(AllocationContext::system());
6459       _hrm->insert_into_free_list(r);
6460     } else if (!_free_list_only) {
6461       assert(!r->is_young(), "we should not come across young regions");
6462 
6463       if (r->is_humongous()) {
6464         // We ignore humongous regions; we leave the humongous set unchanged.
6465       } else {
6466         // Objects that were compacted would have ended up on regions
6467         // that were previously old or free.
6468         assert(r->is_free() || r->is_old(), "invariant");
6469         // We now consider them old, so register as such.
6470         r->set_old();
6471         _old_set->add(r);
6472       }
6473       _total_used += r->used();
6474     }
6475 
6476     return false;
6477   }
6478 
6479   size_t total_used() {
6480     return _total_used;
6481   }
6482 };
6483 
6484 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6485   assert_at_safepoint(true /* should_be_vm_thread */);
6486 
6487   if (!free_list_only) {
6488     _young_list->empty_list();
6489   }
6490 
6491   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6492   heap_region_iterate(&cl);
6493 
6494   if (!free_list_only) {
6495     _allocator->set_used(cl.total_used());
6496   }
6497   assert(_allocator->used_unlocked() == recalculate_used(),
6498          err_msg("inconsistent _allocator->used_unlocked(), "
6499                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6500                  _allocator->used_unlocked(), recalculate_used()));
6501 }
6502 
6503 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6504   _refine_cte_cl->set_concurrent(concurrent);
6505 }
6506 
6507 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6508   HeapRegion* hr = heap_region_containing(p);
6509   return hr->is_in(p);
6510 }
6511 
6512 // Methods for the mutator alloc region
6513 
6514 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6515                                                       bool force) {
6516   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6517   assert(!force || g1_policy()->can_expand_young_list(),
6518          "if force is true we should be able to expand the young list");
6519   bool young_list_full = g1_policy()->is_young_list_full();
6520   if (force || !young_list_full) {
6521     HeapRegion* new_alloc_region = new_region(word_size,
6522                                               false /* is_old */,
6523                                               false /* do_expand */);
6524     if (new_alloc_region != NULL) {
6525       set_region_short_lived_locked(new_alloc_region);
6526       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6527       check_bitmaps("Mutator Region Allocation", new_alloc_region);
6528       return new_alloc_region;
6529     }
6530   }
6531   return NULL;
6532 }
6533 
6534 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6535                                                   size_t allocated_bytes) {
6536   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6537   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6538 
6539   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6540   _allocator->increase_used(allocated_bytes);
6541   _hr_printer.retire(alloc_region);
6542   // We update the eden sizes here, when the region is retired,
6543   // instead of when it's allocated, since this is the point that its
6544   // used space has been recorded in _summary_bytes_used.
6545   g1mm()->update_eden_size();
6546 }
6547 
6548 void G1CollectedHeap::set_par_threads() {
6549   // Don't change the number of workers.  Use the value previously set
6550   // in the workgroup.
6551   uint n_workers = workers()->active_workers();
6552   assert(UseDynamicNumberOfGCThreads ||
6553            n_workers == workers()->total_workers(),
6554       "Otherwise should be using the total number of workers");
6555   if (n_workers == 0) {
6556     assert(false, "Should have been set in prior evacuation pause.");
6557     n_workers = ParallelGCThreads;
6558     workers()->set_active_workers(n_workers);
6559   }
6560   set_par_threads(n_workers);
6561 }
6562 
6563 // Methods for the GC alloc regions
6564 
6565 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6566                                                  uint count,
6567                                                  InCSetState dest) {
6568   assert(FreeList_lock->owned_by_self(), "pre-condition");
6569 
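  // Only hand out a new GC alloc region while the number of regions already
  // allocated for this destination ('count') is below the policy limit for
  // that destination.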
6570   if (count < g1_policy()->max_regions(dest)) {
6571     const bool is_survivor = (dest.is_young());
6572     HeapRegion* new_alloc_region = new_region(word_size,
6573                                               !is_survivor,
6574                                               true /* do_expand */);
6575     if (new_alloc_region != NULL) {
6576       // We really only need to do this for old regions given that we
6577       // should never scan survivors. But it doesn't hurt to do it
6578       // for survivors too.
6579       new_alloc_region->record_timestamp();
6580       if (is_survivor) {
6581         new_alloc_region->set_survivor();
6582         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6583         check_bitmaps("Survivor Region Allocation", new_alloc_region);
6584       } else {
6585         new_alloc_region->set_old();
6586         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6587         check_bitmaps("Old Region Allocation", new_alloc_region);
6588       }
6589       bool during_im = g1_policy()->during_initial_mark_pause();
6590       new_alloc_region->note_start_of_copying(during_im);
6591       return new_alloc_region;
6592     }
6593   }
6594   return NULL;
6595 }
6596 
6597 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6598                                              size_t allocated_bytes,
6599                                              InCSetState dest) {
6600   bool during_im = g1_policy()->during_initial_mark_pause();
6601   alloc_region->note_end_of_copying(during_im);
6602   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6603   if (dest.is_young()) {
6604     young_list()->add_survivor_region(alloc_region);
6605   } else {
6606     _old_set.add(alloc_region);
6607   }
6608   _hr_printer.retire(alloc_region);
6609 }
6610 
6611 // Heap region set verification
6612 
6613 class VerifyRegionListsClosure : public HeapRegionClosure {
6614 private:
6615   HeapRegionSet*   _old_set;
6616   HeapRegionSet*   _humongous_set;
6617   HeapRegionManager*   _hrm;
6618 
6619 public:
6620   HeapRegionSetCount _old_count;
6621   HeapRegionSetCount _humongous_count;
6622   HeapRegionSetCount _free_count;
6623 
6624   VerifyRegionListsClosure(HeapRegionSet* old_set,
6625                            HeapRegionSet* humongous_set,
6626                            HeapRegionManager* hrm) :
6627     _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6628     _old_count(), _humongous_count(), _free_count(){ }
6629 
6630   bool doHeapRegion(HeapRegion* hr) {
6631     if (hr->is_continues_humongous()) {
6632       return false;
6633     }
6634 
6635     if (hr->is_young()) {
6636       // TODO
6637     } else if (hr->is_starts_humongous()) {
6638       assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6639       _humongous_count.increment(1u, hr->capacity());
6640     } else if (hr->is_empty()) {
6641       assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6642       _free_count.increment(1u, hr->capacity());
6643     } else if (hr->is_old()) {
6644       assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6645       _old_count.increment(1u, hr->capacity());
6646     } else {
6647       ShouldNotReachHere();
6648     }
6649     return false;
6650   }
6651 
6652   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6653     guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6654     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6655         old_set->total_capacity_bytes(), _old_count.capacity()));
6656 
6657     guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6658     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6659         humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6660 
6661     guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6662     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6663         free_list->total_capacity_bytes(), _free_count.capacity()));
6664   }
6665 };
6666 
6667 void G1CollectedHeap::verify_region_sets() {
6668   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6669 
6670   // First, check the explicit lists.
6671   _hrm.verify();
6672   {
6673     // Given that a concurrent operation might be adding regions to
6674     // the secondary free list we have to take the lock before
6675     // verifying it.
6676     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6677     _secondary_free_list.verify_list();
6678   }
6679 
6680   // If a concurrent region freeing operation is in progress it will
6681   // be difficult to attribute any free regions we come
6682   // across to the correct free list given that they might belong to
6683   // one of several (free_list, secondary_free_list, any local lists,
6684   // etc.). So, if that's the case we will skip the rest of the
6685   // verification operation. Alternatively, waiting for the concurrent
6686   // operation to complete will have a non-trivial effect on the GC's
6687   // operation (no concurrent operation will last longer than the
6688   // interval between two calls to verification) and it might hide
6689   // any issues that we would like to catch during testing.
6690   if (free_regions_coming()) {
6691     return;
6692   }
6693 
6694   // Make sure we append the secondary_free_list on the free_list so
6695   // that all free regions we will come across can be safely
6696   // attributed to the free_list.
6697   append_secondary_free_list_if_not_empty_with_lock();
6698 
6699   // Finally, make sure that the region accounting in the lists is
6700   // consistent with what we see in the heap.
6701 
6702   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
6703   heap_region_iterate(&cl);
6704   cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
6705 }
6706 
6707 // Optimized nmethod scanning
6708 
6709 class RegisterNMethodOopClosure: public OopClosure {
6710   G1CollectedHeap* _g1h;
6711   nmethod* _nm;
6712 
6713   template <class T> void do_oop_work(T* p) {
6714     T heap_oop = oopDesc::load_heap_oop(p);
6715     if (!oopDesc::is_null(heap_oop)) {
6716       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6717       HeapRegion* hr = _g1h->heap_region_containing(obj);
6718       assert(!hr->is_continues_humongous(),
6719              err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
6720                      " starting at "HR_FORMAT,
6721                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6722 
6723       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
6724       hr->add_strong_code_root_locked(_nm);
6725     }
6726   }
6727 
6728 public:
6729   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6730     _g1h(g1h), _nm(nm) {}
6731 
6732   void do_oop(oop* p)       { do_oop_work(p); }
6733   void do_oop(narrowOop* p) { do_oop_work(p); }
6734 };
6735 
6736 class UnregisterNMethodOopClosure: public OopClosure {
6737   G1CollectedHeap* _g1h;
6738   nmethod* _nm;
6739 
6740   template <class T> void do_oop_work(T* p) {
6741     T heap_oop = oopDesc::load_heap_oop(p);
6742     if (!oopDesc::is_null(heap_oop)) {
6743       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
6744       HeapRegion* hr = _g1h->heap_region_containing(obj);
6745       assert(!hr->is_continues_humongous(),
6746              err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT
6747                      " starting at "HR_FORMAT,
6748                      _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
6749 
6750       hr->remove_strong_code_root(_nm);
6751     }
6752   }
6753 
6754 public:
6755   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
6756     _g1h(g1h), _nm(nm) {}
6757 
6758   void do_oop(oop* p)       { do_oop_work(p); }
6759   void do_oop(narrowOop* p) { do_oop_work(p); }
6760 };
6761 
6762 void G1CollectedHeap::register_nmethod(nmethod* nm) {
6763   CollectedHeap::register_nmethod(nm);
6764 
6765   guarantee(nm != NULL, "sanity");
6766   RegisterNMethodOopClosure reg_cl(this, nm);
6767   nm->oops_do(&reg_cl);
6768 }
6769 
6770 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
6771   CollectedHeap::unregister_nmethod(nm);
6772 
6773   guarantee(nm != NULL, "sanity");
6774   UnregisterNMethodOopClosure reg_cl(this, nm);
6775   nm->oops_do(&reg_cl, true);
6776 }
6777 
6778 void G1CollectedHeap::purge_code_root_memory() {
6779   double purge_start = os::elapsedTime();
6780   G1CodeRootSet::purge();
6781   double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
6782   g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
6783 }
6784 
6785 class RebuildStrongCodeRootClosure: public CodeBlobClosure {
6786   G1CollectedHeap* _g1h;
6787 
6788 public:
6789   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
6790     _g1h(g1h) {}
6791 
6792   void do_code_blob(CodeBlob* cb) {
6793     nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
6794     if (nm == NULL) {
6795       return;
6796     }
6797 
6798     if (ScavengeRootsInCode) {
6799       _g1h->register_nmethod(nm);
6800     }
6801   }
6802 };
6803 
6804 void G1CollectedHeap::rebuild_strong_code_roots() {
6805   RebuildStrongCodeRootClosure blob_cl(this);
6806   CodeCache::blobs_do(&blob_cl);
6807 }