1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"
  46 #include "memory/gcLocker.inline.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/generationSpec.hpp"
  49 #include "memory/referenceProcessor.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/oop.pcgc.inline.hpp"
  52 #include "runtime/aprofiler.hpp"
  53 #include "runtime/vmThread.hpp"
  54 
  55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  56 
  57 // turn it on so that the contents of the young list (scan-only /
  58 // to-be-collected) are printed at "strategic" points before / during
  59 // / after the collection --- this is useful for debugging
  60 #define YOUNG_LIST_VERBOSE 0
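     //
     // A minimal usage sketch (illustrative only, not part of the original
     // sources): code that wants the extra output would be guarded like this,
     // assuming a young list field such as _young_list:
     //
     //   #if YOUNG_LIST_VERBOSE
     //     gclog_or_tty->print_cr("\nYoung_list:");
     //     _young_list->print();
     //   #endif // YOUNG_LIST_VERBOSE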
  61 // CURRENT STATUS
  62 // This file is under construction.  Search for "FIXME".
  63 
  64 // INVARIANTS/NOTES
  65 //
  66 // All allocation activity covered by the G1CollectedHeap interface is
  67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  68 // and allocate_new_tlab, which are the "entry" points to the
  69 // allocation code from the rest of the JVM.  (Note that this does not
  70 // apply to TLAB allocation, which is not part of this interface: it
  71 // is done by clients of this interface.)
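     //
     // As a hedged sketch of what that serialization looks like in practice
     // (the same pattern appears in attempt_allocation_slow() further down in
     // this file):
     //
     //   {
     //     MutexLockerEx x(Heap_lock);
     //     result = _mutator_alloc_region.attempt_allocation_locked(word_size,
     //                                                     false /* bot_updates */);
     //     ...
     //   }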
  72 
  73 // Notes on implementation of parallelism in different tasks.
  74 //
  75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  76 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  77 // It uses run_task(), which sets _n_workers in the task.
  78 // G1ParTask executes g1_process_strong_roots() ->
  79 // SharedHeap::process_strong_roots(), which eventually calls
  80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  81 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  83 //
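     //
     // As an illustrative sketch (see ParRebuildRSTask later in this file for
     // a concrete instance), a parallel task typically drives the chunked
     // iteration from its work() method; the closure and claim value below
     // are hypothetical:
     //
     //   void work(uint worker_id) {
     //     SomeHeapRegionClosure cl(_g1, worker_id);
     //     _g1->heap_region_par_iterate_chunked(&cl, worker_id,
     //                                          _g1->workers()->active_workers(),
     //                                          HeapRegion::SomeClaimValue);
     //   }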
  84 
  85 // Local to this file.
  86 
  87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  88   SuspendibleThreadSet* _sts;
  89   G1RemSet* _g1rs;
  90   ConcurrentG1Refine* _cg1r;
  91   bool _concurrent;
  92 public:
  93   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  94                               G1RemSet* g1rs,
  95                               ConcurrentG1Refine* cg1r) :
  96     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  97   {}
  98   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
  99     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
 100     // This path is executed by the concurrent refinement or mutator threads,
 101     // i.e. concurrently with the mutators, and so we do not care whether
 102     // card_ptr contains references that point into the collection set.
 103     assert(!oops_into_cset, "should be");
 104 
 105     if (_concurrent && _sts->should_yield()) {
 106       // Caller will actually yield.
 107       return false;
 108     }
 109     // Otherwise, we finished successfully; return true.
 110     return true;
 111   }
 112   void set_concurrent(bool b) { _concurrent = b; }
 113 };
 114 
 115 
 116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 117   int _calls;
 118   G1CollectedHeap* _g1h;
 119   CardTableModRefBS* _ctbs;
 120   int _histo[256];
 121 public:
 122   ClearLoggedCardTableEntryClosure() :
 123     _calls(0)
 124   {
 125     _g1h = G1CollectedHeap::heap();
 126     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 127     for (int i = 0; i < 256; i++) _histo[i] = 0;
 128   }
 129   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 130     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 131       _calls++;
 132       unsigned char* ujb = (unsigned char*)card_ptr;
 133       int ind = (int)(*ujb);
 134       _histo[ind]++;
 135       *card_ptr = -1;
 136     }
 137     return true;
 138   }
 139   int calls() { return _calls; }
 140   void print_histo() {
 141     gclog_or_tty->print_cr("Card table value histogram:");
 142     for (int i = 0; i < 256; i++) {
 143       if (_histo[i] != 0) {
 144         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 145       }
 146     }
 147   }
 148 };
 149 
 150 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
 151   int _calls;
 152   G1CollectedHeap* _g1h;
 153   CardTableModRefBS* _ctbs;
 154 public:
 155   RedirtyLoggedCardTableEntryClosure() :
 156     _calls(0)
 157   {
 158     _g1h = G1CollectedHeap::heap();
 159     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 160   }
 161   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 162     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 163       _calls++;
 164       *card_ptr = 0;
 165     }
 166     return true;
 167   }
 168   int calls() { return _calls; }
 169 };
 170 
 171 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 172 public:
 173   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 174     *card_ptr = CardTableModRefBS::dirty_card_val();
 175     return true;
 176   }
 177 };
 178 
 179 YoungList::YoungList(G1CollectedHeap* g1h) :
 180     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 181     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 182   guarantee(check_list_empty(false), "just making sure...");
 183 }
 184 
 185 void YoungList::push_region(HeapRegion *hr) {
 186   assert(!hr->is_young(), "should not already be young");
 187   assert(hr->get_next_young_region() == NULL, "cause it should!");
 188 
 189   hr->set_next_young_region(_head);
 190   _head = hr;
 191 
 192   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 193   ++_length;
 194 }
 195 
 196 void YoungList::add_survivor_region(HeapRegion* hr) {
 197   assert(hr->is_survivor(), "should be flagged as survivor region");
 198   assert(hr->get_next_young_region() == NULL, "cause it should!");
 199 
 200   hr->set_next_young_region(_survivor_head);
 201   if (_survivor_head == NULL) {
 202     _survivor_tail = hr;
 203   }
 204   _survivor_head = hr;
 205   ++_survivor_length;
 206 }
 207 
 208 void YoungList::empty_list(HeapRegion* list) {
 209   while (list != NULL) {
 210     HeapRegion* next = list->get_next_young_region();
 211     list->set_next_young_region(NULL);
 212     list->uninstall_surv_rate_group();
 213     list->set_not_young();
 214     list = next;
 215   }
 216 }
 217 
 218 void YoungList::empty_list() {
 219   assert(check_list_well_formed(), "young list should be well formed");
 220 
 221   empty_list(_head);
 222   _head = NULL;
 223   _length = 0;
 224 
 225   empty_list(_survivor_head);
 226   _survivor_head = NULL;
 227   _survivor_tail = NULL;
 228   _survivor_length = 0;
 229 
 230   _last_sampled_rs_lengths = 0;
 231 
 232   assert(check_list_empty(false), "just making sure...");
 233 }
 234 
 235 bool YoungList::check_list_well_formed() {
 236   bool ret = true;
 237 
 238   uint length = 0;
 239   HeapRegion* curr = _head;
 240   HeapRegion* last = NULL;
 241   while (curr != NULL) {
 242     if (!curr->is_young()) {
 243       gclog_or_tty->print_cr("### YOUNG REGION "PTR_FORMAT"-"PTR_FORMAT" "
 244                              "incorrectly tagged (y: %d, surv: %d)",
 245                              curr->bottom(), curr->end(),
 246                              curr->is_young(), curr->is_survivor());
 247       ret = false;
 248     }
 249     ++length;
 250     last = curr;
 251     curr = curr->get_next_young_region();
 252   }
 253   ret = ret && (length == _length);
 254 
 255   if (!ret) {
 256     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
 257     gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
 258                            length, _length);
 259   }
 260 
 261   return ret;
 262 }
 263 
 264 bool YoungList::check_list_empty(bool check_sample) {
 265   bool ret = true;
 266 
 267   if (_length != 0) {
 268     gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
 269                   _length);
 270     ret = false;
 271   }
 272   if (check_sample && _last_sampled_rs_lengths != 0) {
 273     gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
 274     ret = false;
 275   }
 276   if (_head != NULL) {
 277     gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
 278     ret = false;
 279   }
 280   if (!ret) {
 281     gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
 282   }
 283 
 284   return ret;
 285 }
 286 
 287 void
 288 YoungList::rs_length_sampling_init() {
 289   _sampled_rs_lengths = 0;
 290   _curr               = _head;
 291 }
 292 
 293 bool
 294 YoungList::rs_length_sampling_more() {
 295   return _curr != NULL;
 296 }
 297 
 298 void
 299 YoungList::rs_length_sampling_next() {
 300   assert( _curr != NULL, "invariant" );
 301   size_t rs_length = _curr->rem_set()->occupied();
 302 
 303   _sampled_rs_lengths += rs_length;
 304 
 305   // The current region may not yet have been added to the
 306   // incremental collection set (it gets added when it is
 307   // retired as the current allocation region).
 308   if (_curr->in_collection_set()) {
 309     // Update the collection set policy information for this region
 310     _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
 311   }
 312 
 313   _curr = _curr->get_next_young_region();
 314   if (_curr == NULL) {
 315     _last_sampled_rs_lengths = _sampled_rs_lengths;
 316     // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
 317   }
 318 }
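
     // A hedged sketch of the intended driver loop for the three sampling
     // methods above (the actual caller lives elsewhere in the G1 sources):
     //
     //   young_list->rs_length_sampling_init();
     //   while (young_list->rs_length_sampling_more()) {
     //     young_list->rs_length_sampling_next();
     //   }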
 319 
 320 void
 321 YoungList::reset_auxilary_lists() {
 322   guarantee( is_empty(), "young list should be empty" );
 323   assert(check_list_well_formed(), "young list should be well formed");
 324 
 325   // Add survivor regions to SurvRateGroup.
 326   _g1h->g1_policy()->note_start_adding_survivor_regions();
 327   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
 328 
 329   int young_index_in_cset = 0;
 330   for (HeapRegion* curr = _survivor_head;
 331        curr != NULL;
 332        curr = curr->get_next_young_region()) {
 333     _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
 334 
 335     // The region is a non-empty survivor so let's add it to
 336     // the incremental collection set for the next evacuation
 337     // pause.
 338     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
 339     young_index_in_cset += 1;
 340   }
 341   assert((uint) young_index_in_cset == _survivor_length, "post-condition");
 342   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 343 
 344   _head   = _survivor_head;
 345   _length = _survivor_length;
 346   if (_survivor_head != NULL) {
 347     assert(_survivor_tail != NULL, "cause it shouldn't be");
 348     assert(_survivor_length > 0, "invariant");
 349     _survivor_tail->set_next_young_region(NULL);
 350   }
 351 
 352   // Don't clear the survivor list handles until the start of
 353   // the next evacuation pause - we need it in order to re-tag
 354   // the survivor regions from this evacuation pause as 'young'
 355   // at the start of the next.
 356 
 357   _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 358 
 359   assert(check_list_well_formed(), "young list should be well formed");
 360 }
 361 
 362 void YoungList::print() {
 363   HeapRegion* lists[] = {_head,   _survivor_head};
 364   const char* names[] = {"YOUNG", "SURVIVOR"};
 365 
 366   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 367     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 368     HeapRegion *curr = lists[list];
 369     if (curr == NULL)
 370       gclog_or_tty->print_cr("  empty");
 371     while (curr != NULL) {
 372       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
 373                              HR_FORMAT_PARAMS(curr),
 374                              curr->prev_top_at_mark_start(),
 375                              curr->next_top_at_mark_start(),
 376                              curr->age_in_surv_rate_group_cond());
 377       curr = curr->get_next_young_region();
 378     }
 379   }
 380 
 381   gclog_or_tty->print_cr("");
 382 }
 383 
 384 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 385 {
 386   // Claim the right to put the region on the dirty cards region list
 387   // by installing a self pointer.
 388   HeapRegion* next = hr->get_next_dirty_cards_region();
 389   if (next == NULL) {
 390     HeapRegion* res = (HeapRegion*)
 391       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 392                           NULL);
 393     if (res == NULL) {
 394       HeapRegion* head;
 395       do {
 396         // Put the region to the dirty cards region list.
 397         head = _dirty_cards_region_list;
 398         next = (HeapRegion*)
 399           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 400         if (next == head) {
 401           assert(hr->get_next_dirty_cards_region() == hr,
 402                  "hr->get_next_dirty_cards_region() != hr");
 403           if (next == NULL) {
 404             // The last region in the list points to itself.
 405             hr->set_next_dirty_cards_region(hr);
 406           } else {
 407             hr->set_next_dirty_cards_region(next);
 408           }
 409         }
 410       } while (next != head);
 411     }
 412   }
 413 }
 414 
 415 HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
 416 {
 417   HeapRegion* head;
 418   HeapRegion* hr;
 419   do {
 420     head = _dirty_cards_region_list;
 421     if (head == NULL) {
 422       return NULL;
 423     }
 424     HeapRegion* new_head = head->get_next_dirty_cards_region();
 425     if (head == new_head) {
 426       // The last region.
 427       new_head = NULL;
 428     }
 429     hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
 430                                           head);
 431   } while (hr != head);
 432   assert(hr != NULL, "invariant");
 433   hr->set_next_dirty_cards_region(NULL);
 434   return hr;
 435 }
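
     // push_dirty_cards_region() and pop_dirty_cards_region() together form a
     // lock-free stack of regions; a hedged sketch of how a consumer would
     // drain it (the processing step is left abstract):
     //
     //   HeapRegion* hr;
     //   while ((hr = pop_dirty_cards_region()) != NULL) {
     //     ... clean the cards spanned by hr ...
     //   }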
 436 
 437 void G1CollectedHeap::stop_conc_gc_threads() {
 438   _cg1r->stop();
 439   _cmThread->stop();
 440 }
 441 
 442 #ifdef ASSERT
 443 // A region is added to the collection set as it is retired
 444 // so an address p can point to a region which will be in the
 445 // collection set but has not yet been retired.  This method
 446 // therefore is only accurate during a GC pause after all
 447 // regions have been retired.  It is used for debugging
 448 // to check if an nmethod has references to objects that can
 449 // be moved during a partial collection.  Though it can be
 450 // inaccurate, it is sufficient for G1 because the conservative
 451 // implementation of is_scavengable() for G1 will indicate that
 452 // all nmethods must be scanned during a partial collection.
 453 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 454   HeapRegion* hr = heap_region_containing(p);
 455   return hr != NULL && hr->in_collection_set();
 456 }
 457 #endif
 458 
 459 // Returns true if the reference points to an object that
 460 // can move in an incremental collection.
 461 bool G1CollectedHeap::is_scavengable(const void* p) {
 462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 463   G1CollectorPolicy* g1p = g1h->g1_policy();
 464   HeapRegion* hr = heap_region_containing(p);
 465   if (hr == NULL) {
 466      // null
 467      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT, p));
 468      return false;
 469   } else {
 470     return !hr->isHumongous();
 471   }
 472 }
 473 
 474 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 475   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 476   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 477 
 478   // Count the dirty cards at the start.
 479   CountNonCleanMemRegionClosure count1(this);
 480   ct_bs->mod_card_iterate(&count1);
 481   int orig_count = count1.n();
 482 
 483   // First clear the logged cards.
 484   ClearLoggedCardTableEntryClosure clear;
 485   dcqs.set_closure(&clear);
 486   dcqs.apply_closure_to_all_completed_buffers();
 487   dcqs.iterate_closure_all_threads(false);
 488   clear.print_histo();
 489 
 490   // Now ensure that there are no dirty cards.
 491   CountNonCleanMemRegionClosure count2(this);
 492   ct_bs->mod_card_iterate(&count2);
 493   if (count2.n() != 0) {
 494     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 495                            count2.n(), orig_count);
 496   }
 497   guarantee(count2.n() == 0, "Card table should be clean.");
 498 
 499   RedirtyLoggedCardTableEntryClosure redirty;
 500   JavaThread::dirty_card_queue_set().set_closure(&redirty);
 501   dcqs.apply_closure_to_all_completed_buffers();
 502   dcqs.iterate_closure_all_threads(false);
 503   gclog_or_tty->print_cr("Log entries = %d, dirty cards = %d.",
 504                          clear.calls(), orig_count);
 505   guarantee(redirty.calls() == clear.calls(),
 506             "Or else mechanism is broken.");
 507 
 508   CountNonCleanMemRegionClosure count3(this);
 509   ct_bs->mod_card_iterate(&count3);
 510   if (count3.n() != orig_count) {
 511     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 512                            orig_count, count3.n());
 513     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 514   }
 515 
 516   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
 517 }
 518 
 519 // Private class members.
 520 
 521 G1CollectedHeap* G1CollectedHeap::_g1h;
 522 
 523 // Private methods.
 524 
 525 HeapRegion*
 526 G1CollectedHeap::new_region_try_secondary_free_list() {
 527   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 528   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 529     if (!_secondary_free_list.is_empty()) {
 530       if (G1ConcRegionFreeingVerbose) {
 531         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 532                                "secondary_free_list has %u entries",
 533                                _secondary_free_list.length());
 534       }
 535       // It looks as if there are free regions available on the
 536       // secondary_free_list. Let's move them to the free_list and try
 537       // again to allocate from it.
 538       append_secondary_free_list();
 539 
 540       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
 541              "empty we should have moved at least one entry to the free_list");
 542       HeapRegion* res = _free_list.remove_head();
 543       if (G1ConcRegionFreeingVerbose) {
 544         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 545                                "allocated "HR_FORMAT" from secondary_free_list",
 546                                HR_FORMAT_PARAMS(res));
 547       }
 548       return res;
 549     }
 550 
 551     // Wait here until we get notified either when (a) there are no
 552     // more free regions coming or (b) some regions have been moved onto
 553     // the secondary_free_list.
 554     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 555   }
 556 
 557   if (G1ConcRegionFreeingVerbose) {
 558     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 559                            "could not allocate from secondary_free_list");
 560   }
 561   return NULL;
 562 }
 563 
 564 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
 565   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 566          "the only time we use this to allocate a humongous region is "
 567          "when we are allocating a single humongous region");
 568 
 569   HeapRegion* res;
 570   if (G1StressConcRegionFreeing) {
 571     if (!_secondary_free_list.is_empty()) {
 572       if (G1ConcRegionFreeingVerbose) {
 573         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 574                                "forced to look at the secondary_free_list");
 575       }
 576       res = new_region_try_secondary_free_list();
 577       if (res != NULL) {
 578         return res;
 579       }
 580     }
 581   }
 582   res = _free_list.remove_head_or_null();
 583   if (res == NULL) {
 584     if (G1ConcRegionFreeingVerbose) {
 585       gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 586                              "res == NULL, trying the secondary_free_list");
 587     }
 588     res = new_region_try_secondary_free_list();
 589   }
 590   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 591     // Currently, only attempts to allocate GC alloc regions set
 592     // do_expand to true. So, we should only reach here during a
 593     // safepoint. If this assumption changes we might have to
 594     // reconsider the use of _expand_heap_after_alloc_failure.
 595     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 596 
 597     ergo_verbose1(ErgoHeapSizing,
 598                   "attempt heap expansion",
 599                   ergo_format_reason("region allocation request failed")
 600                   ergo_format_byte("allocation request"),
 601                   word_size * HeapWordSize);
 602     if (expand(word_size * HeapWordSize)) {
 603       // Given that expand() succeeded in expanding the heap, and we
 604       // always expand the heap by an amount aligned to the heap
 605       // region size, the free list should in theory not be empty. So
 606       // it would probably be OK to use remove_head(). But the extra
 607       // check for NULL is unlikely to be a performance issue here (we
 608       // just expanded the heap!) so let's just be conservative and
 609       // use remove_head_or_null().
 610       res = _free_list.remove_head_or_null();
 611     } else {
 612       _expand_heap_after_alloc_failure = false;
 613     }
 614   }
 615   return res;
 616 }
 617 
 618 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
 619                                                         size_t word_size) {
 620   assert(isHumongous(word_size), "word_size should be humongous");
 621   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 622 
 623   uint first = G1_NULL_HRS_INDEX;
 624   if (num_regions == 1) {
 625     // Only one region to allocate, no need to go through the slower
 626     // path. The caller will attempt the expansion if this fails, so
 627     // let's not try to expand here too.
 628     HeapRegion* hr = new_region(word_size, false /* do_expand */);
 629     if (hr != NULL) {
 630       first = hr->hrs_index();
 631     } else {
 632       first = G1_NULL_HRS_INDEX;
 633     }
 634   } else {
 635     // We can't allocate humongous regions while cleanupComplete() is
 636     // running, since some of the regions we find to be empty might not
 637     // yet be added to the free list and it is not straightforward to
 638     // know which list they are on so that we can remove them. Note
 639     // that we only need to do this if we need to allocate more than
 640     // one region to satisfy the current humongous allocation
 641     // request. If we are only allocating one region we use the common
 642     // region allocation code (see above).
 643     wait_while_free_regions_coming();
 644     append_secondary_free_list_if_not_empty_with_lock();
 645 
 646     if (free_regions() >= num_regions) {
 647       first = _hrs.find_contiguous(num_regions);
 648       if (first != G1_NULL_HRS_INDEX) {
 649         for (uint i = first; i < first + num_regions; ++i) {
 650           HeapRegion* hr = region_at(i);
 651           assert(hr->is_empty(), "sanity");
 652           assert(is_on_master_free_list(hr), "sanity");
 653           hr->set_pending_removal(true);
 654         }
 655         _free_list.remove_all_pending(num_regions);
 656       }
 657     }
 658   }
 659   return first;
 660 }
 661 
 662 HeapWord*
 663 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 664                                                            uint num_regions,
 665                                                            size_t word_size) {
 666   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
 667   assert(isHumongous(word_size), "word_size should be humongous");
 668   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 669 
 670   // Index of last region in the series + 1.
 671   uint last = first + num_regions;
 672 
 673   // We need to initialize the region(s) we just discovered. This is
 674   // a bit tricky given that it can happen concurrently with
 675   // refinement threads refining cards on these regions and
 676   // potentially wanting to refine the BOT as they are scanning
 677   // those cards (this can happen shortly after a cleanup; see CR
 678   // 6991377). So we have to set up the region(s) carefully and in
 679   // a specific order.
 680 
 681   // The word size sum of all the regions we will allocate.
 682   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 683   assert(word_size <= word_size_sum, "sanity");
 684 
 685   // This will be the "starts humongous" region.
 686   HeapRegion* first_hr = region_at(first);
 687   // The header of the new object will be placed at the bottom of
 688   // the first region.
 689   HeapWord* new_obj = first_hr->bottom();
 690   // This will be the new end of the first region in the series, which
 691   // should also match the end of the last region in the series.
 692   HeapWord* new_end = new_obj + word_size_sum;
 693   // This will be the new top of the first region that will reflect
 694   // this allocation.
 695   HeapWord* new_top = new_obj + word_size;
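       // A hedged restatement of the values just computed (not new logic,
       // only a summary of the relationships):
       //   new_obj == first_hr->bottom()
       //   new_top == new_obj + word_size        // end of the object itself
       //   new_end == new_obj + word_size_sum    // end of the last region
       // with word_size <= word_size_sum, and hence new_top <= new_end.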
 696 
 697   // First, we need to zero the header of the space that we will be
 698   // allocating. When we update top further down, some refinement
 699   // threads might try to scan the region. By zeroing the header we
 700   // ensure that any thread that will try to scan the region will
 701   // come across the zero klass word and bail out.
 702   //
 703   // NOTE: It would not have been correct to have used
 704   // CollectedHeap::fill_with_object() and make the space look like
 705   // an int array. The thread that is doing the allocation will
 706   // later update the object header to a potentially different array
 707   // type and, for a very short period of time, the klass and length
 708   // fields will be inconsistent. This could cause a refinement
 709   // thread to calculate the object size incorrectly.
 710   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 711 
 712   // We will set up the first region as "starts humongous". This
 713   // will also update the BOT covering all the regions to reflect
 714   // that there is a single object that starts at the bottom of the
 715   // first region.
 716   first_hr->set_startsHumongous(new_top, new_end);
 717 
 718   // Then, if there are any, we will set up the "continues
 719   // humongous" regions.
 720   HeapRegion* hr = NULL;
 721   for (uint i = first + 1; i < last; ++i) {
 722     hr = region_at(i);
 723     hr->set_continuesHumongous(first_hr);
 724   }
 725   // If we have "continues humongous" regions (hr != NULL), then the
 726   // end of the last one should match new_end.
 727   assert(hr == NULL || hr->end() == new_end, "sanity");
 728 
 729   // Up to this point no concurrent thread would have been able to
 730   // do any scanning on any region in this series. All the top
 731   // fields still point to bottom, so the intersection between
 732   // [bottom,top] and [card_start,card_end] will be empty. Before we
 733   // update the top fields, we'll do a storestore to make sure that
 734   // no thread sees the update to top before the zeroing of the
 735   // object header and the BOT initialization.
 736   OrderAccess::storestore();
 737 
 738   // Now that the BOT and the object header have been initialized,
 739   // we can update top of the "starts humongous" region.
 740   assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
 741          "new_top should be in this region");
 742   first_hr->set_top(new_top);
 743   if (_hr_printer.is_active()) {
 744     HeapWord* bottom = first_hr->bottom();
 745     HeapWord* end = first_hr->orig_end();
 746     if ((first + 1) == last) {
 747       // the series has a single humongous region
 748       _hr_printer.alloc(G1HRPrinter::SingleHumongous, first_hr, new_top);
 749     } else {
 750       // the series has more than one humongous region
 751       _hr_printer.alloc(G1HRPrinter::StartsHumongous, first_hr, end);
 752     }
 753   }
 754 
 755   // Now, we will update the top fields of the "continues humongous"
 756   // regions. The reason we need to do this is that, otherwise,
 757   // these regions would look empty and this would confuse parts of
 758   // G1. For example, the code that looks for a consecutive number
 759   // of empty regions will consider them empty and try to
 760   // re-allocate them. We can extend is_empty() to also include
 761   // !continuesHumongous(), but it is easier to just update the top
 762   // fields here. The way we set top for all regions (i.e., top ==
 763   // end for all regions but the last one, top == new_top for the
 764   // last one) is actually used when we will free up the humongous
 765   // region in free_humongous_region().
 766   hr = NULL;
 767   for (uint i = first + 1; i < last; ++i) {
 768     hr = region_at(i);
 769     if ((i + 1) == last) {
 770       // last continues humongous region
 771       assert(hr->bottom() < new_top && new_top <= hr->end(),
 772              "new_top should fall on this region");
 773       hr->set_top(new_top);
 774       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
 775     } else {
 776       // not last one
 777       assert(new_top > hr->end(), "new_top should be above this region");
 778       hr->set_top(hr->end());
 779       _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
 780     }
 781   }
 782   // If we have "continues humongous" regions (hr != NULL), then the
 783   // end of the last one should match new_end and its top should
 784   // match new_top.
 785   assert(hr == NULL ||
 786          (hr->end() == new_end && hr->top() == new_top), "sanity");
 787 
 788   assert(first_hr->used() == word_size * HeapWordSize, "invariant");
 789   _summary_bytes_used += first_hr->used();
 790   _humongous_set.add(first_hr);
 791 
 792   return new_obj;
 793 }
 794 
 795 // If the allocation could fit into the free regions w/o expansion, try that.
 796 // Otherwise, if we can expand the heap, do so.
 797 // Otherwise, if using ex regions might help, try with ex regions given back.
 798 HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
 799   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 800 
 801   verify_region_sets_optional();
 802 
 803   size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
 804   uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
 805   uint x_num = expansion_regions();
 806   uint fs = _hrs.free_suffix();
 807   uint first = humongous_obj_allocate_find_first(num_regions, word_size);
 808   if (first == G1_NULL_HRS_INDEX) {
 809     // The only thing we can do now is attempt expansion.
 810     if (fs + x_num >= num_regions) {
 811       // If the number of regions we're trying to allocate for this
 812       // object is at most the number of regions in the free suffix,
 813       // then the call to humongous_obj_allocate_find_first() above
 814       // should have succeeded and we wouldn't be here.
 815       //
 816       // We should only be trying to expand when the free suffix is
 817       // not sufficient for the object _and_ we have some expansion
 818       // room available.
 819       assert(num_regions > fs, "earlier allocation should have succeeded");
 820 
 821       ergo_verbose1(ErgoHeapSizing,
 822                     "attempt heap expansion",
 823                     ergo_format_reason("humongous allocation request failed")
 824                     ergo_format_byte("allocation request"),
 825                     word_size * HeapWordSize);
 826       if (expand((num_regions - fs) * HeapRegion::GrainBytes)) {
 827         // Even though the heap was expanded, it might not have
 828         // reached the desired size. So, we cannot assume that the
 829         // allocation will succeed.
 830         first = humongous_obj_allocate_find_first(num_regions, word_size);
 831       }
 832     }
 833   }
 834 
 835   HeapWord* result = NULL;
 836   if (first != G1_NULL_HRS_INDEX) {
 837     result =
 838       humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
 839     assert(result != NULL, "it should always return a valid result");
 840 
 841     // A successful humongous object allocation changes the used space
 842     // information of the old generation so we need to recalculate the
 843     // sizes and update the jstat counters here.
 844     g1mm()->update_sizes();
 845   }
 846 
 847   verify_region_sets_optional();
 848 
 849   return result;
 850 }
 851 
 852 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 853   assert_heap_not_locked_and_not_at_safepoint();
 854   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 855 
 856   unsigned int dummy_gc_count_before;
 857   int dummy_gclocker_retry_count = 0;
 858   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 859 }
 860 
 861 HeapWord*
 862 G1CollectedHeap::mem_allocate(size_t word_size,
 863                               bool*  gc_overhead_limit_was_exceeded) {
 864   assert_heap_not_locked_and_not_at_safepoint();
 865 
 866   // Loop until the allocation is satisfied, or unsatisfied after GC.
 867   for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 868     unsigned int gc_count_before;
 869 
 870     HeapWord* result = NULL;
 871     if (!isHumongous(word_size)) {
 872       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
 873     } else {
 874       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
 875     }
 876     if (result != NULL) {
 877       return result;
 878     }
 879 
 880     // Create the garbage collection operation...
 881     VM_G1CollectForAllocation op(gc_count_before, word_size);
 882     // ...and get the VM thread to execute it.
 883     VMThread::execute(&op);
 884 
 885     if (op.prologue_succeeded() && op.pause_succeeded()) {
 886       // If the operation was successful we'll return the result even
 887       // if it is NULL. If the allocation attempt failed immediately
 888       // after a Full GC, it's unlikely we'll be able to allocate now.
 889       HeapWord* result = op.result();
 890       if (result != NULL && !isHumongous(word_size)) {
 891         // Allocations that take place on VM operations do not do any
 892         // card dirtying and we have to do it here. We only have to do
 893         // this for non-humongous allocations, though.
 894         dirty_young_block(result, word_size);
 895       }
 896       return result;
 897     } else {
 898       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 899         return NULL;
 900       }
 901       assert(op.result() == NULL,
 902              "the result should be NULL if the VM op did not succeed");
 903     }
 904 
 905     // Give a warning if we seem to be looping forever.
 906     if ((QueuedAllocationWarningCount > 0) &&
 907         (try_count % QueuedAllocationWarningCount == 0)) {
 908       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 909     }
 910   }
 911 
 912   ShouldNotReachHere();
 913   return NULL;
 914 }
 915 
 916 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 917                                            unsigned int *gc_count_before_ret,
 918                                            int* gclocker_retry_count_ret) {
 919   // Make sure you read the note in attempt_allocation_humongous().
 920 
 921   assert_heap_not_locked_and_not_at_safepoint();
 922   assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
 923          "be called for humongous allocation requests");
 924 
 925   // We should only get here after the first-level allocation attempt
 926   // (attempt_allocation()) failed to allocate.
 927 
 928   // We will loop until a) we manage to successfully perform the
 929   // allocation or b) we successfully schedule a collection which
 930   // fails to perform the allocation. b) is the only case when we'll
 931   // return NULL.
 932   HeapWord* result = NULL;
 933   for (int try_count = 1; /* we'll return */; try_count += 1) {
 934     bool should_try_gc;
 935     unsigned int gc_count_before;
 936 
 937     {
 938       MutexLockerEx x(Heap_lock);
 939 
 940       result = _mutator_alloc_region.attempt_allocation_locked(word_size,
 941                                                       false /* bot_updates */);
 942       if (result != NULL) {
 943         return result;
 944       }
 945 
 946       // If we reach here, attempt_allocation_locked() above failed to
 947       // allocate a new region. So the mutator alloc region should be NULL.
 948       assert(_mutator_alloc_region.get() == NULL, "only way to get here");
 949 
 950       if (GC_locker::is_active_and_needs_gc()) {
 951         if (g1_policy()->can_expand_young_list()) {
 952           // No need for an ergo verbose message here,
 953           // can_expand_young_list() does this when it returns true.
 954           result = _mutator_alloc_region.attempt_allocation_force(word_size,
 955                                                       false /* bot_updates */);
 956           if (result != NULL) {
 957             return result;
 958           }
 959         }
 960         should_try_gc = false;
 961       } else {
 962         // The GCLocker may not be active but the GCLocker initiated
 963         // GC may not yet have been performed (GCLocker::needs_gc()
 964         // returns true). In this case we do not try this GC and
 965         // wait until the GCLocker initiated GC is performed, and
 966         // then retry the allocation.
 967         if (GC_locker::needs_gc()) {
 968           should_try_gc = false;
 969         } else {
 970           // Read the GC count while still holding the Heap_lock.
 971           gc_count_before = total_collections();
 972           should_try_gc = true;
 973         }
 974       }
 975     }
 976 
 977     if (should_try_gc) {
 978       bool succeeded;
 979       result = do_collection_pause(word_size, gc_count_before, &succeeded);
 980       if (result != NULL) {
 981         assert(succeeded, "only way to get back a non-NULL result");
 982         return result;
 983       }
 984 
 985       if (succeeded) {
 986         // If we get here we successfully scheduled a collection which
 987         // failed to allocate. No point in trying to allocate
 988         // further. We'll just return NULL.
 989         MutexLockerEx x(Heap_lock);
 990         *gc_count_before_ret = total_collections();
 991         return NULL;
 992       }
 993     } else {
 994       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 995         MutexLockerEx x(Heap_lock);
 996         *gc_count_before_ret = total_collections();
 997         return NULL;
 998       }
 999       // The GCLocker is either active or the GCLocker initiated
1000       // GC has not yet been performed. Stall until it is and
1001       // then retry the allocation.
1002       GC_locker::stall_until_clear();
1003       (*gclocker_retry_count_ret) += 1;
1004     }
1005 
1006     // We can reach here if we were unsuccessful in scheduling a
1007     // collection (because another thread beat us to it) or if we were
1008     // stalled due to the GC locker. In either case we should retry the
1009     // allocation attempt in case another thread successfully
1010     // performed a collection and reclaimed enough space. We do the
1011     // first attempt (without holding the Heap_lock) here and the
1012     // follow-on attempt will be at the start of the next loop
1013     // iteration (after taking the Heap_lock).
1014     result = _mutator_alloc_region.attempt_allocation(word_size,
1015                                                       false /* bot_updates */);
1016     if (result != NULL) {
1017       return result;
1018     }
1019 
1020     // Give a warning if we seem to be looping forever.
1021     if ((QueuedAllocationWarningCount > 0) &&
1022         (try_count % QueuedAllocationWarningCount == 0)) {
1023       warning("G1CollectedHeap::attempt_allocation_slow() "
1024               "retries %d times", try_count);
1025     }
1026   }
1027 
1028   ShouldNotReachHere();
1029   return NULL;
1030 }
1031 
1032 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1033                                           unsigned int * gc_count_before_ret,
1034                                           int* gclocker_retry_count_ret) {
1035   // The structure of this method has a lot of similarities to
1036   // attempt_allocation_slow(). The reason these two were not merged
1037   // into a single one is that such a method would require several "if
1038   // allocation is not humongous do this, otherwise do that"
1039   // conditional paths which would obscure its flow. In fact, an early
1040   // version of this code did use a unified method which was harder to
1041   // follow and, as a result, it had subtle bugs that were hard to
1042   // track down. So keeping these two methods separate allows each to
1043   // be more readable. It will be good to keep these two in sync as
1044   // much as possible.
1045 
1046   assert_heap_not_locked_and_not_at_safepoint();
1047   assert(isHumongous(word_size), "attempt_allocation_humongous() "
1048          "should only be called for humongous allocations");
1049 
1050   // Humongous objects can exhaust the heap quickly, so we should check if we
1051   // need to start a marking cycle at each humongous object allocation. We do
1052   // the check before we do the actual allocation. The reason for doing it
1053   // before the allocation is that we avoid having to keep track of the newly
1054   // allocated memory while we do a GC.
1055   if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
1056                                            word_size)) {
1057     collect(GCCause::_g1_humongous_allocation);
1058   }
1059 
1060   // We will loop until a) we manage to successfully perform the
1061   // allocation or b) we successfully schedule a collection which
1062   // fails to perform the allocation. b) is the only case when we'll
1063   // return NULL.
1064   HeapWord* result = NULL;
1065   for (int try_count = 1; /* we'll return */; try_count += 1) {
1066     bool should_try_gc;
1067     unsigned int gc_count_before;
1068 
1069     {
1070       MutexLockerEx x(Heap_lock);
1071 
1072       // Given that humongous objects are not allocated in young
1073       // regions, we'll first try to do the allocation without doing a
1074       // collection hoping that there's enough space in the heap.
1075       result = humongous_obj_allocate(word_size);
1076       if (result != NULL) {
1077         return result;
1078       }
1079 
1080       if (GC_locker::is_active_and_needs_gc()) {
1081         should_try_gc = false;
1082       } else {
1083         // The GCLocker may not be active but the GCLocker initiated
1084         // GC may not yet have been performed (GCLocker::needs_gc()
1085         // returns true). In this case we do not try this GC and
1086         // wait until the GCLocker initiated GC is performed, and
1087         // then retry the allocation.
1088         if (GC_locker::needs_gc()) {
1089           should_try_gc = false;
1090         } else {
1091           // Read the GC count while still holding the Heap_lock.
1092           gc_count_before = total_collections();
1093           should_try_gc = true;
1094         }
1095       }
1096     }
1097 
1098     if (should_try_gc) {
1099       // If we failed to allocate the humongous object, we should try to
1100       // do a collection pause (if we're allowed) in case it reclaims
1101       // enough space for the allocation to succeed after the pause.
1102 
1103       bool succeeded;
1104       result = do_collection_pause(word_size, gc_count_before, &succeeded);
1105       if (result != NULL) {
1106         assert(succeeded, "only way to get back a non-NULL result");
1107         return result;
1108       }
1109 
1110       if (succeeded) {
1111         // If we get here we successfully scheduled a collection which
1112         // failed to allocate. No point in trying to allocate
1113         // further. We'll just return NULL.
1114         MutexLockerEx x(Heap_lock);
1115         *gc_count_before_ret = total_collections();
1116         return NULL;
1117       }
1118     } else {
1119       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1120         MutexLockerEx x(Heap_lock);
1121         *gc_count_before_ret = total_collections();
1122         return NULL;
1123       }
1124       // The GCLocker is either active or the GCLocker initiated
1125       // GC has not yet been performed. Stall until it is and
1126       // then retry the allocation.
1127       GC_locker::stall_until_clear();
1128       (*gclocker_retry_count_ret) += 1;
1129     }
1130 
1131     // We can reach here if we were unsuccessful in scheduling a
1132     // collection (because another thread beat us to it) or if we were
1133     // stalled due to the GC locker. In either case we should retry the
1134     // allocation attempt in case another thread successfully
1135     // performed a collection and reclaimed enough space.  Give a
1136     // warning if we seem to be looping forever.
1137 
1138     if ((QueuedAllocationWarningCount > 0) &&
1139         (try_count % QueuedAllocationWarningCount == 0)) {
1140       warning("G1CollectedHeap::attempt_allocation_humongous() "
1141               "retries %d times", try_count);
1142     }
1143   }
1144 
1145   ShouldNotReachHere();
1146   return NULL;
1147 }
1148 
1149 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1150                                        bool expect_null_mutator_alloc_region) {
1151   assert_at_safepoint(true /* should_be_vm_thread */);
1152   assert(_mutator_alloc_region.get() == NULL ||
1153                                              !expect_null_mutator_alloc_region,
1154          "the current alloc region was unexpectedly found to be non-NULL");
1155 
1156   if (!isHumongous(word_size)) {
1157     return _mutator_alloc_region.attempt_allocation_locked(word_size,
1158                                                       false /* bot_updates */);
1159   } else {
1160     HeapWord* result = humongous_obj_allocate(word_size);
1161     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1162       g1_policy()->set_initiate_conc_mark_if_possible();
1163     }
1164     return result;
1165   }
1166 
1167   ShouldNotReachHere();
1168 }
1169 
1170 class PostMCRemSetClearClosure: public HeapRegionClosure {
1171   G1CollectedHeap* _g1h;
1172   ModRefBarrierSet* _mr_bs;
1173 public:
1174   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1175     _g1h(g1h), _mr_bs(mr_bs) { }
1176   bool doHeapRegion(HeapRegion* r) {
1177     if (r->continuesHumongous()) {
1178       return false;
1179     }
1180     _g1h->reset_gc_time_stamps(r);
1181     HeapRegionRemSet* hrrs = r->rem_set();
1182     if (hrrs != NULL) hrrs->clear();
1183     // You might think here that we could clear just the cards
1184     // corresponding to the used region.  But no: if we leave a dirty card
1185     // in a region we might allocate into, then it would prevent that card
1186     // from being enqueued, and cause it to be missed.
1187     // Re: the performance cost: we shouldn't be doing full GC anyway!
1188     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1189     return false;
1190   }
1191 };
1192 
1193 void G1CollectedHeap::clear_rsets_post_compaction() {
1194   PostMCRemSetClearClosure rs_clear(this, mr_bs());
1195   heap_region_iterate(&rs_clear);
1196 }
1197 
1198 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1199   G1CollectedHeap*   _g1h;
1200   UpdateRSOopClosure _cl;
1201   int                _worker_i;
1202 public:
1203   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1204     _cl(g1->g1_rem_set(), worker_i),
1205     _worker_i(worker_i),
1206     _g1h(g1)
1207   { }
1208 
1209   bool doHeapRegion(HeapRegion* r) {
1210     if (!r->continuesHumongous()) {
1211       _cl.set_from(r);
1212       r->oop_iterate(&_cl);
1213     }
1214     return false;
1215   }
1216 };
1217 
1218 class ParRebuildRSTask: public AbstractGangTask {
1219   G1CollectedHeap* _g1;
1220 public:
1221   ParRebuildRSTask(G1CollectedHeap* g1)
1222     : AbstractGangTask("ParRebuildRSTask"),
1223       _g1(g1)
1224   { }
1225 
1226   void work(uint worker_id) {
1227     RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1228     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
1229                                           _g1->workers()->active_workers(),
1230                                          HeapRegion::RebuildRSClaimValue);
1231   }
1232 };
1233 
1234 class PostCompactionPrinterClosure: public HeapRegionClosure {
1235 private:
1236   G1HRPrinter* _hr_printer;
1237 public:
1238   bool doHeapRegion(HeapRegion* hr) {
1239     assert(!hr->is_young(), "not expecting to find young regions");
1240     // We only generate output for non-empty regions.
1241     if (!hr->is_empty()) {
1242       if (!hr->isHumongous()) {
1243         _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1244       } else if (hr->startsHumongous()) {
1245         if (hr->region_num() == 1) {
1246           // single humongous region
1247           _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1248         } else {
1249           _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1250         }
1251       } else {
1252         assert(hr->continuesHumongous(), "only way to get here");
1253         _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1254       }
1255     }
1256     return false;
1257   }
1258 
1259   PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1260     : _hr_printer(hr_printer) { }
1261 };
1262 
1263 void G1CollectedHeap::print_hrs_post_compaction() {
1264   PostCompactionPrinterClosure cl(hr_printer());
1265   heap_region_iterate(&cl);
1266 }
1267 
1268 double G1CollectedHeap::verify(bool guard, const char* msg) {
1269   double verify_time_ms = 0.0;
1270 
1271   if (guard && total_collections() >= VerifyGCStartAt) {
1272     double verify_start = os::elapsedTime();
1273     HandleMark hm;  // Discard invalid handles created during verification
1274     prepare_for_verify();
1275     Universe::verify(VerifyOption_G1UsePrevMarking, msg);
1276     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
1277   }
1278 
1279   return verify_time_ms;
1280 }
1281 
1282 void G1CollectedHeap::verify_before_gc() {
1283   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1284   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1285 }
1286 
1287 void G1CollectedHeap::verify_after_gc() {
1288   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1289   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1290 }
1291 
1292 bool G1CollectedHeap::do_collection(bool explicit_gc,
1293                                     bool clear_all_soft_refs,
1294                                     size_t word_size) {
1295   assert_at_safepoint(true /* should_be_vm_thread */);
1296 
1297   if (GC_locker::check_active_before_gc()) {
1298     return false;
1299   }
1300 
1301   SvcGCMarker sgcm(SvcGCMarker::FULL);
1302   ResourceMark rm;
1303 
1304   print_heap_before_gc();
1305 
1306   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1307 
1308   HRSPhaseSetter x(HRSPhaseFullGC);
1309   verify_region_sets_optional();
1310 
1311   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1312                            collector_policy()->should_clear_all_soft_refs();
1313 
1314   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1315 
1316   {
1317     IsGCActiveMark x;
1318 
1319     // Timing
1320     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1321     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1322     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1323 
1324     {
1325       TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
1326       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1327       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1328 
1329       double start = os::elapsedTime();
1330       g1_policy()->record_full_collection_start();
1331 
1332       // Note: When we have a more flexible GC logging framework that
1333       // allows us to add optional attributes to a GC log record we
1334       // could consider timing and reporting how long we wait in the
1335       // following two methods.
1336       wait_while_free_regions_coming();
1337       // If we start the compaction before the CM threads finish
1338       // scanning the root regions we might trip them over as we'll
1339       // be moving objects / updating references. So let's wait until
1340       // they are done. By telling them to abort, they should complete
1341       // early.
1342       _cm->root_regions()->abort();
1343       _cm->root_regions()->wait_until_scan_finished();
1344       append_secondary_free_list_if_not_empty_with_lock();
1345 
1346       gc_prologue(true);
1347       increment_total_collections(true /* full gc */);
1348       increment_old_marking_cycles_started();
1349 
1350       assert(used() == recalculate_used(), "Should be equal");
1351 
1352       verify_before_gc();
1353 
1354       pre_full_gc_dump();
1355 
1356       COMPILER2_PRESENT(DerivedPointerTable::clear());
1357 
1358       // Disable discovery and empty the discovered lists
1359       // for the CM ref processor.
1360       ref_processor_cm()->disable_discovery();
1361       ref_processor_cm()->abandon_partial_discovery();
1362       ref_processor_cm()->verify_no_references_recorded();
1363 
1364       // Abandon current iterations of concurrent marking and concurrent
1365       // refinement, if any are in progress. We have to do this before
1366       // wait_until_scan_finished() below.
1367       concurrent_mark()->abort();
1368 
1369       // Make sure we'll choose a new allocation region afterwards.
1370       release_mutator_alloc_region();
1371       abandon_gc_alloc_regions();
1372       g1_rem_set()->cleanupHRRS();
1373 
1374       // We should call this after we retire any currently active alloc
1375       // regions so that all the ALLOC / RETIRE events are generated
1376       // before the start GC event.
1377       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1378 
1379       // We may have added regions to the current incremental collection
1380       // set between the last GC or pause and now. We need to clear the
1381       // incremental collection set and then start rebuilding it afresh
1382       // after this full GC.
1383       abandon_collection_set(g1_policy()->inc_cset_head());
1384       g1_policy()->clear_incremental_cset();
1385       g1_policy()->stop_incremental_cset_building();
1386 
1387       tear_down_region_sets(false /* free_list_only */);
1388       g1_policy()->set_gcs_are_young(true);
1389 
1390       // See the comments in g1CollectedHeap.hpp and
1391       // G1CollectedHeap::ref_processing_init() about
1392       // how reference processing currently works in G1.
1393 
1394       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1395       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1396 
1397       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1398       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1399 
1400       ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
1401       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1402 
1403       // Do collection work
1404       {
1405         HandleMark hm;  // Discard invalid handles created during gc
1406         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1407       }
1408 
1409       assert(free_regions() == 0, "we should not have added any free regions");
1410       rebuild_region_sets(false /* free_list_only */);
1411 
1412       // Enqueue any discovered reference objects that have
1413       // not been removed from the discovered lists.
1414       ref_processor_stw()->enqueue_discovered_references();
1415 
1416       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1417 
1418       MemoryService::track_memory_usage();
1419 
1420       verify_after_gc();
1421 
1422       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1423       ref_processor_stw()->verify_no_references_recorded();
1424 
1425       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1426       ClassLoaderDataGraph::purge();
1427       MetaspaceAux::verify_metrics();
1428 
1429       // Note: since we've just done a full GC, concurrent
1430       // marking is no longer active. Therefore we need not
1431       // re-enable reference discovery for the CM ref processor.
1432       // That will be done at the start of the next marking cycle.
1433       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1434       ref_processor_cm()->verify_no_references_recorded();
1435 
1436       reset_gc_time_stamp();
1437       // Since everything potentially moved, we will clear all remembered
1438       // sets, and clear all cards.  Later we will rebuild remembered
1439       // sets. We will also reset the GC time stamps of the regions.
1440       clear_rsets_post_compaction();
1441       check_gc_time_stamps();
1442 
1443       // Resize the heap if necessary.
1444       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1445 
1446       if (_hr_printer.is_active()) {
1447         // We should do this after we potentially resize the heap so
1448         // that all the COMMIT / UNCOMMIT events are generated before
1449         // the end GC event.
1450 
1451         print_hrs_post_compaction();
1452         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1453       }
1454 
1455       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1456       if (hot_card_cache->use_cache()) {
1457         hot_card_cache->reset_card_counts();
1458         hot_card_cache->reset_hot_cache();
1459       }
1460 
1461       // Rebuild remembered sets of all regions.
1462       if (G1CollectedHeap::use_parallel_gc_threads()) {
1463         uint n_workers =
1464           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1465                                                   workers()->active_workers(),
1466                                                   Threads::number_of_non_daemon_threads());
1467         assert(UseDynamicNumberOfGCThreads ||
1468                n_workers == workers()->total_workers(),
1469                "If not dynamic should be using all the  workers");
1470         workers()->set_active_workers(n_workers);
1471         // Set parallel threads in the heap (_n_par_threads) only
1472         // before a parallel phase and always reset it to 0 after
1473         // the phase so that the number of parallel threads does
1474       // not get carried forward to a serial phase where there
1475         // may be code that is "possibly_parallel".
1476         set_par_threads(n_workers);
1477 
1478         ParRebuildRSTask rebuild_rs_task(this);
1479         assert(check_heap_region_claim_values(
1480                HeapRegion::InitialClaimValue), "sanity check");
1481         assert(UseDynamicNumberOfGCThreads ||
1482                workers()->active_workers() == workers()->total_workers(),
1483                "Unless dynamic should use total workers");
1484         // Use the most recent number of  active workers
1485         assert(workers()->active_workers() > 0,
1486                "Active workers not properly set");
1487         set_par_threads(workers()->active_workers());
1488         workers()->run_task(&rebuild_rs_task);
1489         set_par_threads(0);
1490         assert(check_heap_region_claim_values(
1491                HeapRegion::RebuildRSClaimValue), "sanity check");
1492         reset_heap_region_claim_values();
1493       } else {
1494         RebuildRSOutOfRegionClosure rebuild_rs(this);
1495         heap_region_iterate(&rebuild_rs);
1496       }
1497 
1498       if (true) { // FIXME
1499         MetaspaceGC::compute_new_size();
1500       }
1501 
1502 #ifdef TRACESPINNING
1503       ParallelTaskTerminator::print_termination_counts();
1504 #endif
1505 
1506       // Discard all rset updates
1507       JavaThread::dirty_card_queue_set().abandon_logs();
1508       assert(!G1DeferredRSUpdate
1509              || (G1DeferredRSUpdate &&
1510                 (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
1511 
1512       _young_list->reset_sampled_info();
1513       // At this point there should be no regions in the
1514       // entire heap tagged as young.
1515       assert(check_young_list_empty(true /* check_heap */),
1516              "young list should be empty at this point");
1517 
1518       // Update the number of full collections that have been completed.
1519       increment_old_marking_cycles_completed(false /* concurrent */);
1520 
1521       _hrs.verify_optional();
1522       verify_region_sets_optional();
1523 
1524       // Start a new incremental collection set for the next pause
1525       assert(g1_policy()->collection_set() == NULL, "must be");
1526       g1_policy()->start_incremental_cset_building();
1527 
1528       // Clear the _cset_fast_test bitmap in anticipation of adding
1529       // regions to the incremental collection set for the next
1530       // evacuation pause.
1531       clear_cset_fast_test();
1532 
1533       init_mutator_alloc_region();
1534 
1535       double end = os::elapsedTime();
1536       g1_policy()->record_full_collection_end();
1537 
1538       if (G1Log::fine()) {
1539         g1_policy()->print_heap_transition();
1540       }
1541 
1542       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1543       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1544       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1545       // before any GC notifications are raised.
1546       g1mm()->update_sizes();
1547 
1548       gc_epilogue(true);
1549     }
1550 
1551     if (G1Log::finer()) {
1552       g1_policy()->print_detailed_heap_transition(true /* full */);
1553     }
1554 
1555     print_heap_after_gc();
1556 
1557     post_full_gc_dump();
1558   }
1559 
1560   return true;
1561 }
1562 
1563 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1564   // do_collection() will return whether it succeeded in performing
1565   // the GC. Currently, there is no facility on the
1566   // do_full_collection() API to notify the caller that the collection
1567   // did not succeed (e.g., because it was locked out by the GC
1568   // locker). So, right now, we'll ignore the return value.
1569   bool dummy = do_collection(true,                /* explicit_gc */
1570                              clear_all_soft_refs,
1571                              0                    /* word_size */);
1572 }
1573 
1574 // This code is mostly copied from TenuredGeneration.
1575 void
1576 G1CollectedHeap::
1577 resize_if_necessary_after_full_collection(size_t word_size) {
1578   assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
1579 
1580   // Include the current allocation, if any, and bytes that will be
1581   // pre-allocated to support collections, as "used".
1582   const size_t used_after_gc = used();
1583   const size_t capacity_after_gc = capacity();
1584   const size_t free_after_gc = capacity_after_gc - used_after_gc;
1585 
1586   // This is enforced in arguments.cpp.
1587   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1588          "otherwise the code below doesn't make sense");
1589 
1590   // We don't have floating point command-line arguments
1591   const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1592   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1593   const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1594   const double minimum_used_percentage = 1.0 - maximum_free_percentage;
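       // Illustrative example (not from the original source): with
       // MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70 these factors are
       // maximum_used_percentage = 0.60 and minimum_used_percentage = 0.30,
       // so a post-GC occupancy of 300 MB yields a minimum desired capacity
       // of 300 / 0.60 = 500 MB and a maximum desired capacity of
       // 300 / 0.30 = 1000 MB.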
1595 
1596   const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1597   const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1598 
1599   // We have to be careful here as these two calculations can overflow
1600   // 32-bit size_t's.
1601   double used_after_gc_d = (double) used_after_gc;
1602   double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1603   double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
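       // For instance (illustrative numbers only): used_after_gc = 3 GB with
       // minimum_used_percentage = 0.30 gives a desired capacity of 10 GB,
       // which would not fit in a 32-bit size_t if the division were done in
       // integer arithmetic; computing in double and clamping below avoids that.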
1604 
1605   // Let's make sure that they are both under the max heap size, which
1606   // by default will make them fit into a size_t.
1607   double desired_capacity_upper_bound = (double) max_heap_size;
1608   minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1609                                     desired_capacity_upper_bound);
1610   maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1611                                     desired_capacity_upper_bound);
1612 
1613   // We can now safely turn them into size_t's.
1614   size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1615   size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1616 
1617   // This assert only makes sense here, before we adjust them
1618   // with respect to the min and max heap size.
1619   assert(minimum_desired_capacity <= maximum_desired_capacity,
1620          err_msg("minimum_desired_capacity = "SIZE_FORMAT", "
1621                  "maximum_desired_capacity = "SIZE_FORMAT,
1622                  minimum_desired_capacity, maximum_desired_capacity));
1623 
1624   // Should not be greater than the heap max size. No need to adjust
1625   // it with respect to the heap min size as it's a lower bound (i.e.,
1626   // we'll try to make the capacity larger than it, not smaller).
1627   minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1628   // Should not be less than the heap min size. No need to adjust it
1629   // with respect to the heap max size as it's an upper bound (i.e.,
1630   // we'll try to make the capacity smaller than it, not greater).
1631   maximum_desired_capacity = MAX2(maximum_desired_capacity, min_heap_size);
1632 
1633   if (capacity_after_gc < minimum_desired_capacity) {
1634     // Don't expand unless it's significant
1635     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1636     ergo_verbose4(ErgoHeapSizing,
1637                   "attempt heap expansion",
1638                   ergo_format_reason("capacity lower than "
1639                                      "min desired capacity after Full GC")
1640                   ergo_format_byte("capacity")
1641                   ergo_format_byte("occupancy")
1642                   ergo_format_byte_perc("min desired capacity"),
1643                   capacity_after_gc, used_after_gc,
1644                   minimum_desired_capacity, (double) MinHeapFreeRatio);
1645     expand(expand_bytes);
1646 
1647     // No expansion, now see if we want to shrink
1648   } else if (capacity_after_gc > maximum_desired_capacity) {
1649     // Capacity too large, compute shrinking size
1650     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1651     ergo_verbose4(ErgoHeapSizing,
1652                   "attempt heap shrinking",
1653                   ergo_format_reason("capacity higher than "
1654                                      "max desired capacity after Full GC")
1655                   ergo_format_byte("capacity")
1656                   ergo_format_byte("occupancy")
1657                   ergo_format_byte_perc("max desired capacity"),
1658                   capacity_after_gc, used_after_gc,
1659                   maximum_desired_capacity, (double) MaxHeapFreeRatio);
1660     shrink(shrink_bytes);
1661   }
1662 }
1663 
1664 
1665 HeapWord*
1666 G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1667                                            bool* succeeded) {
1668   assert_at_safepoint(true /* should_be_vm_thread */);
1669 
1670   *succeeded = true;
1671   // Let's attempt the allocation first.
1672   HeapWord* result =
1673     attempt_allocation_at_safepoint(word_size,
1674                                  false /* expect_null_mutator_alloc_region */);
1675   if (result != NULL) {
1676     assert(*succeeded, "sanity");
1677     return result;
1678   }
1679 
1680   // In a G1 heap, we're supposed to keep allocation from failing by
1681   // incremental pauses.  Therefore, at least for now, we'll favor
1682   // expansion over collection.  (This might change in the future if we can
1683   // do something smarter than full collection to satisfy a failed alloc.)
1684   result = expand_and_allocate(word_size);
1685   if (result != NULL) {
1686     assert(*succeeded, "sanity");
1687     return result;
1688   }
1689 
1690   // Expansion didn't work, we'll try to do a Full GC.
1691   bool gc_succeeded = do_collection(false, /* explicit_gc */
1692                                     false, /* clear_all_soft_refs */
1693                                     word_size);
1694   if (!gc_succeeded) {
1695     *succeeded = false;
1696     return NULL;
1697   }
1698 
1699   // Retry the allocation
1700   result = attempt_allocation_at_safepoint(word_size,
1701                                   true /* expect_null_mutator_alloc_region */);
1702   if (result != NULL) {
1703     assert(*succeeded, "sanity");
1704     return result;
1705   }
1706 
1707   // Then, try a Full GC that will collect all soft references.
1708   gc_succeeded = do_collection(false, /* explicit_gc */
1709                                true,  /* clear_all_soft_refs */
1710                                word_size);
1711   if (!gc_succeeded) {
1712     *succeeded = false;
1713     return NULL;
1714   }
1715 
1716   // Retry the allocation once more
1717   result = attempt_allocation_at_safepoint(word_size,
1718                                   true /* expect_null_mutator_alloc_region */);
1719   if (result != NULL) {
1720     assert(*succeeded, "sanity");
1721     return result;
1722   }
1723 
1724   assert(!collector_policy()->should_clear_all_soft_refs(),
1725          "Flag should have been handled and cleared prior to this point");
1726 
1727   // What else?  We might try synchronous finalization later.  If the total
1728   // space available is large enough for the allocation, then a more
1729   // complete compaction phase than we've tried so far might be
1730   // appropriate.
1731   assert(*succeeded, "sanity");
1732   return NULL;
1733 }
1734 
1735 // Attempts to expand the heap sufficiently to support an allocation
1736 // of the given "word_size". If successful, performs the allocation
1737 // and returns the address of the allocated block; otherwise returns
1738 // "NULL".
1739 
1740 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1741   assert_at_safepoint(true /* should_be_vm_thread */);
1742 
1743   verify_region_sets_optional();
1744 
1745   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1746   ergo_verbose1(ErgoHeapSizing,
1747                 "attempt heap expansion",
1748                 ergo_format_reason("allocation request failed")
1749                 ergo_format_byte("allocation request"),
1750                 word_size * HeapWordSize);
1751   if (expand(expand_bytes)) {
1752     _hrs.verify_optional();
1753     verify_region_sets_optional();
1754     return attempt_allocation_at_safepoint(word_size,
1755                                  false /* expect_null_mutator_alloc_region */);
1756   }
1757   return NULL;
1758 }
1759 
1760 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1761                                              HeapWord* new_end) {
1762   assert(old_end != new_end, "don't call this otherwise");
1763   assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1764 
1765   // Update the committed mem region.
1766   _g1_committed.set_end(new_end);
1767   // Tell the card table about the update.
1768   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1769   // Tell the BOT about the update.
1770   _bot_shared->resize(_g1_committed.word_size());
1771   // Tell the hot card cache about the update
1772   _cg1r->hot_card_cache()->resize_card_counts(capacity());
1773 }
1774 
1775 bool G1CollectedHeap::expand(size_t expand_bytes) {
1776   size_t old_mem_size = _g1_storage.committed_size();
1777   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1778   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1779                                        HeapRegion::GrainBytes);
1780   ergo_verbose2(ErgoHeapSizing,
1781                 "expand the heap",
1782                 ergo_format_byte("requested expansion amount")
1783                 ergo_format_byte("attempted expansion amount"),
1784                 expand_bytes, aligned_expand_bytes);
1785 
1786   // First commit the memory.
1787   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1788   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1789   if (successful) {
1790     // Then propagate this update to the necessary data structures.
1791     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1792     update_committed_space(old_end, new_end);
1793 
1794     FreeRegionList expansion_list("Local Expansion List");
1795     MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list);
1796     assert(mr.start() == old_end, "post-condition");
1797     // mr might be a smaller region than what was requested if
1798     // expand_by() was unable to allocate the HeapRegion instances
1799     assert(mr.end() <= new_end, "post-condition");
1800 
1801     size_t actual_expand_bytes = mr.byte_size();
1802     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1803     assert(actual_expand_bytes == expansion_list.total_capacity_bytes(),
1804            "post-condition");
1805     if (actual_expand_bytes < aligned_expand_bytes) {
1806       // We could not expand _hrs to the desired size. In this case we
1807       // need to shrink the committed space accordingly.
1808       assert(mr.end() < new_end, "invariant");
1809 
1810       size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes;
1811       // First uncommit the memory.
1812       _g1_storage.shrink_by(diff_bytes);
1813       // Then propagate this update to the necessary data structures.
1814       update_committed_space(new_end, mr.end());
1815     }
1816     _free_list.add_as_tail(&expansion_list);
1817 
1818     if (_hr_printer.is_active()) {
1819       HeapWord* curr = mr.start();
1820       while (curr < mr.end()) {
1821         HeapWord* curr_end = curr + HeapRegion::GrainWords;
1822         _hr_printer.commit(curr, curr_end);
1823         curr = curr_end;
1824       }
1825       assert(curr == mr.end(), "post-condition");
1826     }
1827     g1_policy()->record_new_heap_size(n_regions());
1828   } else {
1829     ergo_verbose0(ErgoHeapSizing,
1830                   "did not expand the heap",
1831                   ergo_format_reason("heap expansion operation failed"));
1832     // The expansion of the virtual storage space was unsuccessful.
1833     // Let's see if it was because we ran out of swap.
1834     if (G1ExitOnExpansionFailure &&
1835         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
1836       // We had head room...
1837       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1838     }
1839   }
1840   return successful;
1841 }
1842 
1843 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1844   size_t old_mem_size = _g1_storage.committed_size();
1845   size_t aligned_shrink_bytes =
1846     ReservedSpace::page_align_size_down(shrink_bytes);
1847   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1848                                          HeapRegion::GrainBytes);
1849   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1850 
1851   uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
1852   HeapWord* old_end = (HeapWord*) _g1_storage.high();
1853   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1854 
1855   ergo_verbose3(ErgoHeapSizing,
1856                 "shrink the heap",
1857                 ergo_format_byte("requested shrinking amount")
1858                 ergo_format_byte("aligned shrinking amount")
1859                 ergo_format_byte("attempted shrinking amount"),
1860                 shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1861   if (num_regions_removed > 0) {
1862     _g1_storage.shrink_by(shrunk_bytes);
1863     HeapWord* new_end = (HeapWord*) _g1_storage.high();
1864 
1865     if (_hr_printer.is_active()) {
1866       HeapWord* curr = old_end;
1867       while (curr > new_end) {
1868         HeapWord* curr_end = curr;
1869         curr -= HeapRegion::GrainWords;
1870         _hr_printer.uncommit(curr, curr_end);
1871       }
1872     }
1873 
1874     _expansion_regions += num_regions_removed;
1875     update_committed_space(old_end, new_end);
1876     HeapRegionRemSet::shrink_heap(n_regions());
1877     g1_policy()->record_new_heap_size(n_regions());
1878   } else {
1879     ergo_verbose0(ErgoHeapSizing,
1880                   "did not shrink the heap",
1881                   ergo_format_reason("heap shrinking operation failed"));
1882   }
1883 }
1884 
1885 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1886   verify_region_sets_optional();
1887 
1888   // We should only reach here at the end of a Full GC which means we
1889   // should not be holding on to any GC alloc regions. The method
1890   // below will make sure of that and do any remaining clean up.
1891   abandon_gc_alloc_regions();
1892 
1893   // Instead of tearing down / rebuilding the free lists here, we
1894   // could instead use the remove_all_pending() method on free_list to
1895   // remove only the ones that we need to remove.
1896   tear_down_region_sets(true /* free_list_only */);
1897   shrink_helper(shrink_bytes);
1898   rebuild_region_sets(true /* free_list_only */);
1899 
1900   _hrs.verify_optional();
1901   verify_region_sets_optional();
1902 }
1903 
1904 // Public methods.
1905 
1906 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1907 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1908 #endif // _MSC_VER
1909 
1910 
1911 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1912   SharedHeap(policy_),
1913   _g1_policy(policy_),
1914   _dirty_card_queue_set(false),
1915   _into_cset_dirty_card_queue_set(false),
1916   _is_alive_closure_cm(this),
1917   _is_alive_closure_stw(this),
1918   _ref_processor_cm(NULL),
1919   _ref_processor_stw(NULL),
1920   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1921   _bot_shared(NULL),
1922   _evac_failure_scan_stack(NULL) ,
1923   _mark_in_progress(false),
1924   _cg1r(NULL), _summary_bytes_used(0),
1925   _g1mm(NULL),
1926   _refine_cte_cl(NULL),
1927   _full_collection(false),
1928   _free_list("Master Free List"),
1929   _secondary_free_list("Secondary Free List"),
1930   _old_set("Old Set"),
1931   _humongous_set("Master Humongous Set"),
1932   _free_regions_coming(false),
1933   _young_list(new YoungList(this)),
1934   _gc_time_stamp(0),
1935   _retained_old_gc_alloc_region(NULL),
1936   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1937   _old_plab_stats(OldPLABSize, PLABWeight),
1938   _expand_heap_after_alloc_failure(true),
1939   _surviving_young_words(NULL),
1940   _old_marking_cycles_started(0),
1941   _old_marking_cycles_completed(0),
1942   _in_cset_fast_test(NULL),
1943   _in_cset_fast_test_base(NULL),
1944   _dirty_cards_region_list(NULL),
1945   _worker_cset_start_region(NULL),
1946   _worker_cset_start_region_time_stamp(NULL) {
1947   _g1h = this; // To catch bugs.
1948   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1949     vm_exit_during_initialization("Failed necessary allocation.");
1950   }
1951 
1952   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
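       // Illustrative: with a 1 MB region size this makes any allocation of
       // 512 KB or more a humongous object, which G1 places in dedicated
       // regions rather than allocating through the young-generation fast path.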
1953 
1954   int n_queues = MAX2((int)ParallelGCThreads, 1);
1955   _task_queues = new RefToScanQueueSet(n_queues);
1956 
1957   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1958   assert(n_rem_sets > 0, "Invariant.");
1959 
1960   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1961   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1962 
1963   for (int i = 0; i < n_queues; i++) {
1964     RefToScanQueue* q = new RefToScanQueue();
1965     q->initialize();
1966     _task_queues->register_queue(i, q);
1967   }
1968 
1969   clear_cset_start_regions();
1970 
1971   // Initialize the G1EvacuationFailureALot counters and flags.
1972   NOT_PRODUCT(reset_evacuation_should_fail();)
1973 
1974   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1975 }
1976 
1977 jint G1CollectedHeap::initialize() {
1978   CollectedHeap::pre_initialize();
1979   os::enable_vtime();
1980 
1981   G1Log::init();
1982 
1983   // Necessary to satisfy locking discipline assertions.
1984 
1985   MutexLocker x(Heap_lock);
1986 
1987   // We have to initialize the printer before committing the heap, as
1988   // it will be used then.
1989   _hr_printer.set_active(G1PrintHeapRegions);
1990 
1991   // While there are no constraints in the GC code that HeapWordSize
1992   // be any particular value, there are multiple other areas in the
1993   // system which believe this to be true (e.g. oop->object_size in some
1994   // cases incorrectly returns the size in wordSize units rather than
1995   // HeapWordSize).
1996   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1997 
1998   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1999   size_t max_byte_size = collector_policy()->max_heap_byte_size();
2000 
2001   // Ensure that the sizes are properly aligned.
2002   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
2003   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2004 
2005   _cg1r = new ConcurrentG1Refine(this);
2006 
2007   // Reserve the maximum.
2008 
2009   // When compressed oops are enabled, the preferred heap base
2010   // is calculated by subtracting the requested size from the
2011   // 32Gb boundary and using the result as the base address for
2012   // heap reservation. If the requested size is not aligned to
2013   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2014   // into the ReservedHeapSpace constructor) then the actual
2015   // base of the reserved heap may end up differing from the
2016   // address that was requested (i.e. the preferred heap base).
2017   // If this happens then we could end up using a non-optimal
2018   // compressed oops mode.
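       // Example (illustrative): a 4 GB maximum heap would request a base of
       // 28 GB so that the heap ends at the 32 GB boundary and stays entirely
       // below it, keeping zero-based compressed oops (no base offset) possible;
       // if alignment shifts the actual base, a less optimal mode may be chosen.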
2019 
2020   // Note that max_byte_size is aligned to the size of a heap region
2021   // (checked above).
2022   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2023 
2024   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2025                                                  HeapRegion::GrainBytes);
2026 
2027   // It is important to do this in a way such that concurrent readers can't
2028   // temporarily think something is in the heap.  (I've actually seen this
2029   // happen in asserts: DLD.)
2030   _reserved.set_word_size(0);
2031   _reserved.set_start((HeapWord*)heap_rs.base());
2032   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2033 
2034   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2035 
2036   // Create the gen rem set (and barrier set) for the entire reserved region.
2037   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2038   set_barrier_set(rem_set()->bs());
2039   if (barrier_set()->is_a(BarrierSet::ModRef)) {
2040     _mr_bs = (ModRefBarrierSet*)_barrier_set;
2041   } else {
2042     vm_exit_during_initialization("G1 requires a mod ref bs.");
2043     return JNI_ENOMEM;
2044   }
2045 
2046   // Also create a G1 rem set.
2047   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2048     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2049   } else {
2050     vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2051     return JNI_ENOMEM;
2052   }
2053 
2054   // Carve out the G1 part of the heap.
2055 
2056   ReservedSpace g1_rs   = heap_rs.first_part(max_byte_size);
2057   _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2058                            g1_rs.size()/HeapWordSize);
2059 
2060   _g1_storage.initialize(g1_rs, 0);
2061   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2062   _hrs.initialize((HeapWord*) _g1_reserved.start(),
2063                   (HeapWord*) _g1_reserved.end(),
2064                   _expansion_regions);
2065 
2066   // Do later initialization work for concurrent refinement.
2067   _cg1r->init();
2068 
2069   // 6843694 - ensure that the maximum region index can fit
2070   // in the remembered set structures.
2071   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2072   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2073 
2074   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2075   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2076   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2077             "too many cards per region");
2078 
2079   HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2080 
2081   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2082                                              heap_word_size(init_byte_size));
2083 
2084   _g1h = this;
2085 
2086   _in_cset_fast_test_length = max_regions();
2087   _in_cset_fast_test_base =
2088                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2089 
2090   // We're biasing _in_cset_fast_test to avoid subtracting the
2091   // beginning of the heap every time we want to index; basically
2092   // it's the same with what we do with the card table.
2093   _in_cset_fast_test = _in_cset_fast_test_base -
2094                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
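     // Sketch of the biased lookup this enables (illustrative, not the actual
     // accessor): for an object address addr inside the reserved space,
     //   _in_cset_fast_test[(uintx) addr >> HeapRegion::LogOfHRGrainBytes]
     // reads the flag for the containing region without first subtracting
     // _g1_reserved.start().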
2095 
2096   // Clear the _cset_fast_test bitmap in anticipation of adding
2097   // regions to the incremental collection set for the first
2098   // evacuation pause.
2099   clear_cset_fast_test();
2100 
2101   // Create the ConcurrentMark data structure and thread.
2102   // (Must do this late, so that "max_regions" is defined.)
2103   _cm = new ConcurrentMark(this, heap_rs);
2104   if (_cm == NULL || !_cm->completed_initialization()) {
2105     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2106     return JNI_ENOMEM;
2107   }
2108   _cmThread = _cm->cmThread();
2109 
2110   // Initialize the from_card cache structure of HeapRegionRemSet.
2111   HeapRegionRemSet::init_heap(max_regions());
2112 
2113   // Now expand into the initial heap size.
2114   if (!expand(init_byte_size)) {
2115     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2116     return JNI_ENOMEM;
2117   }
2118 
2119   // Perform any initialization actions delegated to the policy.
2120   g1_policy()->init();
2121 
2122   _refine_cte_cl =
2123     new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
2124                                     g1_rem_set(),
2125                                     concurrent_g1_refine());
2126   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
2127 
2128   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2129                                                SATB_Q_FL_lock,
2130                                                G1SATBProcessCompletedThreshold,
2131                                                Shared_SATB_Q_lock);
2132 
2133   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2134                                                 DirtyCardQ_FL_lock,
2135                                                 concurrent_g1_refine()->yellow_zone(),
2136                                                 concurrent_g1_refine()->red_zone(),
2137                                                 Shared_DirtyCardQ_lock);
2138 
2139   if (G1DeferredRSUpdate) {
2140     dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
2141                                       DirtyCardQ_FL_lock,
2142                                       -1, // never trigger processing
2143                                       -1, // no limit on length
2144                                       Shared_DirtyCardQ_lock,
2145                                       &JavaThread::dirty_card_queue_set());
2146   }
2147 
2148   // Initialize the card queue set used to hold cards containing
2149   // references into the collection set.
2150   _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2151                                              DirtyCardQ_FL_lock,
2152                                              -1, // never trigger processing
2153                                              -1, // no limit on length
2154                                              Shared_DirtyCardQ_lock,
2155                                              &JavaThread::dirty_card_queue_set());
2156 
2157   // In case we're keeping closure specialization stats, initialize those
2158   // counts and that mechanism.
2159   SpecializationStats::clear();
2160 
2161   // Here we allocate the dummy full region that is required by the
2162   // G1AllocRegion class. If we don't pass an address in the reserved
2163   // space here, lots of asserts fire.
2164 
2165   HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2166                                              _g1_reserved.start());
2167   // We'll re-use the same region whether the alloc region will
2168   // require BOT updates or not and, if it doesn't, then a non-young
2169   // region will complain that it cannot support allocations without
2170   // BOT updates. So we'll tag the dummy region as young to avoid that.
2171   dummy_region->set_young();
2172   // Make sure it's full.
2173   dummy_region->set_top(dummy_region->end());
2174   G1AllocRegion::setup(this, dummy_region);
2175 
2176   init_mutator_alloc_region();
2177 
2178   // Create the monitoring and management support now so that
2179   // values in the heap have been properly initialized.
2180   _g1mm = new G1MonitoringSupport(this);
2181 
2182   return JNI_OK;
2183 }
2184 
2185 size_t G1CollectedHeap::max_heap_alignment() {
2186   return HeapRegion::max_heap_alignment();
2187 }
2188 
2189 void G1CollectedHeap::ref_processing_init() {
2190   // Reference processing in G1 currently works as follows:
2191   //
2192   // * There are two reference processor instances. One is
2193   //   used to record and process discovered references
2194   //   during concurrent marking; the other is used to
2195   //   record and process references during STW pauses
2196   //   (both full and incremental).
2197   // * Both ref processors need to 'span' the entire heap as
2198   //   the regions in the collection set may be dotted around.
2199   //
2200   // * For the concurrent marking ref processor:
2201   //   * Reference discovery is enabled at initial marking.
2202   //   * Reference discovery is disabled and the discovered
2203   //     references processed etc during remarking.
2204   //   * Reference discovery is MT (see below).
2205   //   * Reference discovery requires a barrier (see below).
2206   //   * Reference processing may or may not be MT
2207   //     (depending on the value of ParallelRefProcEnabled
2208   //     and ParallelGCThreads).
2209   //   * A full GC disables reference discovery by the CM
2210   //     ref processor and abandons any entries on its
2211   //     discovered lists.
2212   //
2213   // * For the STW processor:
2214   //   * Non MT discovery is enabled at the start of a full GC.
2215   //   * Processing and enqueueing during a full GC is non-MT.
2216   //   * During a full GC, references are processed after marking.
2217   //
2218   //   * Discovery (may or may not be MT) is enabled at the start
2219   //     of an incremental evacuation pause.
2220   //   * References are processed near the end of a STW evacuation pause.
2221   //   * For both types of GC:
2222   //     * Discovery is atomic - i.e. not concurrent.
2223   //     * Reference discovery will not need a barrier.
2224 
2225   SharedHeap::ref_processing_init();
2226   MemRegion mr = reserved_region();
2227 
2228   // Concurrent Mark ref processor
2229   _ref_processor_cm =
2230     new ReferenceProcessor(mr,    // span
2231                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2232                                 // mt processing
2233                            (int) ParallelGCThreads,
2234                                 // degree of mt processing
2235                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2236                                 // mt discovery
2237                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2238                                 // degree of mt discovery
2239                            false,
2240                                 // Reference discovery is not atomic
2241                            &_is_alive_closure_cm,
2242                                 // is alive closure
2243                                 // (for efficiency/performance)
2244                            true);
2245                                 // Setting next fields of discovered
2246                                 // lists requires a barrier.
2247 
2248   // STW ref processor
2249   _ref_processor_stw =
2250     new ReferenceProcessor(mr,    // span
2251                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2252                                 // mt processing
2253                            MAX2((int)ParallelGCThreads, 1),
2254                                 // degree of mt processing
2255                            (ParallelGCThreads > 1),
2256                                 // mt discovery
2257                            MAX2((int)ParallelGCThreads, 1),
2258                                 // degree of mt discovery
2259                            true,
2260                                 // Reference discovery is atomic
2261                            &_is_alive_closure_stw,
2262                                 // is alive closure
2263                                 // (for efficiency/performance)
2264                            false);
2265                                 // Setting next fields of discovered
2266                                 // lists requires a barrier.
2267 }
2268 
2269 size_t G1CollectedHeap::capacity() const {
2270   return _g1_committed.byte_size();
2271 }
2272 
2273 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2274   assert(!hr->continuesHumongous(), "pre-condition");
2275   hr->reset_gc_time_stamp();
2276   if (hr->startsHumongous()) {
2277     uint first_index = hr->hrs_index() + 1;
2278     uint last_index = hr->last_hc_index();
2279     for (uint i = first_index; i < last_index; i += 1) {
2280       HeapRegion* chr = region_at(i);
2281       assert(chr->continuesHumongous(), "sanity");
2282       chr->reset_gc_time_stamp();
2283     }
2284   }
2285 }
2286 
2287 #ifndef PRODUCT
2288 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2289 private:
2290   unsigned _gc_time_stamp;
2291   bool _failures;
2292 
2293 public:
2294   CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2295     _gc_time_stamp(gc_time_stamp), _failures(false) { }
2296 
2297   virtual bool doHeapRegion(HeapRegion* hr) {
2298     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2299     if (_gc_time_stamp != region_gc_time_stamp) {
2300       gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
2301                              "expected %d", HR_FORMAT_PARAMS(hr),
2302                              region_gc_time_stamp, _gc_time_stamp);
2303       _failures = true;
2304     }
2305     return false;
2306   }
2307 
2308   bool failures() { return _failures; }
2309 };
2310 
2311 void G1CollectedHeap::check_gc_time_stamps() {
2312   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2313   heap_region_iterate(&cl);
2314   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2315 }
2316 #endif // PRODUCT
2317 
2318 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2319                                                  DirtyCardQueue* into_cset_dcq,
2320                                                  bool concurrent,
2321                                                  int worker_i) {
2322   // Clean cards in the hot card cache
2323   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2324   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2325 
2326   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2327   int n_completed_buffers = 0;
2328   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2329     n_completed_buffers++;
2330   }
2331   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2332   dcqs.clear_n_completed_buffers();
2333   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2334 }
2335 
2336 
2337 // Computes the sum of the storage used by the various regions.
2338 
2339 size_t G1CollectedHeap::used() const {
2340   assert(Heap_lock->owner() != NULL,
2341          "Should be owned on this thread's behalf.");
2342   size_t result = _summary_bytes_used;
2343   // Read only once in case it is set to NULL concurrently
2344   HeapRegion* hr = _mutator_alloc_region.get();
2345   if (hr != NULL)
2346     result += hr->used();
2347   return result;
2348 }
2349 
2350 size_t G1CollectedHeap::used_unlocked() const {
2351   size_t result = _summary_bytes_used;
2352   return result;
2353 }
2354 
2355 class SumUsedClosure: public HeapRegionClosure {
2356   size_t _used;
2357 public:
2358   SumUsedClosure() : _used(0) {}
2359   bool doHeapRegion(HeapRegion* r) {
2360     if (!r->continuesHumongous()) {
2361       _used += r->used();
2362     }
2363     return false;
2364   }
2365   size_t result() { return _used; }
2366 };
2367 
2368 size_t G1CollectedHeap::recalculate_used() const {
2369   SumUsedClosure blk;
2370   heap_region_iterate(&blk);
2371   return blk.result();
2372 }
2373 
2374 size_t G1CollectedHeap::unsafe_max_alloc() {
2375   if (free_regions() > 0) return HeapRegion::GrainBytes;
2376   // otherwise, is there space in the current allocation region?
2377 
2378   // We need to store the current allocation region in a local variable
2379   // here. The problem is that this method doesn't take any locks and
2380   // there may be other threads which overwrite the current allocation
2381   // region field. attempt_allocation(), for example, sets it to NULL
2382   // and this can happen *after* the NULL check here but before the call
2383   // to free(), resulting in a SIGSEGV. Note that this doesn't appear
2384   // to be a problem in the optimized build, since the two loads of the
2385   // current allocation region field are optimized away.
2386   HeapRegion* hr = _mutator_alloc_region.get();
2387   if (hr == NULL) {
2388     return 0;
2389   }
2390   return hr->free();
2391 }
2392 
2393 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2394   switch (cause) {
2395     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2396     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
2397     case GCCause::_g1_humongous_allocation: return true;
2398     default:                                return false;
2399   }
2400 }
2401 
2402 #ifndef PRODUCT
2403 void G1CollectedHeap::allocate_dummy_regions() {
2404   // Let's fill up most of the region
2405   size_t word_size = HeapRegion::GrainWords - 1024;
2406   // And as a result the region we'll allocate will be humongous.
2407   guarantee(isHumongous(word_size), "sanity");
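       // Illustrative note: the request is only 1024 words short of a full
       // region, comfortably above the half-region humongous threshold, so
       // humongous_obj_allocate() below will carve out dedicated region space.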
2408 
2409   for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2410     // Let's use the existing mechanism for the allocation
2411     HeapWord* dummy_obj = humongous_obj_allocate(word_size);
2412     if (dummy_obj != NULL) {
2413       MemRegion mr(dummy_obj, word_size);
2414       CollectedHeap::fill_with_object(mr);
2415     } else {
2416       // If we can't allocate once, we probably cannot allocate
2417       // again. Let's get out of the loop.
2418       break;
2419     }
2420   }
2421 }
2422 #endif // !PRODUCT
2423 
2424 void G1CollectedHeap::increment_old_marking_cycles_started() {
2425   assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2426     _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2427     err_msg("Wrong marking cycle count (started: %d, completed: %d)",
2428     _old_marking_cycles_started, _old_marking_cycles_completed));
2429 
2430   _old_marking_cycles_started++;
2431 }
2432 
2433 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2434   MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2435 
2436   // We assume that if concurrent == true, then the caller is a
2437   // concurrent thread that has joined the Suspendible Thread
2438   // Set. If there's ever a cheap way to check this, we should add an
2439   // assert here.
2440 
2441   // Given that this method is called at the end of a Full GC or of a
2442   // concurrent cycle, and those can be nested (i.e., a Full GC can
2443   // interrupt a concurrent cycle), the number of full collections
2444   // completed should be either one (in the case where there was no
2445   // nesting) or two (when a Full GC interrupted a concurrent cycle)
2446   // behind the number of full collections started.
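       // Worked scenario (illustrative): starting from started == completed == 4,
       // a concurrent cycle begins (started = 5) and is then interrupted by a
       // Full GC (started = 6). The Full GC, the inner caller, completes first
       // (completed = 5) and the aborted concurrent cycle, the outer caller,
       // completes last (completed = 6).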
2447 
2448   // This is the case for the inner caller, i.e. a Full GC.
2449   assert(concurrent ||
2450          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2451          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2452          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2453                  "is inconsistent with _old_marking_cycles_completed = %u",
2454                  _old_marking_cycles_started, _old_marking_cycles_completed));
2455 
2456   // This is the case for the outer caller, i.e. the concurrent cycle.
2457   assert(!concurrent ||
2458          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2459          err_msg("for outer caller (concurrent cycle): "
2460                  "_old_marking_cycles_started = %u "
2461                  "is inconsistent with _old_marking_cycles_completed = %u",
2462                  _old_marking_cycles_started, _old_marking_cycles_completed));
2463 
2464   _old_marking_cycles_completed += 1;
2465 
2466   // We need to clear the "in_progress" flag in the CM thread before
2467   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2468   // is set) so that if a waiter requests another System.gc() it doesn't
2469   // incorrectly see that a marking cycle is still in progress.
2470   if (concurrent) {
2471     _cmThread->clear_in_progress();
2472   }
2473 
2474   // This notify_all() will ensure that a thread that called
2475   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2476   // and it's waiting for a full GC to finish will be woken up. It is
2477   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2478   FullGCCount_lock->notify_all();
2479 }
2480 
2481 void G1CollectedHeap::collect(GCCause::Cause cause) {
2482   assert_heap_not_locked();
2483 
2484   unsigned int gc_count_before;
2485   unsigned int old_marking_count_before;
2486   bool retry_gc;
2487 
2488   do {
2489     retry_gc = false;
2490 
2491     {
2492       MutexLocker ml(Heap_lock);
2493 
2494       // Read the GC count while holding the Heap_lock
2495       gc_count_before = total_collections();
2496       old_marking_count_before = _old_marking_cycles_started;
2497     }
2498 
2499     if (should_do_concurrent_full_gc(cause)) {
2500       // Schedule an initial-mark evacuation pause that will start a
2501       // concurrent cycle. We're setting word_size to 0 which means that
2502       // we are not requesting a post-GC allocation.
2503       VM_G1IncCollectionPause op(gc_count_before,
2504                                  0,     /* word_size */
2505                                  true,  /* should_initiate_conc_mark */
2506                                  g1_policy()->max_pause_time_ms(),
2507                                  cause);
2508 
2509       VMThread::execute(&op);
2510       if (!op.pause_succeeded()) {
2511         if (old_marking_count_before == _old_marking_cycles_started) {
2512           retry_gc = op.should_retry_gc();
2513         } else {
2514           // A Full GC happened while we were trying to schedule the
2515           // initial-mark GC. No point in starting a new cycle given
2516           // that the whole heap was collected anyway.
2517         }
2518 
2519         if (retry_gc) {
2520           if (GC_locker::is_active_and_needs_gc()) {
2521             GC_locker::stall_until_clear();
2522           }
2523         }
2524       }
2525     } else {
2526       if (cause == GCCause::_gc_locker
2527           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2528 
2529         // Schedule a standard evacuation pause. We're setting word_size
2530         // to 0 which means that we are not requesting a post-GC allocation.
2531         VM_G1IncCollectionPause op(gc_count_before,
2532                                    0,     /* word_size */
2533                                    false, /* should_initiate_conc_mark */
2534                                    g1_policy()->max_pause_time_ms(),
2535                                    cause);
2536         VMThread::execute(&op);
2537       } else {
2538         // Schedule a Full GC.
2539         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2540         VMThread::execute(&op);
2541       }
2542     }
2543   } while (retry_gc);
2544 }
2545 
2546 bool G1CollectedHeap::is_in(const void* p) const {
2547   if (_g1_committed.contains(p)) {
2548     // Given that we know that p is in the committed space,
2549     // heap_region_containing_raw() should successfully
2550     // return the containing region.
2551     HeapRegion* hr = heap_region_containing_raw(p);
2552     return hr->is_in(p);
2553   } else {
2554     return false;
2555   }
2556 }
2557 
2558 // Iteration functions.
2559 
2560 // Iterates an OopClosure over all ref-containing fields of objects
2561 // within a HeapRegion.
2562 
2563 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2564   MemRegion _mr;
2565   ExtendedOopClosure* _cl;
2566 public:
2567   IterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
2568     : _mr(mr), _cl(cl) {}
2569   bool doHeapRegion(HeapRegion* r) {
2570     if (!r->continuesHumongous()) {
2571       r->oop_iterate(_cl);
2572     }
2573     return false;
2574   }
2575 };
2576 
2577 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2578   IterateOopClosureRegionClosure blk(_g1_committed, cl);
2579   heap_region_iterate(&blk);
2580 }
2581 
2582 void G1CollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
2583   IterateOopClosureRegionClosure blk(mr, cl);
2584   heap_region_iterate(&blk);
2585 }
2586 
2587 // Iterates an ObjectClosure over all objects within a HeapRegion.
2588 
2589 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2590   ObjectClosure* _cl;
2591 public:
2592   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2593   bool doHeapRegion(HeapRegion* r) {
2594     if (!r->continuesHumongous()) {
2595       r->object_iterate(_cl);
2596     }
2597     return false;
2598   }
2599 };
2600 
2601 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2602   IterateObjectClosureRegionClosure blk(cl);
2603   heap_region_iterate(&blk);
2604 }
2605 
2606 void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
2607   // FIXME: is this right?
2608   guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
2609 }
2610 
2611 // Calls a SpaceClosure on a HeapRegion.
2612 
2613 class SpaceClosureRegionClosure: public HeapRegionClosure {
2614   SpaceClosure* _cl;
2615 public:
2616   SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
2617   bool doHeapRegion(HeapRegion* r) {
2618     _cl->do_space(r);
2619     return false;
2620   }
2621 };
2622 
2623 void G1CollectedHeap::space_iterate(SpaceClosure* cl) {
2624   SpaceClosureRegionClosure blk(cl);
2625   heap_region_iterate(&blk);
2626 }
2627 
2628 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2629   _hrs.iterate(cl);
2630 }
2631 
2632 void
2633 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
2634                                                  uint worker_id,
2635                                                  uint no_of_par_workers,
2636                                                  jint claim_value) {
2637   const uint regions = n_regions();
2638   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
2639                              no_of_par_workers :
2640                              1);
2641   assert(UseDynamicNumberOfGCThreads ||
2642          no_of_par_workers == workers()->total_workers(),
2643          "Non dynamic should use fixed number of workers");
2644   // try to spread out the starting points of the workers
2645   const HeapRegion* start_hr =
2646                         start_region_for_worker(worker_id, no_of_par_workers);
2647   const uint start_index = start_hr->hrs_index();
2648 
2649   // each worker will actually look at all regions
2650   for (uint count = 0; count < regions; ++count) {
2651     const uint index = (start_index + count) % regions;
2652     assert(0 <= index && index < regions, "sanity");
2653     HeapRegion* r = region_at(index);
2654     // we'll ignore "continues humongous" regions (we'll process them
2655     // when we come across their corresponding "starts humongous"
2656     // region) and regions already claimed
2657     if (r->claim_value() == claim_value || r->continuesHumongous()) {
2658       continue;
2659     }
2660     // OK, try to claim it
2661     if (r->claimHeapRegion(claim_value)) {
2662       // success!
2663       assert(!r->continuesHumongous(), "sanity");
2664       if (r->startsHumongous()) {
2665         // If the region is "starts humongous" we'll iterate over its
2666         // "continues humongous" regions first; in fact we'll do them
2667         // first. The order is important: calling the closure on the
2668         // "starts humongous" region might de-allocate and clear all
2669         // its "continues humongous" regions and, as a result, we
2670         // might end up processing them twice. So, we'll do them first
2671         // (notice: most closures will ignore them anyway) and then
2672         // we'll do the "starts humongous" region.
2673         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2674           HeapRegion* chr = region_at(ch_index);
2675 
2676           // if the region has already been claimed or it's not
2677           // "continues humongous" we're done
2678           if (chr->claim_value() == claim_value ||
2679               !chr->continuesHumongous()) {
2680             break;
2681           }
2682 
2683           // No one should have claimed it directly. We can assert
2684           // this given that we claimed its "starts humongous" region.
2685           assert(chr->claim_value() != claim_value, "sanity");
2686           assert(chr->humongous_start_region() == r, "sanity");
2687 
2688           if (chr->claimHeapRegion(claim_value)) {
2689             // we should always be able to claim it; no one else should
2690             // be trying to claim this region
2691 
2692             bool res2 = cl->doHeapRegion(chr);
2693             assert(!res2, "Should not abort");
2694 
2695             // Right now, this holds (i.e., no closure that actually
2696             // does something with "continues humongous" regions
2697             // clears them). We might have to weaken it in the future,
2698             // but let's leave these two asserts here for extra safety.
2699             assert(chr->continuesHumongous(), "should still be the case");
2700             assert(chr->humongous_start_region() == r, "sanity");
2701           } else {
2702             guarantee(false, "we should not reach here");
2703           }
2704         }
2705       }
2706 
2707       assert(!r->continuesHumongous(), "sanity");
2708       bool res = cl->doHeapRegion(r);
2709       assert(!res, "Should not abort");
2710     }
2711   }
2712 }
2713 
2714 class ResetClaimValuesClosure: public HeapRegionClosure {
2715 public:
2716   bool doHeapRegion(HeapRegion* r) {
2717     r->set_claim_value(HeapRegion::InitialClaimValue);
2718     return false;
2719   }
2720 };
2721 
2722 void G1CollectedHeap::reset_heap_region_claim_values() {
2723   ResetClaimValuesClosure blk;
2724   heap_region_iterate(&blk);
2725 }
2726 
2727 void G1CollectedHeap::reset_cset_heap_region_claim_values() {
2728   ResetClaimValuesClosure blk;
2729   collection_set_iterate(&blk);
2730 }
2731 
2732 #ifdef ASSERT
2733 // This checks whether all regions in the heap have the correct claim
2734 // value. It also piggy-backs a check to ensure that the
2735 // humongous_start_region() information on "continues humongous"
2736 // regions is correct.
2737 
2738 class CheckClaimValuesClosure : public HeapRegionClosure {
2739 private:
2740   jint _claim_value;
2741   uint _failures;
2742   HeapRegion* _sh_region;
2743 
2744 public:
2745   CheckClaimValuesClosure(jint claim_value) :
2746     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
2747   bool doHeapRegion(HeapRegion* r) {
2748     if (r->claim_value() != _claim_value) {
2749       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2750                              "claim value = %d, should be %d",
2751                              HR_FORMAT_PARAMS(r),
2752                              r->claim_value(), _claim_value);
2753       ++_failures;
2754     }
2755     if (!r->isHumongous()) {
2756       _sh_region = NULL;
2757     } else if (r->startsHumongous()) {
2758       _sh_region = r;
2759     } else if (r->continuesHumongous()) {
2760       if (r->humongous_start_region() != _sh_region) {
2761         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
2762                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
2763                                HR_FORMAT_PARAMS(r),
2764                                r->humongous_start_region(),
2765                                _sh_region);
2766         ++_failures;
2767       }
2768     }
2769     return false;
2770   }
2771   uint failures() { return _failures; }
2772 };
2773 
2774 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
2775   CheckClaimValuesClosure cl(claim_value);
2776   heap_region_iterate(&cl);
2777   return cl.failures() == 0;
2778 }
2779 
2780 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
2781 private:
2782   jint _claim_value;
2783   uint _failures;
2784 
2785 public:
2786   CheckClaimValuesInCSetHRClosure(jint claim_value) :
2787     _claim_value(claim_value), _failures(0) { }
2788 
2789   uint failures() { return _failures; }
2790 
2791   bool doHeapRegion(HeapRegion* hr) {
2792     assert(hr->in_collection_set(), "how?");
2793     assert(!hr->isHumongous(), "H-region in CSet");
2794     if (hr->claim_value() != _claim_value) {
2795       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
2796                              "claim value = %d, should be %d",
2797                              HR_FORMAT_PARAMS(hr),
2798                              hr->claim_value(), _claim_value);
2799       _failures += 1;
2800     }
2801     return false;
2802   }
2803 };
2804 
2805 bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
2806   CheckClaimValuesInCSetHRClosure cl(claim_value);
2807   collection_set_iterate(&cl);
2808   return cl.failures() == 0;
2809 }
2810 #endif // ASSERT
2811 
2812 // Clear the cached CSet starting regions and (more importantly)
2813 // the time stamps. Called when we reset the GC time stamp.
2814 void G1CollectedHeap::clear_cset_start_regions() {
2815   assert(_worker_cset_start_region != NULL, "sanity");
2816   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2817 
2818   int n_queues = MAX2((int)ParallelGCThreads, 1);
2819   for (int i = 0; i < n_queues; i++) {
2820     _worker_cset_start_region[i] = NULL;
2821     _worker_cset_start_region_time_stamp[i] = 0;
2822   }
2823 }
2824 
2825 // Given the id of a worker, obtain or calculate a suitable
2826 // starting region for iterating over the current collection set.
2827 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2828   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2829 
2830   HeapRegion* result = NULL;
2831   unsigned gc_time_stamp = get_gc_time_stamp();
2832 
2833   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2834     // Cached starting region for current worker was set
2835     // during the current pause - so it's valid.
2836     // Note: the cached starting heap region may be NULL
2837     // (when the collection set is empty).
2838     result = _worker_cset_start_region[worker_i];
2839     assert(result == NULL || result->in_collection_set(), "sanity");
2840     return result;
2841   }
2842 
2843   // The cached entry was not valid so let's calculate
2844   // a suitable starting heap region for this worker.
2845 
2846   // We want the parallel threads to start their collection
2847   // set iteration at different collection set regions to
2848   // avoid contention.
2849   // If we have:
2850   //          n collection set regions
2851   //          p threads
2852   // Then thread t will start at region floor ((t * n) / p)
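       // For example, with n = 8 collection set regions and p = 4 threads,
       // thread 0 starts at region 0, thread 1 at region 2, thread 2 at
       // region 4 and thread 3 at region 6.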
2853 
2854   result = g1_policy()->collection_set();
2855   if (G1CollectedHeap::use_parallel_gc_threads()) {
2856     uint cs_size = g1_policy()->cset_region_length();
2857     uint active_workers = workers()->active_workers();
2858     assert(UseDynamicNumberOfGCThreads ||
2859              active_workers == workers()->total_workers(),
2860              "Unless dynamic should use total workers");
2861 
2862     uint end_ind   = (cs_size * worker_i) / active_workers;
2863     uint start_ind = 0;
2864 
2865     if (worker_i > 0 &&
2866         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2867       // The previous worker's starting region is valid,
2868       // so let's iterate from there.
2869       start_ind = (cs_size * (worker_i - 1)) / active_workers;
2870       result = _worker_cset_start_region[worker_i - 1];
2871     }
2872 
2873     for (uint i = start_ind; i < end_ind; i++) {
2874       result = result->next_in_collection_set();
2875     }
2876   }
2877 
2878   // Note: the calculated starting heap region may be NULL
2879   // (when the collection set is empty).
2880   assert(result == NULL || result->in_collection_set(), "sanity");
2881   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2882          "should be updated only once per pause");
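       // Publish the cached start region before the time stamp: the
       // storestore barrier below orders the two stores so that the time
       // stamp is not visible before the region it validates.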
2883   _worker_cset_start_region[worker_i] = result;
2884   OrderAccess::storestore();
2885   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2886   return result;
2887 }
2888 
2889 HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
2890                                                      uint no_of_par_workers) {
2891   uint worker_num =
2892            G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
2893   assert(UseDynamicNumberOfGCThreads ||
2894          no_of_par_workers == workers()->total_workers(),
2895          "Non dynamic should use fixed number of workers");
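       // For example, with 1024 regions and 4 parallel workers, worker 0
       // starts at region 0, worker 1 at region 256, worker 2 at region 512
       // and worker 3 at region 768.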
2896   const uint start_index = n_regions() * worker_i / worker_num;
2897   return region_at(start_index);
2898 }
2899 
2900 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2901   HeapRegion* r = g1_policy()->collection_set();
2902   while (r != NULL) {
2903     HeapRegion* next = r->next_in_collection_set();
2904     if (cl->doHeapRegion(r)) {
2905       cl->incomplete();
2906       return;
2907     }
2908     r = next;
2909   }
2910 }
2911 
2912 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2913                                                   HeapRegionClosure *cl) {
2914   if (r == NULL) {
2915     // The CSet is empty so there's nothing to do.
2916     return;
2917   }
2918 
2919   assert(r->in_collection_set(),
2920          "Start region must be a member of the collection set.");
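       // Note: in both loops below the "&& false" means that a "true"
       // return from the closure does not actually abort the iteration.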
2921   HeapRegion* cur = r;
2922   while (cur != NULL) {
2923     HeapRegion* next = cur->next_in_collection_set();
2924     if (cl->doHeapRegion(cur) && false) {
2925       cl->incomplete();
2926       return;
2927     }
2928     cur = next;
2929   }
2930   cur = g1_policy()->collection_set();
2931   while (cur != r) {
2932     HeapRegion* next = cur->next_in_collection_set();
2933     if (cl->doHeapRegion(cur) && false) {
2934       cl->incomplete();
2935       return;
2936     }
2937     cur = next;
2938   }
2939 }
2940 
2941 CompactibleSpace* G1CollectedHeap::first_compactible_space() {
2942   return n_regions() > 0 ? region_at(0) : NULL;
2943 }
2944 
2945 
2946 Space* G1CollectedHeap::space_containing(const void* addr) const {
2947   Space* res = heap_region_containing(addr);
2948   return res;
2949 }
2950 
2951 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2952   Space* sp = space_containing(addr);
2953   if (sp != NULL) {
2954     return sp->block_start(addr);
2955   }
2956   return NULL;
2957 }
2958 
2959 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2960   Space* sp = space_containing(addr);
2961   assert(sp != NULL, "block_size of address outside of heap");
2962   return sp->block_size(addr);
2963 }
2964 
2965 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2966   Space* sp = space_containing(addr);
2967   return sp->block_is_obj(addr);
2968 }
2969 
2970 bool G1CollectedHeap::supports_tlab_allocation() const {
2971   return true;
2972 }
2973 
2974 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2975   return HeapRegion::GrainBytes;
2976 }
2977 
2978 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2979   // Return the remaining space in the current alloc region, but not less
2980   // than the min TLAB size.
2981 
2982   // Also, this value can be at most the humongous object threshold,
2983   // since we can't allow TLABs to grow big enough to accommodate
2984   // humongous objects.
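       // So, when a mutator alloc region is active, the result is its free
       // space clamped to the range [MinTLABSize, humongous threshold in bytes].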
2985 
2986   HeapRegion* hr = _mutator_alloc_region.get();
2987   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
2988   if (hr == NULL) {
2989     return max_tlab_size;
2990   } else {
2991     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
2992   }
2993 }
2994 
2995 size_t G1CollectedHeap::max_capacity() const {
2996   return _g1_reserved.byte_size();
2997 }
2998 
2999 jlong G1CollectedHeap::millis_since_last_gc() {
3000   // assert(false, "NYI");
3001   return 0;
3002 }
3003 
3004 void G1CollectedHeap::prepare_for_verify() {
3005   if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
3006     ensure_parsability(false);
3007   }
3008   g1_rem_set()->prepare_for_verify();
3009 }
3010 
3011 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
3012                                               VerifyOption vo) {
3013   switch (vo) {
3014   case VerifyOption_G1UsePrevMarking:
3015     return hr->obj_allocated_since_prev_marking(obj);
3016   case VerifyOption_G1UseNextMarking:
3017     return hr->obj_allocated_since_next_marking(obj);
3018   case VerifyOption_G1UseMarkWord:
3019     return false;
3020   default:
3021     ShouldNotReachHere();
3022   }
3023   return false; // keep some compilers happy
3024 }
3025 
3026 HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
3027   switch (vo) {
3028   case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
3029   case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
3030   case VerifyOption_G1UseMarkWord:    return NULL;
3031   default:                            ShouldNotReachHere();
3032   }
3033   return NULL; // keep some compilers happy
3034 }
3035 
3036 bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
3037   switch (vo) {
3038   case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
3039   case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
3040   case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
3041   default:                            ShouldNotReachHere();
3042   }
3043   return false; // keep some compilers happy
3044 }
3045 
3046 const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
3047   switch (vo) {
3048   case VerifyOption_G1UsePrevMarking: return "PTAMS";
3049   case VerifyOption_G1UseNextMarking: return "NTAMS";
3050   case VerifyOption_G1UseMarkWord:    return "NONE";
3051   default:                            ShouldNotReachHere();
3052   }
3053   return NULL; // keep some compilers happy
3054 }
3055 
3056 class VerifyLivenessOopClosure: public OopClosure {
3057   G1CollectedHeap* _g1h;
3058   VerifyOption _vo;
3059 public:
3060   VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
3061     _g1h(g1h), _vo(vo)
3062   { }
3063   void do_oop(narrowOop *p) { do_oop_work(p); }
3064   void do_oop(      oop *p) { do_oop_work(p); }
3065 
3066   template <class T> void do_oop_work(T *p) {
3067     oop obj = oopDesc::load_decode_heap_oop(p);
3068     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
3069               "Dead object referenced by a not dead object");
3070   }
3071 };
3072 
3073 class VerifyObjsInRegionClosure: public ObjectClosure {
3074 private:
3075   G1CollectedHeap* _g1h;
3076   size_t _live_bytes;
3077   HeapRegion *_hr;
3078   VerifyOption _vo;
3079 public:
3080   // _vo == UsePrevMarking -> use "prev" marking information,
3081   // _vo == UseNextMarking -> use "next" marking information,
3082   // _vo == UseMarkWord    -> use mark word from object header.
3083   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
3084     : _live_bytes(0), _hr(hr), _vo(vo) {
3085     _g1h = G1CollectedHeap::heap();
3086   }
3087   void do_object(oop o) {
3088     VerifyLivenessOopClosure isLive(_g1h, _vo);
3089     assert(o != NULL, "Huh?");
3090     if (!_g1h->is_obj_dead_cond(o, _vo)) {
3091       // If the object is alive according to the mark word,
3092       // then verify that the marking information agrees.
3093       // Note we can't verify the contrapositive of the
3094       // above: if the object is dead (according to the mark
3095       // word), it may not be marked, or may have been marked
3096       // but has since become dead, or may have been allocated
3097       // since the last marking.
3098       if (_vo == VerifyOption_G1UseMarkWord) {
3099         guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3100       }
3101 
3102       o->oop_iterate_no_header(&isLive);
3103       if (!_hr->obj_allocated_since_prev_marking(o)) {
3104         size_t obj_size = o->size();    // Make sure we don't overflow
3105         _live_bytes += (obj_size * HeapWordSize);
3106       }
3107     }
3108   }
3109   size_t live_bytes() { return _live_bytes; }
3110 };
3111 
3112 class PrintObjsInRegionClosure : public ObjectClosure {
3113   HeapRegion *_hr;
3114   G1CollectedHeap *_g1;
3115 public:
3116   PrintObjsInRegionClosure(HeapRegion *hr) : _hr(hr) {
3117     _g1 = G1CollectedHeap::heap();
3118   }
3119 
3120   void do_object(oop o) {
3121     if (o != NULL) {
3122       HeapWord *start = (HeapWord *) o;
3123       size_t word_sz = o->size();
3124       gclog_or_tty->print("\nPrinting obj "PTR_FORMAT" of size " SIZE_FORMAT
3125                           " isMarkedPrev %d isMarkedNext %d isAllocSince %d\n",
3126                           (void*) o, word_sz,
3127                           _g1->isMarkedPrev(o),
3128                           _g1->isMarkedNext(o),
3129                           _hr->obj_allocated_since_prev_marking(o));
3130       HeapWord *end = start + word_sz;
3131       HeapWord *cur;
3132       int *val;
3133       for (cur = start; cur < end; cur++) {
3134         val = (int *) cur;
3135         gclog_or_tty->print("\t "PTR_FORMAT":"PTR_FORMAT"\n", val, *val);
3136       }
3137     }
3138   }
3139 };
3140 
3141 class VerifyRegionClosure: public HeapRegionClosure {
3142 private:
3143   bool             _par;
3144   VerifyOption     _vo;
3145   bool             _failures;
3146 public:
3147   // _vo == UsePrevMarking -> use "prev" marking information,
3148   // _vo == UseNextMarking -> use "next" marking information,
3149   // _vo == UseMarkWord    -> use mark word from object header.
3150   VerifyRegionClosure(bool par, VerifyOption vo)
3151     : _par(par),
3152       _vo(vo),
3153       _failures(false) {}
3154 
3155   bool failures() {
3156     return _failures;
3157   }
3158 
3159   bool doHeapRegion(HeapRegion* r) {
3160     if (!r->continuesHumongous()) {
3161       bool failures = false;
3162       r->verify(_vo, &failures);
3163       if (failures) {
3164         _failures = true;
3165       } else {
3166         VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3167         r->object_iterate(&not_dead_yet_cl);
3168         if (_vo != VerifyOption_G1UseNextMarking) {
3169           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3170             gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3171                                    "max_live_bytes "SIZE_FORMAT" "
3172                                    "< calculated "SIZE_FORMAT,
3173                                    r->bottom(), r->end(),
3174                                    r->max_live_bytes(),
3175                                  not_dead_yet_cl.live_bytes());
3176             _failures = true;
3177           }
3178         } else {
3179           // When vo == UseNextMarking we cannot currently do a sanity
3180           // check on the live bytes as the calculation has not been
3181           // finalized yet.
3182         }
3183       }
3184     }
3185     return false; // continue the region iteration; failures have been recorded above
3186   }
3187 };
3188 
3189 class YoungRefCounterClosure : public OopClosure {
3190   G1CollectedHeap* _g1h;
3191   int              _count;
3192  public:
3193   YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
3194   void do_oop(oop* p)       { if (_g1h->is_in_young(*p)) { _count++; } }
3195   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3196 
3197   int count() { return _count; }
3198   void reset_count() { _count = 0; }
3199 };
3200 
3201 class VerifyKlassClosure: public KlassClosure {
3202   YoungRefCounterClosure _young_ref_counter_closure;
3203   OopClosure *_oop_closure;
3204  public:
3205   VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
3206   void do_klass(Klass* k) {
3207     k->oops_do(_oop_closure);
3208 
3209     _young_ref_counter_closure.reset_count();
3210     k->oops_do(&_young_ref_counter_closure);
3211     if (_young_ref_counter_closure.count() > 0) {
3212       guarantee(k->has_modified_oops(), err_msg("Klass %p has young refs but is not dirty.", k));
3213     }
3214   }
3215 };
3216 
3217 // TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
3218 //       pass it as the perm_blk to SharedHeap::process_strong_roots.
3219 //       When process_strong_roots stops calling perm_blk->younger_refs_iterate
3220 //       we can change this closure to extend the simpler OopClosure.
3221 class VerifyRootsClosure: public OopsInGenClosure {
3222 private:
3223   G1CollectedHeap* _g1h;
3224   VerifyOption     _vo;
3225   bool             _failures;
3226 public:
3227   // _vo == UsePrevMarking -> use "prev" marking information,
3228   // _vo == UseNextMarking -> use "next" marking information,
3229   // _vo == UseMarkWord    -> use mark word from object header.
3230   VerifyRootsClosure(VerifyOption vo) :
3231     _g1h(G1CollectedHeap::heap()),
3232     _vo(vo),
3233     _failures(false) { }
3234 
3235   bool failures() { return _failures; }
3236 
3237   template <class T> void do_oop_nv(T* p) {
3238     T heap_oop = oopDesc::load_heap_oop(p);
3239     if (!oopDesc::is_null(heap_oop)) {
3240       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
3241       if (_g1h->is_obj_dead_cond(obj, _vo)) {
3242         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
3243                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
3244         if (_vo == VerifyOption_G1UseMarkWord) {
3245           gclog_or_tty->print_cr("  Mark word: "PTR_FORMAT, (void*)(obj->mark()));
3246         }
3247         obj->print_on(gclog_or_tty);
3248         _failures = true;
3249       }
3250     }
3251   }
3252 
3253   void do_oop(oop* p)       { do_oop_nv(p); }
3254   void do_oop(narrowOop* p) { do_oop_nv(p); }
3255 };
3256 
3257 // This is the task used for parallel heap verification.
3258 
3259 class G1ParVerifyTask: public AbstractGangTask {
3260 private:
3261   G1CollectedHeap* _g1h;
3262   VerifyOption     _vo;
3263   bool             _failures;
3264 
3265 public:
3266   // _vo == UsePrevMarking -> use "prev" marking information,
3267   // _vo == UseNextMarking -> use "next" marking information,
3268   // _vo == UseMarkWord    -> use mark word from object header.
3269   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
3270     AbstractGangTask("Parallel verify task"),
3271     _g1h(g1h),
3272     _vo(vo),
3273     _failures(false) { }
3274 
3275   bool failures() {
3276     return _failures;
3277   }
3278 
3279   void work(uint worker_id) {
3280     HandleMark hm;
3281     VerifyRegionClosure blk(true, _vo);
3282     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
3283                                           _g1h->workers()->active_workers(),
3284                                           HeapRegion::ParVerifyClaimValue);
3285     if (blk.failures()) {
3286       _failures = true;
3287     }
3288   }
3289 };
3290 
3291 void G1CollectedHeap::verify(bool silent) {
3292   verify(silent, VerifyOption_G1UsePrevMarking);
3293 }
3294 
3295 void G1CollectedHeap::verify(bool silent,
3296                              VerifyOption vo) {
3297   if (SafepointSynchronize::is_at_safepoint()) {
3298     if (!silent) { gclog_or_tty->print("Roots "); }
3299     VerifyRootsClosure rootsCl(vo);
3300 
3301     assert(Thread::current()->is_VM_thread(),
3302            "Expected to be executed serially by the VM thread at this point");
3303 
3304     CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
3305     VerifyKlassClosure klassCl(this, &rootsCl);
3306 
3307     // We apply the relevant closures to all the oops in the
3308     // system dictionary, the string table and the code cache.
3309     const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
3310 
3311     // Need cleared claim bits for the strong roots processing
3312     ClassLoaderDataGraph::clear_claimed_marks();
3313 
3314     process_strong_roots(true,      // activate StrongRootsScope
3315                          false,     // we set "is scavenging" to false,
3316                                     // so we don't reset the dirty cards.
3317                          ScanningOption(so),  // roots scanning options
3318                          &rootsCl,
3319                          &blobsCl,
3320                          &klassCl
3321                          );
3322 
3323     bool failures = rootsCl.failures();
3324 
3325     if (vo != VerifyOption_G1UseMarkWord) {
3326       // If we're verifying during a full GC then the region sets
3327       // will have been torn down at the start of the GC. Therefore
3328       // verifying the region sets will fail. So we only verify
3329       // the region sets when not in a full GC.
3330       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3331       verify_region_sets();
3332     }
3333 
3334     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3335     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3336       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3337              "sanity check");
3338 
3339       G1ParVerifyTask task(this, vo);
3340       assert(UseDynamicNumberOfGCThreads ||
3341         workers()->active_workers() == workers()->total_workers(),
3342         "If not dynamic should be using all the workers");
3343       int n_workers = workers()->active_workers();
3344       set_par_threads(n_workers);
3345       workers()->run_task(&task);
3346       set_par_threads(0);
3347       if (task.failures()) {
3348         failures = true;
3349       }
3350 
3351       // Checks that the expected amount of parallel work was done.
3352       // The implication is that n_workers is > 0.
3353       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
3354              "sanity check");
3355 
3356       reset_heap_region_claim_values();
3357 
3358       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3359              "sanity check");
3360     } else {
3361       VerifyRegionClosure blk(false, vo);
3362       heap_region_iterate(&blk);
3363       if (blk.failures()) {
3364         failures = true;
3365       }
3366     }
3367     if (!silent) gclog_or_tty->print("RemSet ");
3368     rem_set()->verify();
3369 
3370     if (failures) {
3371       gclog_or_tty->print_cr("Heap:");
3372       // It helps to have the per-region information in the output
3373       // when tracking down what went wrong. This is why we call
3374       // print_extended_on() instead of print_on().
3375       print_extended_on(gclog_or_tty);
3376       gclog_or_tty->print_cr("");
3377 #ifndef PRODUCT
3378       if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
3379         concurrent_mark()->print_reachable("at-verification-failure",
3380                                            vo, false /* all */);
3381       }
3382 #endif
3383       gclog_or_tty->flush();
3384     }
3385     guarantee(!failures, "there should not have been any failures");
3386   } else {
3387     if (!silent)
3388       gclog_or_tty->print("(SKIPPING roots, heapRegionSets, heapRegions, remset) ");
3389   }
3390 }
3391 
3392 class PrintRegionClosure: public HeapRegionClosure {
3393   outputStream* _st;
3394 public:
3395   PrintRegionClosure(outputStream* st) : _st(st) {}
3396   bool doHeapRegion(HeapRegion* r) {
3397     r->print_on(_st);
3398     return false;
3399   }
3400 };
3401 
3402 void G1CollectedHeap::print_on(outputStream* st) const {
3403   st->print(" %-20s", "garbage-first heap");
3404   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3405             capacity()/K, used_unlocked()/K);
3406   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3407             _g1_storage.low_boundary(),
3408             _g1_storage.high(),
3409             _g1_storage.high_boundary());
3410   st->cr();
3411   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3412   uint young_regions = _young_list->length();
3413   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3414             (size_t) young_regions * HeapRegion::GrainBytes / K);
3415   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3416   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3417             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3418   st->cr();
3419   MetaspaceAux::print_on(st);
3420 }
3421 
3422 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3423   print_on(st);
3424 
3425   // Print the per-region information.
3426   st->cr();
3427   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3428                "HS=humongous(starts), HC=humongous(continues), "
3429                "CS=collection set, F=free, TS=gc time stamp, "
3430                "PTAMS=previous top-at-mark-start, "
3431                "NTAMS=next top-at-mark-start)");
3432   PrintRegionClosure blk(st);
3433   heap_region_iterate(&blk);
3434 }
3435 
3436 void G1CollectedHeap::print_on_error(outputStream* st) const {
3437   this->CollectedHeap::print_on_error(st);
3438 
3439   if (_cm != NULL) {
3440     st->cr();
3441     _cm->print_on_error(st);
3442   }
3443 }
3444 
3445 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3446   if (G1CollectedHeap::use_parallel_gc_threads()) {
3447     workers()->print_worker_threads_on(st);
3448   }
3449   _cmThread->print_on(st);
3450   st->cr();
3451   _cm->print_worker_threads_on(st);
3452   _cg1r->print_worker_threads_on(st);
3453 }
3454 
3455 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
3456   if (G1CollectedHeap::use_parallel_gc_threads()) {
3457     workers()->threads_do(tc);
3458   }
3459   tc->do_thread(_cmThread);
3460   _cg1r->threads_do(tc);
3461 }
3462 
3463 void G1CollectedHeap::print_tracing_info() const {
3464   // We'll overload this to mean "trace GC pause statistics."
3465   if (TraceGen0Time || TraceGen1Time) {
3466     // The "G1CollectorPolicy" is keeping track of these stats, so delegate
3467     // to that.
3468     g1_policy()->print_tracing_info();
3469   }
3470   if (G1SummarizeRSetStats) {
3471     g1_rem_set()->print_summary_info();
3472   }
3473   if (G1SummarizeConcMark) {
3474     concurrent_mark()->print_summary_info();
3475   }
3476   g1_policy()->print_yg_surv_rate_info();
3477   SpecializationStats::print();
3478 }
3479 
3480 #ifndef PRODUCT
3481 // Helpful for debugging RSet issues.
3482 
3483 class PrintRSetsClosure : public HeapRegionClosure {
3484 private:
3485   const char* _msg;
3486   size_t _occupied_sum;
3487 
3488 public:
3489   bool doHeapRegion(HeapRegion* r) {
3490     HeapRegionRemSet* hrrs = r->rem_set();
3491     size_t occupied = hrrs->occupied();
3492     _occupied_sum += occupied;
3493 
3494     gclog_or_tty->print_cr("Printing RSet for region "HR_FORMAT,
3495                            HR_FORMAT_PARAMS(r));
3496     if (occupied == 0) {
3497       gclog_or_tty->print_cr("  RSet is empty");
3498     } else {
3499       hrrs->print();
3500     }
3501     gclog_or_tty->print_cr("----------");
3502     return false;
3503   }
3504 
3505   PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
3506     gclog_or_tty->cr();
3507     gclog_or_tty->print_cr("========================================");
3508     gclog_or_tty->print_cr(msg);
3509     gclog_or_tty->cr();
3510   }
3511 
3512   ~PrintRSetsClosure() {
3513     gclog_or_tty->print_cr("Occupied Sum: "SIZE_FORMAT, _occupied_sum);
3514     gclog_or_tty->print_cr("========================================");
3515     gclog_or_tty->cr();
3516   }
3517 };
3518 
3519 void G1CollectedHeap::print_cset_rsets() {
3520   PrintRSetsClosure cl("Printing CSet RSets");
3521   collection_set_iterate(&cl);
3522 }
3523 
3524 void G1CollectedHeap::print_all_rsets() {
3525   PrintRSetsClosure cl("Printing All RSets");
3526   heap_region_iterate(&cl);
3527 }
3528 #endif // PRODUCT
3529 
3530 G1CollectedHeap* G1CollectedHeap::heap() {
3531   assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
3532          "not a garbage-first heap");
3533   return _g1h;
3534 }
3535 
3536 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3537   // always_do_update_barrier = false;
3538   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3539   // Call allocation profiler
3540   AllocationProfiler::iterate_since_last_gc();
3541   // Fill TLAB's and such
3542   ensure_parsability(true);
3543 }
3544 
3545 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3546 
3547   if (G1SummarizeRSetStats &&
3548       (G1SummarizeRSetStatsPeriod > 0) &&
3549       // we are at the end of the GC; the total-collections counter has already been incremented.
3550       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3551     g1_rem_set()->print_periodic_summary_info();
3552   }
3553 
3554   // FIXME: what is this about?
3555   // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3556   // is set.
3557   COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3558                         "derived pointer present"));
3559   // always_do_update_barrier = true;
3560 
3561   // We have just completed a GC. Update the soft reference
3562   // policy with the new heap occupancy
3563   Universe::update_heap_info_at_gc();
3564 }
3565 
3566 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3567                                                unsigned int gc_count_before,
3568                                                bool* succeeded) {
3569   assert_heap_not_locked_and_not_at_safepoint();
3570   g1_policy()->record_stop_world_start();
3571   VM_G1IncCollectionPause op(gc_count_before,
3572                              word_size,
3573                              false, /* should_initiate_conc_mark */
3574                              g1_policy()->max_pause_time_ms(),
3575                              GCCause::_g1_inc_collection_pause);
3576   VMThread::execute(&op);
3577 
3578   HeapWord* result = op.result();
3579   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3580   assert(result == NULL || ret_succeeded,
3581          "the result should be NULL if the VM did not succeed");
3582   *succeeded = ret_succeeded;
3583 
3584   assert_heap_not_locked();
3585   return result;
3586 }
3587 
3588 void
3589 G1CollectedHeap::doConcurrentMark() {
3590   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3591   if (!_cmThread->in_progress()) {
3592     _cmThread->set_started();
3593     CGC_lock->notify();
3594   }
3595 }
3596 
3597 size_t G1CollectedHeap::pending_card_num() {
3598   size_t extra_cards = 0;
3599   JavaThread *curr = Threads::first();
3600   while (curr != NULL) {
3601     DirtyCardQueue& dcq = curr->dirty_card_queue();
3602     extra_cards += dcq.size();
3603     curr = curr->next();
3604   }
3605   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3606   size_t buffer_size = dcqs.buffer_size();
3607   size_t buffer_num = dcqs.completed_buffers_num();
3608 
3609   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3610   // in bytes - not the number of 'entries'. We need to convert
3611   // into a number of cards.
3612   return (buffer_size * buffer_num + extra_cards) / oopSize;
3613 }
3614 
3615 size_t G1CollectedHeap::cards_scanned() {
3616   return g1_rem_set()->cardsScanned();
3617 }
3618 
3619 void
3620 G1CollectedHeap::setup_surviving_young_words() {
3621   assert(_surviving_young_words == NULL, "pre-condition");
3622   uint array_length = g1_policy()->young_cset_region_length();
3623   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3624   if (_surviving_young_words == NULL) {
3625     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3626                           "Not enough space for young surv words summary.");
3627   }
3628   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3629 #ifdef ASSERT
3630   for (uint i = 0;  i < array_length; ++i) {
3631     assert( _surviving_young_words[i] == 0, "memset above" );
3632   }
3633 #endif // ASSERT
3634 }
3635 
3636 void
3637 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3638   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3639   uint array_length = g1_policy()->young_cset_region_length();
3640   for (uint i = 0; i < array_length; ++i) {
3641     _surviving_young_words[i] += surv_young_words[i];
3642   }
3643 }
3644 
3645 void
3646 G1CollectedHeap::cleanup_surviving_young_words() {
3647   guarantee( _surviving_young_words != NULL, "pre-condition" );
3648   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC);
3649   _surviving_young_words = NULL;
3650 }
3651 
3652 #ifdef ASSERT
3653 class VerifyCSetClosure: public HeapRegionClosure {
3654 public:
3655   bool doHeapRegion(HeapRegion* hr) {
3656     // Here we check that the CSet region's RSet is ready for parallel
3657     // iteration. The fields that we'll verify are only manipulated
3658     // when the region is part of a CSet and is collected. Afterwards,
3659     // we reset these fields when we clear the region's RSet (when the
3660     // region is freed) so they are ready when the region is
3661     // re-allocated. The only exception to this is if there's an
3662     // evacuation failure and instead of freeing the region we leave
3663     // it in the heap. In that case, we reset these fields during
3664     // evacuation failure handling.
3665     guarantee(hr->rem_set()->verify_ready_for_par_iteration(), "verification");
3666 
3667     // Here's a good place to add any other checks we'd like to
3668     // perform on CSet regions.
3669     return false;
3670   }
3671 };
3672 #endif // ASSERT
3673 
3674 #if TASKQUEUE_STATS
3675 void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
3676   st->print_raw_cr("GC Task Stats");
3677   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
3678   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
3679 }
3680 
3681 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
3682   print_taskqueue_stats_hdr(st);
3683 
3684   TaskQueueStats totals;
3685   const int n = workers() != NULL ? workers()->total_workers() : 1;
3686   for (int i = 0; i < n; ++i) {
3687     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
3688     totals += task_queue(i)->stats;
3689   }
3690   st->print_raw("tot "); totals.print(st); st->cr();
3691 
3692   DEBUG_ONLY(totals.verify());
3693 }
3694 
3695 void G1CollectedHeap::reset_taskqueue_stats() {
3696   const int n = workers() != NULL ? workers()->total_workers() : 1;
3697   for (int i = 0; i < n; ++i) {
3698     task_queue(i)->stats.reset();
3699   }
3700 }
3701 #endif // TASKQUEUE_STATS
3702 
3703 void G1CollectedHeap::log_gc_header() {
3704   if (!G1Log::fine()) {
3705     return;
3706   }
3707 
3708   gclog_or_tty->date_stamp(PrintGCDateStamps);
3709   gclog_or_tty->stamp(PrintGCTimeStamps);
3710 
3711   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3712     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3713     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
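       // The resulting header typically looks like
       // "[GC pause (<cause>) (young) (initial-mark)".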
3714 
3715   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3716 }
3717 
3718 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3719   if (!G1Log::fine()) {
3720     return;
3721   }
3722 
3723   if (G1Log::finer()) {
3724     if (evacuation_failed()) {
3725       gclog_or_tty->print(" (to-space exhausted)");
3726     }
3727     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3728     g1_policy()->phase_times()->note_gc_end();
3729     g1_policy()->phase_times()->print(pause_time_sec);
3730     g1_policy()->print_detailed_heap_transition();
3731   } else {
3732     if (evacuation_failed()) {
3733       gclog_or_tty->print("--");
3734     }
3735     g1_policy()->print_heap_transition();
3736     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3737   }
3738   gclog_or_tty->flush();
3739 }
3740 
3741 bool
3742 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3743   assert_at_safepoint(true /* should_be_vm_thread */);
3744   guarantee(!is_gc_active(), "collection is not reentrant");
3745 
3746   if (GC_locker::check_active_before_gc()) {
3747     return false;
3748   }
3749 
3750   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3751   ResourceMark rm;
3752 
3753   print_heap_before_gc();
3754 
3755   HRSPhaseSetter x(HRSPhaseEvacuation);
3756   verify_region_sets_optional();
3757   verify_dirty_young_regions();
3758 
3759   // This call will decide whether this pause is an initial-mark
3760   // pause. If it is, during_initial_mark_pause() will return true
3761   // for the duration of this pause.
3762   g1_policy()->decide_on_conc_mark_initiation();
3763 
3764   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3765   assert(!g1_policy()->during_initial_mark_pause() ||
3766           g1_policy()->gcs_are_young(), "sanity");
3767 
3768   // We also do not allow mixed GCs during marking.
3769   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3770 
3771   // Record now whether this pause is an initial mark: by the time the
3772   // current thread has completed its logging output and it is safe to
3773   // signal the CM thread, the flag's value in the policy will have been reset.
3774   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3775 
3776   // Inner scope for scope based logging, timers, and stats collection
3777   {
3778     if (g1_policy()->during_initial_mark_pause()) {
3779       // We are about to start a marking cycle, so we increment the
3780       // full collection counter.
3781       increment_old_marking_cycles_started();
3782     }
3783     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3784 
3785     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3786                                 workers()->active_workers() : 1);
3787     double pause_start_sec = os::elapsedTime();
3788     g1_policy()->phase_times()->note_gc_start(active_workers);
3789     log_gc_header();
3790 
3791     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3792     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3793 
3794     // If the secondary_free_list is not empty, append it to the
3795     // free_list. No need to wait for the cleanup operation to finish;
3796     // the region allocation code will check the secondary_free_list
3797     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3798     // set, skip this step so that the region allocation code has to
3799     // get entries from the secondary_free_list.
3800     if (!G1StressConcRegionFreeing) {
3801       append_secondary_free_list_if_not_empty_with_lock();
3802     }
3803 
3804     assert(check_young_list_well_formed(),
3805       "young list should be well formed");
3806 
3807     // Don't dynamically change the number of GC threads this early.  A value of
3808     // 0 is used to indicate serial work.  When parallel work is done,
3809     // it will be set.
3810 
3811     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3812       IsGCActiveMark x;
3813 
3814       gc_prologue(false);
3815       increment_total_collections(false /* full gc */);
3816       increment_gc_time_stamp();
3817 
3818       verify_before_gc();
3819 
3820       COMPILER2_PRESENT(DerivedPointerTable::clear());
3821 
3822       // Please see comment in g1CollectedHeap.hpp and
3823       // G1CollectedHeap::ref_processing_init() to see how
3824       // reference processing currently works in G1.
3825 
3826       // Enable discovery in the STW reference processor
3827       ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3828                                             true /*verify_no_refs*/);
3829 
3830       {
3831         // We want to temporarily turn off discovery by the
3832         // CM ref processor, if necessary, and turn it back on
3833         // again later if we do. Using a scoped
3834         // NoRefDiscovery object will do this.
3835         NoRefDiscovery no_cm_discovery(ref_processor_cm());
3836 
3837         // Forget the current alloc region (we might even choose it to be part
3838         // of the collection set!).
3839         release_mutator_alloc_region();
3840 
3841         // We should call this after we retire the mutator alloc
3842         // region(s) so that all the ALLOC / RETIRE events are generated
3843         // before the start GC event.
3844         _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3845 
3846         // This timing is only used by the ergonomics to handle our pause target.
3847         // It is unclear why this should not include the full pause. We will
3848         // investigate this in CR 7178365.
3849         //
3850         // Preserving the old comment here if that helps the investigation:
3851         //
3852         // The elapsed time induced by the start time below deliberately elides
3853         // the possible verification above.
3854         double sample_start_time_sec = os::elapsedTime();
3855 
3856 #if YOUNG_LIST_VERBOSE
3857         gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3858         _young_list->print();
3859         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3860 #endif // YOUNG_LIST_VERBOSE
3861 
3862         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3863 
3864         double scan_wait_start = os::elapsedTime();
3865         // We have to wait until the CM threads finish scanning the
3866         // root regions as it's the only way to ensure that all the
3867         // objects on them have been correctly scanned before we start
3868         // moving them during the GC.
3869         bool waited = _cm->root_regions()->wait_until_scan_finished();
3870         double wait_time_ms = 0.0;
3871         if (waited) {
3872           double scan_wait_end = os::elapsedTime();
3873           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3874         }
3875         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3876 
3877 #if YOUNG_LIST_VERBOSE
3878         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3879         _young_list->print();
3880 #endif // YOUNG_LIST_VERBOSE
3881 
3882         if (g1_policy()->during_initial_mark_pause()) {
3883           concurrent_mark()->checkpointRootsInitialPre();
3884         }
3885 
3886 #if YOUNG_LIST_VERBOSE
3887         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3888         _young_list->print();
3889         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3890 #endif // YOUNG_LIST_VERBOSE
3891 
3892         g1_policy()->finalize_cset(target_pause_time_ms);
3893 
3894         _cm->note_start_of_gc();
3895         // We should not verify the per-thread SATB buffers given that
3896         // we have not filtered them yet (we'll do so during the
3897         // GC). We also call this after finalize_cset() to
3898         // ensure that the CSet has been finalized.
3899         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3900                                  true  /* verify_enqueued_buffers */,
3901                                  false /* verify_thread_buffers */,
3902                                  true  /* verify_fingers */);
3903 
3904         if (_hr_printer.is_active()) {
3905           HeapRegion* hr = g1_policy()->collection_set();
3906           while (hr != NULL) {
3907             G1HRPrinter::RegionType type;
3908             if (!hr->is_young()) {
3909               type = G1HRPrinter::Old;
3910             } else if (hr->is_survivor()) {
3911               type = G1HRPrinter::Survivor;
3912             } else {
3913               type = G1HRPrinter::Eden;
3914             }
3915             _hr_printer.cset(hr);
3916             hr = hr->next_in_collection_set();
3917           }
3918         }
3919 
3920 #ifdef ASSERT
3921         VerifyCSetClosure cl;
3922         collection_set_iterate(&cl);
3923 #endif // ASSERT
3924 
3925         setup_surviving_young_words();
3926 
3927         // Initialize the GC alloc regions.
3928         init_gc_alloc_regions();
3929 
3930         // Actually do the work...
3931         evacuate_collection_set();
3932 
3933         // We do this to mainly verify the per-thread SATB buffers
3934         // (which have been filtered by now) since we didn't verify
3935         // them earlier. No point in re-checking the stacks / enqueued
3936         // buffers given that the CSet has not changed since last time
3937         // we checked.
3938         _cm->verify_no_cset_oops(false /* verify_stacks */,
3939                                  false /* verify_enqueued_buffers */,
3940                                  true  /* verify_thread_buffers */,
3941                                  true  /* verify_fingers */);
3942 
3943         free_collection_set(g1_policy()->collection_set());
3944         g1_policy()->clear_collection_set();
3945 
3946         cleanup_surviving_young_words();
3947 
3948         // Start a new incremental collection set for the next pause.
3949         g1_policy()->start_incremental_cset_building();
3950 
3951         // Clear the _cset_fast_test bitmap in anticipation of adding
3952         // regions to the incremental collection set for the next
3953         // evacuation pause.
3954         clear_cset_fast_test();
3955 
3956         _young_list->reset_sampled_info();
3957 
3958         // Don't check the whole heap at this point as the
3959         // GC alloc regions from this pause have been tagged
3960         // as survivors and moved on to the survivor list.
3961         // Survivor regions will fail the !is_young() check.
3962         assert(check_young_list_empty(false /* check_heap */),
3963           "young list should be empty");
3964 
3965 #if YOUNG_LIST_VERBOSE
3966         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3967         _young_list->print();
3968 #endif // YOUNG_LIST_VERBOSE
3969 
3970         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3971                                             _young_list->first_survivor_region(),
3972                                             _young_list->last_survivor_region());
3973 
3974         _young_list->reset_auxilary_lists();
3975 
3976         if (evacuation_failed()) {
3977           _summary_bytes_used = recalculate_used();
3978         } else {
3979           // The "used" of the collection set regions has already been
3980           // subtracted when they were freed.  Add in the bytes evacuated.
3981           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3982         }
3983 
3984         if (g1_policy()->during_initial_mark_pause()) {
3985           // We have to do this before we notify the CM threads that
3986           // they can start working to make sure that all the
3987           // appropriate initialization is done on the CM object.
3988           concurrent_mark()->checkpointRootsInitialPost();
3989           set_marking_started();
3990           // Note that we don't actually trigger the CM thread at
3991           // this point. We do that later when we're sure that
3992           // the current thread has completed its logging output.
3993         }
3994 
3995         allocate_dummy_regions();
3996 
3997 #if YOUNG_LIST_VERBOSE
3998         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3999         _young_list->print();
4000         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4001 #endif // YOUNG_LIST_VERBOSE
4002 
4003         init_mutator_alloc_region();
4004 
4005         {
4006           size_t expand_bytes = g1_policy()->expansion_amount();
4007           if (expand_bytes > 0) {
4008             size_t bytes_before = capacity();
4009             // No need for an ergo verbose message here,
4010             // expansion_amount() does this when it returns a value > 0.
4011             if (!expand(expand_bytes)) {
4012               // We failed to expand the heap so let's verify that
4013               // committed/uncommitted amount match the backing store
4014               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
4015               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
4016             }
4017           }
4018         }
4019 
4020         // We redo the verification, but now with respect to the new CSet
4021         // which has just been initialized after the previous CSet was freed.
4022         _cm->verify_no_cset_oops(true  /* verify_stacks */,
4023                                  true  /* verify_enqueued_buffers */,
4024                                  true  /* verify_thread_buffers */,
4025                                  true  /* verify_fingers */);
4026         _cm->note_end_of_gc();
4027 
4028         // This timing is only used by the ergonomics to handle our pause target.
4029         // It is unclear why this should not include the full pause. We will
4030         // investigate this in CR 7178365.
4031         double sample_end_time_sec = os::elapsedTime();
4032         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4033         g1_policy()->record_collection_pause_end(pause_time_ms);
4034 
4035         MemoryService::track_memory_usage();
4036 
4037         // In prepare_for_verify() below we'll need to scan the deferred
4038         // update buffers to bring the RSets up-to-date if
4039         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4040         // the update buffers we'll probably need to scan cards on the
4041         // regions we just allocated to (i.e., the GC alloc
4042         // regions). However, during the last GC we called
4043         // set_saved_mark() on all the GC alloc regions, so card
4044         // scanning might skip the [saved_mark_word()...top()] area of
4045         // those regions (i.e., the area we allocated objects into
4046         // during the last GC). But it shouldn't. Given that
4047         // saved_mark_word() is conditional on whether the GC time stamp
4048         // on the region is current or not, by incrementing the GC time
4049         // stamp here we invalidate all the GC time stamps on all the
4050         // regions and saved_mark_word() will simply return top() for
4051         // all the regions. This is a nicer way of ensuring this rather
4052         // than iterating over the regions and fixing them. In fact, the
4053         // GC time stamp increment here also ensures that
4054         // saved_mark_word() will return top() between pauses, i.e.,
4055         // during concurrent refinement. So we don't need the
4056         // is_gc_active() check to decide which top to use when
4057         // scanning cards (see CR 7039627).
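             // Roughly (a sketch of the saved_mark_word() logic):
             //   if (region_time_stamp < heap_time_stamp) return top();
             //   else                                      return the saved mark word;
             // so bumping the heap-wide time stamp below makes every region
             // report top() again.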
4058         increment_gc_time_stamp();
4059 
4060         verify_after_gc();
4061 
4062         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4063         ref_processor_stw()->verify_no_references_recorded();
4064 
4065         // CM reference discovery will be re-enabled if necessary.
4066       }
4067 
4068       // We should do this after we potentially expand the heap so
4069       // that all the COMMIT events are generated before the end GC
4070       // event, and after we retire the GC alloc regions so that all
4071       // RETIRE events are generated before the end GC event.
4072       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4073 
4074       if (mark_in_progress()) {
4075         concurrent_mark()->update_g1_committed();
4076       }
4077 
4078 #ifdef TRACESPINNING
4079       ParallelTaskTerminator::print_termination_counts();
4080 #endif
4081 
4082       gc_epilogue(false);
4083     }
4084 
4085     // Print the remainder of the GC log output.
4086     log_gc_footer(os::elapsedTime() - pause_start_sec);
4087 
4088     // It is not yet safe to tell the concurrent mark thread to
4089     // start as we have some optional output below. We don't want the
4090     // output from the concurrent mark thread interfering with this
4091     // logging output either.
4092 
4093     _hrs.verify_optional();
4094     verify_region_sets_optional();
4095 
4096     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4097     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4098 
4099     print_heap_after_gc();
4100 
4101     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4102     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4103     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4104     // before any GC notifications are raised.
4105     g1mm()->update_sizes();
4106   }
4107 
4108   // It should now be safe to tell the concurrent mark thread to start
4109   // without its logging output interfering with the logging output
4110   // that came from the pause.
4111 
4112   if (should_start_conc_mark) {
4113     // CAUTION: after the doConcurrentMark() call below,
4114     // the concurrent marking thread(s) could be running
4115     // concurrently with us. Make sure that anything after
4116     // this point does not assume that we are the only GC thread
4117     // running. Note: of course, the actual marking work will
4118     // not start until the safepoint itself is released in
4119     // ConcurrentGCThread::safepoint_desynchronize().
4120     doConcurrentMark();
4121   }
4122 
4123   return true;
4124 }
4125 
4126 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4127 {
4128   size_t gclab_word_size;
4129   switch (purpose) {
4130     case GCAllocForSurvived:
4131       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
4132       break;
4133     case GCAllocForTenured:
4134       gclab_word_size = _old_plab_stats.desired_plab_sz();
4135       break;
4136     default:
4137       assert(false, "unknown GCAllocPurpose");
4138       gclab_word_size = _old_plab_stats.desired_plab_sz();
4139       break;
4140   }
4141 
4142   // Prevent humongous PLAB sizes for two reasons:
4143   // * PLABs are allocated using similar paths to oops, but should
4144   //   never be in a humongous region
4145   // * Allowing humongous PLABs needlessly churns the region free lists
4146   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4147 }
4148 
4149 void G1CollectedHeap::init_mutator_alloc_region() {
4150   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4151   _mutator_alloc_region.init();
4152 }
4153 
4154 void G1CollectedHeap::release_mutator_alloc_region() {
4155   _mutator_alloc_region.release();
4156   assert(_mutator_alloc_region.get() == NULL, "post-condition");
4157 }
4158 
4159 void G1CollectedHeap::init_gc_alloc_regions() {
4160   assert_at_safepoint(true /* should_be_vm_thread */);
4161 
4162   _survivor_gc_alloc_region.init();
4163   _old_gc_alloc_region.init();
4164   HeapRegion* retained_region = _retained_old_gc_alloc_region;
4165   _retained_old_gc_alloc_region = NULL;
4166 
4167   // We will discard the current GC alloc region if:
4168   // a) it's in the collection set (it can happen!),
4169   // b) it's already full (no point in using it),
4170   // c) it's empty (this means that it was emptied during
4171   // a cleanup and it should be on the free list now), or
4172   // d) it's humongous (this means that it was emptied
4173   // during a cleanup and was added to the free list, but
4174   // has been subsequently used to allocate a humongous
4175   // object that may be less than the region size).
4176   if (retained_region != NULL &&
4177       !retained_region->in_collection_set() &&
4178       !(retained_region->top() == retained_region->end()) &&
4179       !retained_region->is_empty() &&
4180       !retained_region->isHumongous()) {
4181     retained_region->set_saved_mark();
4182     // The retained region was added to the old region set when it was
4183     // retired. We have to remove it now, since we don't allow regions
4184     // we allocate to in the region sets. We'll re-add it later, when
4185     // it's retired again.
4186     _old_set.remove(retained_region);
4187     bool during_im = g1_policy()->during_initial_mark_pause();
4188     retained_region->note_start_of_copying(during_im);
4189     _old_gc_alloc_region.set(retained_region);
4190     _hr_printer.reuse(retained_region);
4191   }
4192 }
4193 
4194 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
4195   _survivor_gc_alloc_region.release();
4196   // If we have an old GC alloc region to release, we'll save it in
4197   // _retained_old_gc_alloc_region. If we don't,
4198   // _retained_old_gc_alloc_region will become NULL. This is what we
4199   // want either way, so there is no reason to check explicitly for either
4200   // condition.
4201   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4202 
4203   if (ResizePLAB) {
4204     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4205     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4206   }
4207 }
4208 
4209 void G1CollectedHeap::abandon_gc_alloc_regions() {
4210   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4211   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4212   _retained_old_gc_alloc_region = NULL;
4213 }
4214 
4215 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
4216   _drain_in_progress = false;
4217   set_evac_failure_closure(cl);
4218   _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
4219 }
4220 
4221 void G1CollectedHeap::finalize_for_evac_failure() {
4222   assert(_evac_failure_scan_stack != NULL &&
4223          _evac_failure_scan_stack->length() == 0,
4224          "Postcondition");
4225   assert(!_drain_in_progress, "Postcondition");
4226   delete _evac_failure_scan_stack;
4227   _evac_failure_scan_stack = NULL;
4228 }
4229 
4230 void G1CollectedHeap::remove_self_forwarding_pointers() {
4231   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4232 
4233   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
4234 
4235   if (G1CollectedHeap::use_parallel_gc_threads()) {
4236     set_par_threads();
4237     workers()->run_task(&rsfp_task);
4238     set_par_threads(0);
4239   } else {
4240     rsfp_task.work(0);
4241   }
4242 
4243   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
4244 
4245   // Reset the claim values in the regions in the collection set.
4246   reset_cset_heap_region_claim_values();
4247 
4248   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
4249 
4250   // Now restore saved marks, if any.
4251   assert(_objs_with_preserved_marks.size() ==
4252             _preserved_marks_of_objs.size(), "Both or none.");
4253   while (!_objs_with_preserved_marks.is_empty()) {
4254     oop obj = _objs_with_preserved_marks.pop();
4255     markOop m = _preserved_marks_of_objs.pop();
4256     obj->set_mark(m);
4257   }
4258   _objs_with_preserved_marks.clear(true);
4259   _preserved_marks_of_objs.clear(true);
4260 }
4261 
4262 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4263   _evac_failure_scan_stack->push(obj);
4264 }
4265 
4266 void G1CollectedHeap::drain_evac_failure_scan_stack() {
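       // Note: applying the evac-failure closure to a popped object's fields
       // may push further objects onto this stack (see
       // handle_evacuation_failure_common()), so keep draining until empty.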
4267   assert(_evac_failure_scan_stack != NULL, "precondition");
4268 
4269   while (_evac_failure_scan_stack->length() > 0) {
4270      oop obj = _evac_failure_scan_stack->pop();
4271      _evac_failure_closure->set_region(heap_region_containing(obj));
4272      obj->oop_iterate_backwards(_evac_failure_closure);
4273   }
4274 }
4275 
4276 oop
4277 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4278                                                oop old) {
4279   assert(obj_in_cs(old),
4280          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4281                  (HeapWord*) old));
4282   markOop m = old->mark();
4283   oop forward_ptr = old->forward_to_atomic(old);
4284   if (forward_ptr == NULL) {
4285     // Forward-to-self succeeded.
4286 
4287     if (_evac_failure_closure != cl) {
4288       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4289       assert(!_drain_in_progress,
4290              "Should only be true while someone holds the lock.");
4291       // Set the global evac-failure closure to the current thread's.
4292       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4293       set_evac_failure_closure(cl);
4294       // Now do the common part.
4295       handle_evacuation_failure_common(old, m);
4296       // Reset to NULL.
4297       set_evac_failure_closure(NULL);
4298     } else {
4299       // The lock is already held, and this is recursive.
4300       assert(_drain_in_progress, "This should only be the recursive case.");
4301       handle_evacuation_failure_common(old, m);
4302     }
4303     return old;
4304   } else {
4305     // Forward-to-self failed. Either someone else managed to allocate
4306     // space for this object (old != forward_ptr) or they beat us in
4307     // self-forwarding it (old == forward_ptr).
4308     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4309            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4310                    "should not be in the CSet",
4311                    (HeapWord*) old, (HeapWord*) forward_ptr));
4312     return forward_ptr;
4313   }
4314 }
4315 
4316 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4317   set_evacuation_failed(true);
4318 
4319   preserve_mark_if_necessary(old, m);
4320 
4321   HeapRegion* r = heap_region_containing(old);
4322   if (!r->evacuation_failed()) {
4323     r->set_evacuation_failed(true);
4324     _hr_printer.evac_failure(r);
4325   }
4326 
4327   push_on_evac_failure_scan_stack(old);
4328 
4329   if (!_drain_in_progress) {
4330     // prevent recursion in copy_to_survivor_space()
4331     _drain_in_progress = true;
4332     drain_evac_failure_scan_stack();
4333     _drain_in_progress = false;
4334   }
4335 }
4336 
4337 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4338   assert(evacuation_failed(), "Oversaving!");
4339   // We want to call the "for_promotion_failure" version only in the
4340   // case of a promotion failure.
4341   if (m->must_be_preserved_for_promotion_failure(obj)) {
4342     _objs_with_preserved_marks.push(obj);
4343     _preserved_marks_of_objs.push(m);
4344   }
4345 }
4346 
4347 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
4348                                                   size_t word_size) {
4349   if (purpose == GCAllocForSurvived) {
4350     HeapWord* result = survivor_attempt_allocation(word_size);
4351     if (result != NULL) {
4352       return result;
4353     } else {
4354       // Let's try to allocate in the old gen in case we can fit the
4355       // object there.
4356       return old_attempt_allocation(word_size);
4357     }
4358   } else {
4359     assert(purpose == GCAllocForTenured, "sanity");
4360     HeapWord* result = old_attempt_allocation(word_size);
4361     if (result != NULL) {
4362       return result;
4363     } else {
4364       // Let's try to allocate in the survivors in case we can fit the
4365       // object there.
4366       return survivor_attempt_allocation(word_size);
4367     }
4368   }
4369 
4370   ShouldNotReachHere();
4371   // Trying to keep some compilers happy.
4372   return NULL;
4373 }
4374 
4375 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
4376   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
4377 
4378 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
4379   : _g1h(g1h),
4380     _refs(g1h->task_queue(queue_num)),
4381     _dcq(&g1h->dirty_card_queue_set()),
4382     _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
4383     _g1_rem(g1h->g1_rem_set()),
4384     _hash_seed(17), _queue_num(queue_num),
4385     _term_attempts(0),
4386     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
4387     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
4388     _age_table(false),
4389     _strong_roots_time(0), _term_time(0),
4390     _alloc_buffer_waste(0), _undo_waste(0) {
4391   // We allocate one entry per young CSet region plus one, since
4392   // we "sacrifice" entry 0 to keep track of surviving bytes for
4393   // non-young regions (where the age is -1).
4394   // We also add a few elements at the beginning and at the end in
4395   // an attempt to eliminate cache contention.
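       // Layout (sketch):
       //   [ padding | entry 0 (non-young) | entries 1..N (young CSet regions) | padding ]
       // where N == young_cset_region_length() and each padding block is
       // PADDING_ELEM_NUM unused elements.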
4396   uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
4397   uint array_length = PADDING_ELEM_NUM +
4398                       real_length +
4399                       PADDING_ELEM_NUM;
4400   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
4401   if (_surviving_young_words_base == NULL)
4402     vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
4403                           "Not enough space for young surv histo.");
4404   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
4405   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
4406 
4407   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
4408   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
4409 
4410   _start = os::elapsedTime();
4411 }
4412 
4413 void
4414 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
4415 {
4416   st->print_raw_cr("GC Termination Stats");
4417   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
4418                    " ------waste (KiB)------");
4419   st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
4420                    "  total   alloc    undo");
4421   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
4422                    " ------- ------- -------");
4423 }
4424 
4425 void
4426 G1ParScanThreadState::print_termination_stats(int i,
4427                                               outputStream* const st) const
4428 {
4429   const double elapsed_ms = elapsed_time() * 1000.0;
4430   const double s_roots_ms = strong_roots_time() * 1000.0;
4431   const double term_ms    = term_time() * 1000.0;
4432   st->print_cr("%3d %9.2f %9.2f %6.2f "
4433                "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4434                SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4435                i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
4436                term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
4437                (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
4438                alloc_buffer_waste() * HeapWordSize / K,
4439                undo_waste() * HeapWordSize / K);
4440 }
4441 
4442 #ifdef ASSERT
4443 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
4444   assert(ref != NULL, "invariant");
4445   assert(UseCompressedOops, "sanity");
4446   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
4447   oop p = oopDesc::load_decode_heap_oop(ref);
4448   assert(_g1h->is_in_g1_reserved(p),
4449          err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4450   return true;
4451 }
4452 
4453 bool G1ParScanThreadState::verify_ref(oop* ref) const {
4454   assert(ref != NULL, "invariant");
4455   if (has_partial_array_mask(ref)) {
4456     // Must be in the collection set--it's already been copied.
4457     oop p = clear_partial_array_mask(ref);
4458     assert(_g1h->obj_in_cs(p),
4459            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4460   } else {
4461     oop p = oopDesc::load_decode_heap_oop(ref);
4462     assert(_g1h->is_in_g1_reserved(p),
4463            err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4464   }
4465   return true;
4466 }
4467 
4468 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4469   if (ref.is_narrow()) {
4470     return verify_ref((narrowOop*) ref);
4471   } else {
4472     return verify_ref((oop*) ref);
4473   }
4474 }
4475 #endif // ASSERT
4476 
4477 void G1ParScanThreadState::trim_queue() {
4478   assert(_evac_cl != NULL, "not set");
4479   assert(_evac_failure_cl != NULL, "not set");
4480   assert(_partial_scan_cl != NULL, "not set");
4481 
4482   StarTask ref;
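       // Processing a reference can push new entries onto the local queue or
       // the overflow stack, so loop until both are observed to be empty.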
4483   do {
4484     // Drain the overflow stack first, so other threads can steal.
4485     while (refs()->pop_overflow(ref)) {
4486       deal_with_reference(ref);
4487     }
4488 
4489     while (refs()->pop_local(ref)) {
4490       deal_with_reference(ref);
4491     }
4492   } while (!refs()->is_empty());
4493 }
4494 
4495 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
4496                                      G1ParScanThreadState* par_scan_state) :
4497   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4498   _par_scan_state(par_scan_state),
4499   _worker_id(par_scan_state->queue_num()),
4500   _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4501   _mark_in_progress(_g1->mark_in_progress()) { }
4502 
4503 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4504 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
4505 #ifdef ASSERT
4506   HeapRegion* hr = _g1->heap_region_containing(obj);
4507   assert(hr != NULL, "sanity");
4508   assert(!hr->in_collection_set(), "should not mark objects in the CSet");
4509 #endif // ASSERT
4510 
4511   // We know that the object is not moving so it's safe to read its size.
4512   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
4513 }
4514 
4515 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4516 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4517   ::mark_forwarded_object(oop from_obj, oop to_obj) {
4518 #ifdef ASSERT
4519   assert(from_obj->is_forwarded(), "from obj should be forwarded");
4520   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
4521   assert(from_obj != to_obj, "should not be self-forwarded");
4522 
4523   HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
4524   assert(from_hr != NULL, "sanity");
4525   assert(from_hr->in_collection_set(), "from obj should be in the CSet");
4526 
4527   HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
4528   assert(to_hr != NULL, "sanity");
4529   assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
4530 #endif // ASSERT
4531 
4532   // The object might be in the process of being copied by another
4533   // worker so we cannot trust that its to-space image is
4534   // well-formed. So we have to read its size from its from-space
4535   // image which we know should not be changing.
4536   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
4537 }
4538 
4539 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4540 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4541   ::copy_to_survivor_space(oop old) {
4542   size_t word_sz = old->size();
4543   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
4544   // +1 to make the -1 indexes valid...
4545   int       young_index = from_region->young_index_in_cset()+1;
4546   assert( (from_region->is_young() && young_index >  0) ||
4547          (!from_region->is_young() && young_index == 0), "invariant" );
4548   G1CollectorPolicy* g1p = _g1->g1_policy();
4549   markOop m = old->mark();
4550   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4551                                            : m->age();
4552   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4553                                                              word_sz);
4554   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4555 #ifndef PRODUCT
4556   // Should this evacuation fail?
4557   if (_g1->evacuation_should_fail()) {
4558     if (obj_ptr != NULL) {
4559       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4560       obj_ptr = NULL;
4561     }
4562   }
4563 #endif // !PRODUCT
4564 
4565   if (obj_ptr == NULL) {
4566     // This will either forward-to-self, or detect that someone else has
4567     // installed a forwarding pointer.
4568     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4569     return _g1->handle_evacuation_failure_par(cl, old);
4570   }
4571 
4572   oop obj = oop(obj_ptr);
4573 
4574   // We're going to allocate linearly, so might as well prefetch ahead.
4575   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4576 
4577   oop forward_ptr = old->forward_to_atomic(obj);
4578   if (forward_ptr == NULL) {
4579     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4580     if (g1p->track_object_age(alloc_purpose)) {
4581       // We could simply do obj->incr_age(). However, this causes a
4582       // performance issue. obj->incr_age() will first check whether
4583       // the object has a displaced mark by checking its mark word;
4584       // getting the mark word from the new location of the object
4585       // stalls. So, given that we already have the mark word and we
4586       // are about to install it anyway, it's better to increase the
4587       // age on the mark word, when the object does not have a
4588       // displaced mark word. We're not expecting many objects to have
4589       // a displaced mark word, so that case is not optimized
4590       // further (it could be...) and we simply call obj->incr_age().
4591 
4592       if (m->has_displaced_mark_helper()) {
4593         // in this case, we have to install the mark word first,
4594         // otherwise obj looks to be forwarded (the old mark word,
4595         // which contains the forward pointer, was copied)
4596         obj->set_mark(m);
4597         obj->incr_age();
4598       } else {
4599         m = m->incr_age();
4600         obj->set_mark(m);
4601       }
4602       _par_scan_state->age_table()->add(obj, word_sz);
4603     } else {
4604       obj->set_mark(m);
4605     }
4606 
4607     size_t* surv_young_words = _par_scan_state->surviving_young_words();
4608     surv_young_words[young_index] += word_sz;
4609 
4610     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
4611       // We keep track of the next start index in the length field of
4612       // the to-space object. The actual length can be found in the
4613       // length field of the from-space object.
4614       arrayOop(obj)->set_length(0);
4615       oop* old_p = set_partial_array_mask(old);
4616       _par_scan_state->push_on_queue(old_p);
4617     } else {
4618       // No point in using the slower heap_region_containing() method,
4619       // given that we know obj is in the heap.
4620       _scanner.set_region(_g1->heap_region_containing_raw(obj));
4621       obj->oop_iterate_backwards(&_scanner);
4622     }
4623   } else {
4624     _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4625     obj = forward_ptr;
4626   }
4627   return obj;
4628 }
4629 
4630 template <class T>
4631 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4632   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4633     _scanned_klass->record_modified_oops();
4634   }
4635 }
4636 
4637 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
4638 template <class T>
4639 void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
4640 ::do_oop_work(T* p) {
4641   oop obj = oopDesc::load_decode_heap_oop(p);
4642   assert(barrier != G1BarrierRS || obj != NULL,
4643          "Precondition: G1BarrierRS implies obj is non-NULL");
4644 
4645   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4646 
4647   // here the null check is implicit in the in_cset_fast_test() call
4648   if (_g1->in_cset_fast_test(obj)) {
4649     oop forwardee;
4650     if (obj->is_forwarded()) {
4651       forwardee = obj->forwardee();
4652     } else {
4653       forwardee = copy_to_survivor_space(obj);
4654     }
4655     assert(forwardee != NULL, "forwardee should not be NULL");
4656     oopDesc::encode_store_heap_oop(p, forwardee);
4657     if (do_mark_object && forwardee != obj) {
4658       // If the object is self-forwarded we don't need to explicitly
4659       // mark it, the evacuation failure protocol will do so.
4660       mark_forwarded_object(obj, forwardee);
4661     }
4662 
4663     // When scanning the RS, we only care about objs in CS.
4664     if (barrier == G1BarrierRS) {
4665       _par_scan_state->update_rs(_from, p, _worker_id);
4666     } else if (barrier == G1BarrierKlass) {
4667       do_klass_barrier(p, forwardee);
4668     }
4669   } else {
4670     // The object is not in collection set. If we're a root scanning
4671     // closure during an initial mark pause (i.e. do_mark_object will
4672     // be true) then attempt to mark the object.
4673     if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
4674       mark_object(obj);
4675     }
4676   }
4677 
4678   if (barrier == G1BarrierEvac && obj != NULL) {
4679     _par_scan_state->update_rs(_from, p, _worker_id);
4680   }
4681 
4682   if (do_gen_barrier && obj != NULL) {
4683     par_do_barrier(p);
4684   }
4685 }
4686 
4687 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
4688 template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
4689 
4690 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
4691   assert(has_partial_array_mask(p), "invariant");
4692   oop from_obj = clear_partial_array_mask(p);
4693 
4694   assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
4695   assert(from_obj->is_objArray(), "must be obj array");
4696   objArrayOop from_obj_array = objArrayOop(from_obj);
4697   // The from-space object contains the real length.
4698   int length                 = from_obj_array->length();
4699 
4700   assert(from_obj->is_forwarded(), "must be forwarded");
4701   oop to_obj                 = from_obj->forwardee();
4702   assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
4703   objArrayOop to_obj_array   = objArrayOop(to_obj);
4704   // We keep track of the next start index in the length field of the
4705   // to-space object.
4706   int next_index             = to_obj_array->length();
4707   assert(0 <= next_index && next_index < length,
4708          err_msg("invariant, next index: %d, length: %d", next_index, length));
4709 
4710   int start                  = next_index;
4711   int end                    = length;
4712   int remainder              = end - start;
4713   // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
4714   if (remainder > 2 * ParGCArrayScanChunk) {
4715     end = start + ParGCArrayScanChunk;
4716     to_obj_array->set_length(end);
4717     // Push the remainder before we process the range in case another
4718     // worker has run out of things to do and can steal it.
4719     oop* from_obj_p = set_partial_array_mask(from_obj);
4720     _par_scan_state->push_on_queue(from_obj_p);
4721   } else {
4722     assert(length == end, "sanity");
4723     // We'll process the final range for this object. Restore the length
4724     // so that the heap remains parsable in case of evacuation failure.
4725     to_obj_array->set_length(end);
4726   }
4727   _scanner.set_region(_g1->heap_region_containing_raw(to_obj));
4728   // Process indexes [start,end). It will also process the header
4729   // along with the first chunk (i.e., the chunk with start == 0).
4730   // Note that at this point the length field of to_obj_array is not
4731   // correct given that we are using it to keep track of the next
4732   // start index. oop_iterate_range() (thankfully!) ignores the length
4733   // field and only relies on the start / end parameters.  It does
4734   // however return the size of the object which will be incorrect. So
4735   // we have to ignore it even if we wanted to use it.
4736   to_obj_array->oop_iterate_range(&_scanner, start, end);
4737 }
4738 
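     // Drains the per-worker task queues during evacuation. Each worker first
     // trims its own queue, then repeatedly tries to steal tasks from other
     // workers' queues, and only terminates once the termination protocol
     // agrees that all queues are empty.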
4739 class G1ParEvacuateFollowersClosure : public VoidClosure {
4740 protected:
4741   G1CollectedHeap*              _g1h;
4742   G1ParScanThreadState*         _par_scan_state;
4743   RefToScanQueueSet*            _queues;
4744   ParallelTaskTerminator*       _terminator;
4745 
4746   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4747   RefToScanQueueSet*      queues()         { return _queues; }
4748   ParallelTaskTerminator* terminator()     { return _terminator; }
4749 
4750 public:
4751   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4752                                 G1ParScanThreadState* par_scan_state,
4753                                 RefToScanQueueSet* queues,
4754                                 ParallelTaskTerminator* terminator)
4755     : _g1h(g1h), _par_scan_state(par_scan_state),
4756       _queues(queues), _terminator(terminator) {}
4757 
4758   void do_void();
4759 
4760 private:
4761   inline bool offer_termination();
4762 };
4763 
4764 bool G1ParEvacuateFollowersClosure::offer_termination() {
4765   G1ParScanThreadState* const pss = par_scan_state();
4766   pss->start_term_time();
4767   const bool res = terminator()->offer_termination();
4768   pss->end_term_time();
4769   return res;
4770 }
4771 
4772 void G1ParEvacuateFollowersClosure::do_void() {
4773   StarTask stolen_task;
4774   G1ParScanThreadState* const pss = par_scan_state();
4775   pss->trim_queue();
4776 
4777   do {
4778     while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
4779       assert(pss->verify_task(stolen_task), "sanity");
4780       if (stolen_task.is_narrow()) {
4781         pss->deal_with_reference((narrowOop*) stolen_task);
4782       } else {
4783         pss->deal_with_reference((oop*) stolen_task);
4784       }
4785 
4786       // We've just processed a reference and we might have made
4787       // available new entries on the queues. So we have to make sure
4788       // we drain the queues as necessary.
4789       pss->trim_queue();
4790     }
4791   } while (!offer_termination());
4792 
4793   pss->retire_alloc_buffers();
4794 }
4795 
4796 class G1KlassScanClosure : public KlassClosure {
4797  G1ParCopyHelper* _closure;
4798  bool             _process_only_dirty;
4799  int              _count;
4800  public:
4801   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4802       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4803   void do_klass(Klass* klass) {
4804     // If the klass has not been dirtied we know that there are
4805     // no references into the young gen and we can skip it.
4806     if (!_process_only_dirty || klass->has_modified_oops()) {
4807       // Clean the klass since we're going to scavenge all the metadata.
4808       klass->clear_modified_oops();
4809 
4810       // Tell the closure that this klass is the Klass to scavenge
4811       // and is the one to dirty if oops are left pointing into the young gen.
4812       _closure->set_scanned_klass(klass);
4813 
4814       klass->oops_do(_closure);
4815 
4816       _closure->set_scanned_klass(NULL);
4817     }
4818     _count++;
4819   }
4820 };
4821 
4822 class G1ParTask : public AbstractGangTask {
4823 protected:
4824   G1CollectedHeap*       _g1h;
4825   RefToScanQueueSet      *_queues;
4826   ParallelTaskTerminator _terminator;
4827   uint _n_workers;
4828 
4829   Mutex _stats_lock;
4830   Mutex* stats_lock() { return &_stats_lock; }
4831 
4832   size_t getNCards() {
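         // Number of block-offset-table "cards" needed to cover the current
         // heap capacity, i.e. capacity() / N_bytes rounded up.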
4833     return (_g1h->capacity() + G1BlockOffsetSharedArray::N_bytes - 1)
4834       / G1BlockOffsetSharedArray::N_bytes;
4835   }
4836 
4837 public:
4838   G1ParTask(G1CollectedHeap* g1h,
4839             RefToScanQueueSet *task_queues)
4840     : AbstractGangTask("G1 collection"),
4841       _g1h(g1h),
4842       _queues(task_queues),
4843       _terminator(0, _queues),
4844       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4845   {}
4846 
4847   RefToScanQueueSet* queues() { return _queues; }
4848 
4849   RefToScanQueue *work_queue(int i) {
4850     return queues()->queue(i);
4851   }
4852 
4853   ParallelTaskTerminator* terminator() { return &_terminator; }
4854 
4855   virtual void set_for_termination(int active_workers) {
4856     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4857     // in the young space (_par_seq_tasks) in the G1 heap
4858     // for SequentialSubTasksDone.
4859     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4860     // both of which need setting by set_n_termination().
4861     _g1h->SharedHeap::set_n_termination(active_workers);
4862     _g1h->set_n_termination(active_workers);
4863     terminator()->reset_for_reuse(active_workers);
4864     _n_workers = active_workers;
4865   }
4866 
4867   void work(uint worker_id) {
4868     if (worker_id >= _n_workers) return;  // no work needed this round
4869 
4870     double start_time_ms = os::elapsedTime() * 1000.0;
4871     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4872 
4873     {
4874       ResourceMark rm;
4875       HandleMark   hm;
4876 
4877       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4878 
4879       G1ParScanThreadState            pss(_g1h, worker_id);
4880       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
4881       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4882       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
4883 
4884       pss.set_evac_closure(&scan_evac_cl);
4885       pss.set_evac_failure_closure(&evac_failure_cl);
4886       pss.set_partial_scan_closure(&partial_scan_cl);
4887 
4888       G1ParScanExtRootClosure        only_scan_root_cl(_g1h, &pss, rp);
4889       G1ParScanMetadataClosure       only_scan_metadata_cl(_g1h, &pss, rp);
4890 
4891       G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4892       G1ParScanAndMarkMetadataClosure scan_mark_metadata_cl(_g1h, &pss, rp);
4893 
4894       bool only_young                 = _g1h->g1_policy()->gcs_are_young();
4895       G1KlassScanClosure              scan_mark_klasses_cl_s(&scan_mark_metadata_cl, false);
4896       G1KlassScanClosure              only_scan_klasses_cl_s(&only_scan_metadata_cl, only_young);
4897 
4898       OopClosure*                    scan_root_cl = &only_scan_root_cl;
4899       G1KlassScanClosure*            scan_klasses_cl = &only_scan_klasses_cl_s;
4900 
4901       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4902         // We also need to mark copied objects.
4903         scan_root_cl = &scan_mark_root_cl;
4904         scan_klasses_cl = &scan_mark_klasses_cl_s;
4905       }
4906 
4907       G1ParPushHeapRSClosure          push_heap_rs_cl(_g1h, &pss);
4908 
4909       int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
4910 
4911       pss.start_strong_roots();
4912       _g1h->g1_process_strong_roots(/* is scavenging */ true,
4913                                     SharedHeap::ScanningOption(so),
4914                                     scan_root_cl,
4915                                     &push_heap_rs_cl,
4916                                     scan_klasses_cl,
4917                                     worker_id);
4918       pss.end_strong_roots();
4919 
4920       {
4921         double start = os::elapsedTime();
4922         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4923         evac.do_void();
4924         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4925         double term_ms = pss.term_time()*1000.0;
4926         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4927         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4928       }
4929       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
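           // The "+1" below is assumed to skip entry 0 of the surviving-young-words
           // array, which tracks surviving bytes for non-young regions.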
4930       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4931 
4932       if (ParallelGCVerbose) {
4933         MutexLocker x(stats_lock());
4934         pss.print_termination_stats(worker_id);
4935       }
4936 
4937       assert(pss.refs()->is_empty(), "should be empty");
4938 
4939       // Close the inner scope so that the ResourceMark and HandleMark
4940       // destructors are executed here and are included as part of the
4941       // "GC Worker Time".
4942     }
4943 
4944     double end_time_ms = os::elapsedTime() * 1000.0;
4945     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4946   }
4947 };
4948 
4949 // *** Common G1 Evacuation Stuff
4950 
4951 // Closures that support the filtering of CodeBlobs scanned during
4952 // external root scanning.
4953 
4954 // Closure applied to reference fields in code blobs (specifically nmethods)
4955 // to determine whether an nmethod contains references that point into
4956 // the collection set. Used as a predicate when walking code roots so
4957 // that only nmethods that point into the collection set are added to the
4958 // 'marked' list.
4959 
4960 class G1FilteredCodeBlobToOopClosure : public CodeBlobToOopClosure {
4961 
4962   class G1PointsIntoCSOopClosure : public OopClosure {
4963     G1CollectedHeap* _g1;
4964     bool _points_into_cs;
4965   public:
4966     G1PointsIntoCSOopClosure(G1CollectedHeap* g1) :
4967       _g1(g1), _points_into_cs(false) { }
4968 
4969     bool points_into_cs() const { return _points_into_cs; }
4970 
4971     template <class T>
4972     void do_oop_nv(T* p) {
4973       if (!_points_into_cs) {
4974         T heap_oop = oopDesc::load_heap_oop(p);
4975         if (!oopDesc::is_null(heap_oop) &&
4976             _g1->in_cset_fast_test(oopDesc::decode_heap_oop_not_null(heap_oop))) {
4977           _points_into_cs = true;
4978         }
4979       }
4980     }
4981 
4982     virtual void do_oop(oop* p)        { do_oop_nv(p); }
4983     virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
4984   };
4985 
4986   G1CollectedHeap* _g1;
4987 
4988 public:
4989   G1FilteredCodeBlobToOopClosure(G1CollectedHeap* g1, OopClosure* cl) :
4990     CodeBlobToOopClosure(cl, true), _g1(g1) { }
4991 
4992   virtual void do_code_blob(CodeBlob* cb) {
4993     nmethod* nm = cb->as_nmethod_or_null();
4994     if (nm != NULL && !(nm->test_oops_do_mark())) {
4995       G1PointsIntoCSOopClosure predicate_cl(_g1);
4996       nm->oops_do(&predicate_cl);
4997 
4998       if (predicate_cl.points_into_cs()) {
4999         // At least one of the reference fields or the oop relocations
5000         // in the nmethod points into the collection set. We have to
5001         // 'mark' this nmethod.
5002         // Note: Revisit the following if CodeBlobToOopClosure::do_code_blob()
5003         // or MarkingCodeBlobClosure::do_code_blob() change.
5004         if (!nm->test_set_oops_do_mark()) {
5005           do_newly_marked_nmethod(nm);
5006         }
5007       }
5008     }
5009   }
5010 };
5011 
5012 // This method is run in a GC worker.
5013 
5014 void
5015 G1CollectedHeap::
5016 g1_process_strong_roots(bool is_scavenging,
5017                         ScanningOption so,
5018                         OopClosure* scan_non_heap_roots,
5019                         OopsInHeapRegionClosure* scan_rs,
5020                         G1KlassScanClosure* scan_klasses,
5021                         int worker_i) {
5022 
5023   // First scan the strong roots
5024   double ext_roots_start = os::elapsedTime();
5025   double closure_app_time_sec = 0.0;
5026 
5027   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
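       // The buffering closure batches up the discovered roots and applies
       // scan_non_heap_roots to them in bulk; the time spent applying the
       // closure is reported via closure_app_seconds() and attributed to
       // object copy below.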
5028 
5029   // Walk the code cache w/o buffering, because StarTask cannot handle
5030   // unaligned oop locations.
5031   G1FilteredCodeBlobToOopClosure eager_scan_code_roots(this, scan_non_heap_roots);
5032 
5033   process_strong_roots(false, // no scoping; this is parallel code
5034                        is_scavenging, so,
5035                        &buf_scan_non_heap_roots,
5036                        &eager_scan_code_roots,
5037                        scan_klasses
5038                        );
5039 
5040   // Now the CM ref_processor roots.
5041   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
5042     // We need to treat the discovered reference lists of the
5043     // concurrent mark ref processor as roots and keep entries
5044     // (which are added by the marking threads) on them live
5045     // until they can be processed at the end of marking.
5046     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
5047   }
5048 
5049   // Finish up any enqueued closure apps (attributed as object copy time).
5050   buf_scan_non_heap_roots.done();
5051 
5052   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds();
5053 
5054   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
5055 
5056   double ext_root_time_ms =
5057     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
5058 
5059   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
5060 
5061   // During conc marking we have to filter the per-thread SATB buffers
5062   // to make sure we remove any oops into the CSet (which will show up
5063   // as implicitly live).
5064   double satb_filtering_ms = 0.0;
5065   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
5066     if (mark_in_progress()) {
5067       double satb_filter_start = os::elapsedTime();
5068 
5069       JavaThread::satb_mark_queue_set().filter_thread_buffers();
5070 
5071       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
5072     }
5073   }
5074   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
5075 
5076   // Now scan the complement of the collection set.
5077   if (scan_rs != NULL) {
5078     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
5079   }
5080   _process_strong_tasks->all_tasks_completed();
5081 }
5082 
5083 void
5084 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
5085   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
5086   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
5087 }
5088 
5089 // Weak Reference Processing support
5090 
5091 // An always "is_alive" closure that is used to preserve referents.
5092 // If the object is non-null then it's alive.  Used in the preservation
5093 // of referent objects that are pointed to by reference objects
5094 // discovered by the CM ref processor.
5095 class G1AlwaysAliveClosure: public BoolObjectClosure {
5096   G1CollectedHeap* _g1;
5097 public:
5098   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5099   bool do_object_b(oop p) {
5100     if (p != NULL) {
5101       return true;
5102     }
5103     return false;
5104   }
5105 };
5106 
5107 bool G1STWIsAliveClosure::do_object_b(oop p) {
5108   // An object is reachable if it is outside the collection set,
5109   // or is inside and copied.
5110   return !_g1->obj_in_cs(p) || p->is_forwarded();
5111 }
5112 
5113 // Non Copying Keep Alive closure
5114 class G1KeepAliveClosure: public OopClosure {
5115   G1CollectedHeap* _g1;
5116 public:
5117   G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5118   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5119   void do_oop(      oop* p) {
5120     oop obj = *p;
5121 
5122     if (_g1->obj_in_cs(obj)) {
5123       assert( obj->is_forwarded(), "invariant" );
5124       *p = obj->forwardee();
5125     }
5126   }
5127 };
5128 
5129 // Copying Keep Alive closure - can be called from both
5130 // serial and parallel code as long as different worker
5131 // threads utilize different G1ParScanThreadState instances
5132 // and different queues.
5133 
5134 class G1CopyingKeepAliveClosure: public OopClosure {
5135   G1CollectedHeap*         _g1h;
5136   OopClosure*              _copy_non_heap_obj_cl;
5137   OopsInHeapRegionClosure* _copy_metadata_obj_cl;
5138   G1ParScanThreadState*    _par_scan_state;
5139 
5140 public:
5141   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5142                             OopClosure* non_heap_obj_cl,
5143                             OopsInHeapRegionClosure* metadata_obj_cl,
5144                             G1ParScanThreadState* pss):
5145     _g1h(g1h),
5146     _copy_non_heap_obj_cl(non_heap_obj_cl),
5147     _copy_metadata_obj_cl(metadata_obj_cl),
5148     _par_scan_state(pss)
5149   {}
5150 
5151   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5152   virtual void do_oop(      oop* p) { do_oop_work(p); }
5153 
5154   template <class T> void do_oop_work(T* p) {
5155     oop obj = oopDesc::load_decode_heap_oop(p);
5156 
5157     if (_g1h->obj_in_cs(obj)) {
5158       // If the referent object has been forwarded (either copied
5159       // to a new location or to itself in the event of an
5160       // evacuation failure) then we need to update the reference
5161       // field and, if both reference and referent are in the G1
5162       // heap, update the RSet for the referent.
5163       //
5164       // If the referent has not been forwarded then we have to keep
5165       // it alive by policy. Therefore we have to copy the referent.
5166       //
5167       // If the reference field is in the G1 heap then we can push
5168       // on the PSS queue. When the queue is drained (after each
5169       // phase of reference processing) the object and its followers
5170       // will be copied, the reference field set to point to the
5171       // new location, and the RSet updated. Otherwise we need to
5172       // use the non-heap or metadata closures directly to copy
5173       // the referent object and update the pointer, while avoiding
5174       // updating the RSet.
5175 
5176       if (_g1h->is_in_g1_reserved(p)) {
5177         _par_scan_state->push_on_queue(p);
5178       } else {
5179         assert(!ClassLoaderDataGraph::contains((address)p),
5180                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5181                               PTR_FORMAT, p));
5182         _copy_non_heap_obj_cl->do_oop(p);
5183       }
5184     }
5185   }
5186 };
5187 
5188 // Serial drain queue closure. Called as the 'complete_gc'
5189 // closure for each discovered list in some of the
5190 // reference processing phases.
5191 
5192 class G1STWDrainQueueClosure: public VoidClosure {
5193 protected:
5194   G1CollectedHeap* _g1h;
5195   G1ParScanThreadState* _par_scan_state;
5196 
5197   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
5198 
5199 public:
5200   G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5201     _g1h(g1h),
5202     _par_scan_state(pss)
5203   { }
5204 
5205   void do_void() {
5206     G1ParScanThreadState* const pss = par_scan_state();
5207     pss->trim_queue();
5208   }
5209 };
5210 
5211 // Parallel Reference Processing closures
5212 
5213 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5214 // processing during G1 evacuation pauses.
5215 
5216 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5217 private:
5218   G1CollectedHeap*   _g1h;
5219   RefToScanQueueSet* _queues;
5220   FlexibleWorkGang*  _workers;
5221   int                _active_workers;
5222 
5223 public:
5224   G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5225                         FlexibleWorkGang* workers,
5226                         RefToScanQueueSet *task_queues,
5227                         int n_workers) :
5228     _g1h(g1h),
5229     _queues(task_queues),
5230     _workers(workers),
5231     _active_workers(n_workers)
5232   {
5233     assert(n_workers > 0, "shouldn't call this otherwise");
5234   }
5235 
5236   // Executes the given task using the heap's parallel GC worker threads.
5237   virtual void execute(ProcessTask& task);
5238   virtual void execute(EnqueueTask& task);
5239 };
5240 
5241 // Gang task for possibly parallel reference processing
5242 
5243 class G1STWRefProcTaskProxy: public AbstractGangTask {
5244   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5245   ProcessTask&     _proc_task;
5246   G1CollectedHeap* _g1h;
5247   RefToScanQueueSet *_task_queues;
5248   ParallelTaskTerminator* _terminator;
5249 
5250 public:
5251   G1STWRefProcTaskProxy(ProcessTask& proc_task,
5252                      G1CollectedHeap* g1h,
5253                      RefToScanQueueSet *task_queues,
5254                      ParallelTaskTerminator* terminator) :
5255     AbstractGangTask("Process reference objects in parallel"),
5256     _proc_task(proc_task),
5257     _g1h(g1h),
5258     _task_queues(task_queues),
5259     _terminator(terminator)
5260   {}
5261 
5262   virtual void work(uint worker_id) {
5263     // The reference processing task executed by a single worker.
5264     ResourceMark rm;
5265     HandleMark   hm;
5266 
5267     G1STWIsAliveClosure is_alive(_g1h);
5268 
5269     G1ParScanThreadState pss(_g1h, worker_id);
5270 
5271     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5272     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5273     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5274 
5275     pss.set_evac_closure(&scan_evac_cl);
5276     pss.set_evac_failure_closure(&evac_failure_cl);
5277     pss.set_partial_scan_closure(&partial_scan_cl);
5278 
5279     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5280     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5281 
5282     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5283     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5284 
5285     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5286     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5287 
5288     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5289       // We also need to mark copied objects.
5290       copy_non_heap_cl = &copy_mark_non_heap_cl;
5291       copy_metadata_cl = &copy_mark_metadata_cl;
5292     }
5293 
5294     // Keep alive closure.
5295     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5296 
5297     // Complete GC closure
5298     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5299 
5300     // Call the reference processing task's work routine.
5301     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5302 
5303     // Note we cannot assert that the refs array is empty here as not all
5304     // of the processing tasks (specifically phase2 - pp2_work) execute
5305     // the complete_gc closure (which ordinarily would drain the queue) so
5306     // the queue may not be empty.
5307   }
5308 };
5309 
5310 // Driver routine for parallel reference processing.
5311 // Creates an instance of the ref processing gang
5312 // task and has the worker threads execute it.
5313 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5314   assert(_workers != NULL, "Need parallel worker threads.");
5315 
5316   ParallelTaskTerminator terminator(_active_workers, _queues);
5317   G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
5318 
5319   _g1h->set_par_threads(_active_workers);
5320   _workers->run_task(&proc_task_proxy);
5321   _g1h->set_par_threads(0);
5322 }
5323 
5324 // Gang task for parallel reference enqueueing.
5325 
5326 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5327   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5328   EnqueueTask& _enq_task;
5329 
5330 public:
5331   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5332     AbstractGangTask("Enqueue reference objects in parallel"),
5333     _enq_task(enq_task)
5334   { }
5335 
5336   virtual void work(uint worker_id) {
5337     _enq_task.work(worker_id);
5338   }
5339 };
5340 
5341 // Driver routine for parallel reference enqueueing.
5342 // Creates an instance of the ref enqueueing gang
5343 // task and has the worker threads execute it.
5344 
5345 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5346   assert(_workers != NULL, "Need parallel worker threads.");
5347 
5348   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5349 
5350   _g1h->set_par_threads(_active_workers);
5351   _workers->run_task(&enq_task_proxy);
5352   _g1h->set_par_threads(0);
5353 }
5354 
5355 // End of weak reference support closures
5356 
5357 // Abstract task used to preserve (i.e. copy) any referent objects
5358 // that are in the collection set and are pointed to by reference
5359 // objects discovered by the CM ref processor.
5360 
5361 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5362 protected:
5363   G1CollectedHeap* _g1h;
5364   RefToScanQueueSet      *_queues;
5365   ParallelTaskTerminator _terminator;
5366   uint _n_workers;
5367 
5368 public:
5369   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
5370     AbstractGangTask("ParPreserveCMReferents"),
5371     _g1h(g1h),
5372     _queues(task_queues),
5373     _terminator(workers, _queues),
5374     _n_workers(workers)
5375   { }
5376 
5377   void work(uint worker_id) {
5378     ResourceMark rm;
5379     HandleMark   hm;
5380 
5381     G1ParScanThreadState            pss(_g1h, worker_id);
5382     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
5383     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5384     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
5385 
5386     pss.set_evac_closure(&scan_evac_cl);
5387     pss.set_evac_failure_closure(&evac_failure_cl);
5388     pss.set_partial_scan_closure(&partial_scan_cl);
5389 
5390     assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5391 
5392 
5393     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5394     G1ParScanMetadataClosure       only_copy_metadata_cl(_g1h, &pss, NULL);
5395 
5396     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5397     G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL);
5398 
5399     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5400     OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5401 
5402     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5403       // We also need to mark copied objects.
5404       copy_non_heap_cl = &copy_mark_non_heap_cl;
5405       copy_metadata_cl = &copy_mark_metadata_cl;
5406     }
5407 
5408     // Is alive closure
5409     G1AlwaysAliveClosure always_alive(_g1h);
5410 
5411     // Copying keep alive closure. Applied to referent objects that need
5412     // to be copied.
5413     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss);
5414 
5415     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5416 
5417     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5418     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
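         // The stride is the number of workers, clamped to [1, limit], so
         // that the discovered lists are partitioned across the workers.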
5419 
5420     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5421     // So this must be true - but assert just in case someone decides to
5422     // change the worker ids.
5423     assert(worker_id < limit, "sanity");
5424     assert(!rp->discovery_is_atomic(), "check this code");
5425 
5426     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5427     for (uint idx = worker_id; idx < limit; idx += stride) {
5428       DiscoveredList& ref_list = rp->discovered_refs()[idx];
5429 
5430       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5431       while (iter.has_next()) {
5432         // Since discovery is not atomic for the CM ref processor, we
5433         // can see some null referent objects.
5434         iter.load_ptrs(DEBUG_ONLY(true));
5435         oop ref = iter.obj();
5436 
5437         // This will filter nulls.
5438         if (iter.is_referent_alive()) {
5439           iter.make_referent_alive();
5440         }
5441         iter.move_to_next();
5442       }
5443     }
5444 
5445     // Drain the queue - which may cause stealing
5446     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5447     drain_queue.do_void();
5448     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5449     assert(pss.refs()->is_empty(), "should be");
5450   }
5451 };
5452 
5453 // Weak Reference processing during an evacuation pause (part 1).
5454 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5455   double ref_proc_start = os::elapsedTime();
5456 
5457   ReferenceProcessor* rp = _ref_processor_stw;
5458   assert(rp->discovery_enabled(), "should have been enabled");
5459 
5460   // Any reference objects, in the collection set, that were 'discovered'
5461   // by the CM ref processor should have already been copied (either by
5462   // applying the external root copy closure to the discovered lists, or
5463   // by following an RSet entry).
5464   //
5465   // But some of the referents that these reference objects point to, and
5466   // that are in the collection set, may not have been copied: the STW ref
5467   // processor would have seen that the reference object had already
5468   // been 'discovered' and would have skipped discovering the reference,
5469   // but would not have treated the reference object as a regular oop.
5470   // As a result the copy closure would not have been applied to the
5471   // referent object.
5472   //
5473   // We need to explicitly copy these referent objects - the references
5474   // will be processed at the end of remarking.
5475   //
5476   // We also need to do this copying before we process the reference
5477   // objects discovered by the STW ref processor in case one of these
5478   // referents points to another object which is also referenced by an
5479   // object discovered by the STW ref processor.
5480 
5481   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5482            no_of_gc_workers == workers()->active_workers(),
5483            "Need to reset active GC workers");
5484 
5485   set_par_threads(no_of_gc_workers);
5486   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5487                                                  no_of_gc_workers,
5488                                                  _task_queues);
5489 
5490   if (G1CollectedHeap::use_parallel_gc_threads()) {
5491     workers()->run_task(&keep_cm_referents);
5492   } else {
5493     keep_cm_referents.work(0);
5494   }
5495 
5496   set_par_threads(0);
5497 
5498   // Closure to test whether a referent is alive.
5499   G1STWIsAliveClosure is_alive(this);
5500 
5501   // Even when parallel reference processing is enabled, the processing
5502   // of JNI refs is serial and is performed by the current thread rather
5503   // than by a worker. The following PSS will be used for processing
5504   // JNI refs.
5505 
5506   // Use only a single queue for this PSS.
5507   G1ParScanThreadState pss(this, 0);
5508 
5509   // We do not embed a reference processor in the copying/scanning
5510   // closures while we're actually processing the discovered
5511   // reference objects.
5512   G1ParScanHeapEvacClosure        scan_evac_cl(this, &pss, NULL);
5513   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5514   G1ParScanPartialArrayClosure    partial_scan_cl(this, &pss, NULL);
5515 
5516   pss.set_evac_closure(&scan_evac_cl);
5517   pss.set_evac_failure_closure(&evac_failure_cl);
5518   pss.set_partial_scan_closure(&partial_scan_cl);
5519 
5520   assert(pss.refs()->is_empty(), "pre-condition");
5521 
5522   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5523   G1ParScanMetadataClosure       only_copy_metadata_cl(this, &pss, NULL);
5524 
5525   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5526   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5527 
5528   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5529   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5530 
5531   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5532     // We also need to mark copied objects.
5533     copy_non_heap_cl = &copy_mark_non_heap_cl;
5534     copy_metadata_cl = &copy_mark_metadata_cl;
5535   }
5536 
5537   // Keep alive closure.
5538   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5539 
5540   // Serial Complete GC closure
5541   G1STWDrainQueueClosure drain_queue(this, &pss);
5542 
5543   // Setup the soft refs policy...
5544   rp->setup_policy(false);
5545 
5546   if (!rp->processing_is_mt()) {
5547     // Serial reference processing...
5548     rp->process_discovered_references(&is_alive,
5549                                       &keep_alive,
5550                                       &drain_queue,
5551                                       NULL);
5552   } else {
5553     // Parallel reference processing
5554     assert(rp->num_q() == no_of_gc_workers, "sanity");
5555     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5556 
5557     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5558     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
5559   }
5560 
5561   // We have completed copying any necessary live referent objects
5562   // (that were not copied during the actual pause) so we can
5563   // retire any active alloc buffers
5564   pss.retire_alloc_buffers();
5565   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5566 
5567   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5568   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5569 }
5570 
5571 // Weak Reference processing during an evacuation pause (part 2).
5572 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5573   double ref_enq_start = os::elapsedTime();
5574 
5575   ReferenceProcessor* rp = _ref_processor_stw;
5576   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5577 
5578   // Now enqueue any remaining on the discovered lists on to
5579   // the pending list.
5580   if (!rp->processing_is_mt()) {
5581     // Serial reference processing...
5582     rp->enqueue_discovered_references();
5583   } else {
5584     // Parallel reference enqueuing
5585 
5586     assert(no_of_gc_workers == workers()->active_workers(),
5587            "Need to reset active workers");
5588     assert(rp->num_q() == no_of_gc_workers, "sanity");
5589     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5590 
5591     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5592     rp->enqueue_discovered_references(&par_task_executor);
5593   }
5594 
5595   rp->verify_no_references_recorded();
5596   assert(!rp->discovery_enabled(), "should have been disabled");
5597 
5598   // FIXME
5599   // CM's reference processing also cleans up the string and symbol tables.
5600   // Should we do that here also? We could, but it is a serial operation
5601   // and could significantly increase the pause time.
5602 
5603   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5604   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5605 }
5606 
5607 void G1CollectedHeap::evacuate_collection_set() {
5608   _expand_heap_after_alloc_failure = true;
5609   set_evacuation_failed(false);
5610 
5611   // Should G1EvacuationFailureALot be in effect for this GC?
5612   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5613 
5614   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5615 
5616   // Disable the hot card cache.
5617   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5618   hot_card_cache->reset_hot_cache_claimed_index();
5619   hot_card_cache->set_use_cache(false);
5620 
5621   uint n_workers;
5622   if (G1CollectedHeap::use_parallel_gc_threads()) {
5623     n_workers =
5624       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5625                                      workers()->active_workers(),
5626                                      Threads::number_of_non_daemon_threads());
5627     assert(UseDynamicNumberOfGCThreads ||
5628            n_workers == workers()->total_workers(),
5629            "If not dynamic should be using all the workers");
5630     workers()->set_active_workers(n_workers);
5631     set_par_threads(n_workers);
5632   } else {
5633     assert(n_par_threads() == 0,
5634            "Should be the original non-parallel value");
5635     n_workers = 1;
5636   }
5637 
5638   G1ParTask g1_par_task(this, _task_queues);
5639 
5640   init_for_evac_failure(NULL);
5641 
5642   rem_set()->prepare_for_younger_refs_iterate(true);
5643 
5644   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5645   double start_par_time_sec = os::elapsedTime();
5646   double end_par_time_sec;
5647 
5648   {
5649     StrongRootsScope srs(this);
5650 
5651     if (G1CollectedHeap::use_parallel_gc_threads()) {
5652       // The individual threads will set their evac-failure closures.
5653       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5654       // These tasks use SharedHeap::_process_strong_tasks
5655       assert(UseDynamicNumberOfGCThreads ||
5656              workers()->active_workers() == workers()->total_workers(),
5657              "If not dynamic should be using all the workers");
5658       workers()->run_task(&g1_par_task);
5659     } else {
5660       g1_par_task.set_for_termination(n_workers);
5661       g1_par_task.work(0);
5662     }
5663     end_par_time_sec = os::elapsedTime();
5664 
5665     // Closing the inner scope will execute the destructor
5666     // for the StrongRootsScope object. We record the current
5667     // elapsed time before closing the scope so that time
5668     // taken for the SRS destructor is NOT included in the
5669     // reported parallel time.
5670   }
5671 
5672   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5673   g1_policy()->phase_times()->record_par_time(par_time_ms);
5674 
5675   double code_root_fixup_time_ms =
5676         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5677   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5678 
5679   set_par_threads(0);
5680 
5681   // Process any discovered reference objects - we have
5682   // to do this _before_ we retire the GC alloc regions
5683   // as we may have to copy some 'reachable' referent
5684   // objects (and their reachable sub-graphs) that were
5685   // not copied during the pause.
5686   process_discovered_references(n_workers);
5687 
5688   // Weak root processing.
5689   // Note: when JSR 292 is enabled and code blobs can contain
5690   // non-perm oops then we will need to process the code blobs
5691   // here too.
5692   {
5693     G1STWIsAliveClosure is_alive(this);
5694     G1KeepAliveClosure keep_alive(this);
5695     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5696   }
5697 
5698   release_gc_alloc_regions(n_workers);
5699   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5700 
5701   // Reset and re-enable the hot card cache.
5702   // Note the counts for the cards in the regions in the
5703   // collection set are reset when the collection set is freed.
5704   hot_card_cache->reset_hot_cache();
5705   hot_card_cache->set_use_cache(true);
5706 
5707   finalize_for_evac_failure();
5708 
5709   if (evacuation_failed()) {
5710     remove_self_forwarding_pointers();
5711 
5712     // Reset the G1EvacuationFailureALot counters and flags
5713     // Note: the values are reset only when an actual
5714     // evacuation failure occurs.
5715     NOT_PRODUCT(reset_evacuation_should_fail();)
5716   }
5717 
5718   // Enqueue any references remaining on the STW
5719   // reference processor's discovered lists. We need to do
5720   // this after the card table is cleaned (and verified) as
5721   // the act of enqueuing entries on to the pending list
5722   // will log these updates (and dirty their associated
5723   // cards). We need these updates logged to update any
5724   // RSets.
5725   enqueue_discovered_references(n_workers);
5726 
5727   if (G1DeferredRSUpdate) {
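         // Re-dirty the cards logged during the evacuation and then merge the
         // completed buffers into the shared JavaThread dirty card queue set
         // so that the deferred remembered set updates are picked up later.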
5728     RedirtyLoggedCardTableEntryFastClosure redirty;
5729     dirty_card_queue_set().set_closure(&redirty);
5730     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5731 
5732     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5733     dcq.merge_bufferlists(&dirty_card_queue_set());
5734     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5735   }
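       // C2 only: update any derived pointers whose base oops may have
       // moved during the evacuation.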
5736   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5737 }
5738 
5739 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5740                                      size_t* pre_used,
5741                                      FreeRegionList* free_list,
5742                                      OldRegionSet* old_proxy_set,
5743                                      HumongousRegionSet* humongous_proxy_set,
5744                                      HRRSCleanupTask* hrrs_cleanup_task,
5745                                      bool par) {
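       // Only free the region if it has been used, has no live data left
       // after marking (max_live_bytes() == 0), and is not young.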
5746   if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5747     if (hr->isHumongous()) {
5748       assert(hr->startsHumongous(), "we should only see starts humongous");
5749       free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5750     } else {
5751       _old_set.remove_with_proxy(hr, old_proxy_set);
5752       free_region(hr, pre_used, free_list, par);
5753     }
5754   } else {
5755     hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5756   }
5757 }
5758 
5759 void G1CollectedHeap::free_region(HeapRegion* hr,
5760                                   size_t* pre_used,
5761                                   FreeRegionList* free_list,
5762                                   bool par) {
5763   assert(!hr->isHumongous(), "this is only for non-humongous regions");
5764   assert(!hr->is_empty(), "the region should not be empty");
5765   assert(free_list != NULL, "pre-condition");
5766 
5767   // Clear the card counts for this region.
5768   // Note: we only need to do this if the region is not young
5769   // (since we don't refine cards in young regions).
5770   if (!hr->is_young()) {
5771     _cg1r->hot_card_cache()->reset_card_counts(hr);
5772   }
5773   *pre_used += hr->used();
5774   hr->hr_clear(par, true /* clear_space */);
5775   free_list->add_as_head(hr);
5776 }
5777 
5778 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5779                                      size_t* pre_used,
5780                                      FreeRegionList* free_list,
5781                                      HumongousRegionSet* humongous_proxy_set,
5782                                      bool par) {
5783   assert(hr->startsHumongous(), "this is only for starts humongous regions");
5784   assert(free_list != NULL, "pre-condition");
5785   assert(humongous_proxy_set != NULL, "pre-condition");
5786 
5787   size_t hr_used = hr->used();
5788   size_t hr_capacity = hr->capacity();
5789   size_t hr_pre_used = 0;
5790   _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5791   // We need to read this before we make the region non-humongous,
5792   // otherwise the information will be gone.
5793   uint last_index = hr->last_hc_index();
5794   hr->set_notHumongous();
5795   free_region(hr, &hr_pre_used, free_list, par);
5796 
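       // Free the "continues humongous" regions that follow this
       // "starts humongous" region, up to (but not including) last_index.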
5797   uint i = hr->hrs_index() + 1;
5798   while (i < last_index) {
5799     HeapRegion* curr_hr = region_at(i);
5800     assert(curr_hr->continuesHumongous(), "invariant");
5801     curr_hr->set_notHumongous();
5802     free_region(curr_hr, &hr_pre_used, free_list, par);
5803     i += 1;
5804   }
5805   assert(hr_pre_used == hr_used,
5806          err_msg("hr_pre_used: "SIZE_FORMAT" and hr_used: "SIZE_FORMAT" "
5807                  "should be the same", hr_pre_used, hr_used));
5808   *pre_used += hr_pre_used;
5809 }
5810 
5811 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used,
5812                                        FreeRegionList* free_list,
5813                                        OldRegionSet* old_proxy_set,
5814                                        HumongousRegionSet* humongous_proxy_set,
5815                                        bool par) {
5816   if (pre_used > 0) {
5817     Mutex* lock = (par) ? ParGCRareEvent_lock : NULL;
5818     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
5819     assert(_summary_bytes_used >= pre_used,
5820            err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" "
5821                    "should be >= pre_used: "SIZE_FORMAT,
5822                    _summary_bytes_used, pre_used));
5823     _summary_bytes_used -= pre_used;
5824   }
5825   if (free_list != NULL && !free_list->is_empty()) {
5826     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5827     _free_list.add_as_head(free_list);
5828   }
5829   if (old_proxy_set != NULL && !old_proxy_set->is_empty()) {
5830     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5831     _old_set.update_from_proxy(old_proxy_set);
5832   }
5833   if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) {
5834     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5835     _humongous_set.update_from_proxy(humongous_proxy_set);
5836   }
5837 }
5838 
5839 class G1ParCleanupCTTask : public AbstractGangTask {
5840   CardTableModRefBS* _ct_bs;
5841   G1CollectedHeap* _g1h;
5842   HeapRegion* volatile _su_head;
5843 public:
5844   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
5845                      G1CollectedHeap* g1h) :
5846     AbstractGangTask("G1 Par Cleanup CT Task"),
5847     _ct_bs(ct_bs), _g1h(g1h) { }
5848 
5849   void work(uint worker_id) {
5850     HeapRegion* r;
5851     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5852       clear_cards(r);
5853     }
5854   }
5855 
5856   void clear_cards(HeapRegion* r) {
5857     // Cards of the survivors should have already been dirtied.
5858     if (!r->is_survivor()) {
5859       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
5860     }
5861   }
5862 };
5863 
5864 #ifndef PRODUCT
5865 class G1VerifyCardTableCleanup: public HeapRegionClosure {
5866   G1CollectedHeap* _g1h;
5867   CardTableModRefBS* _ct_bs;
5868 public:
5869   G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
5870     : _g1h(g1h), _ct_bs(ct_bs) { }
5871   virtual bool doHeapRegion(HeapRegion* r) {
5872     if (r->is_survivor()) {
5873       _g1h->verify_dirty_region(r);
5874     } else {
5875       _g1h->verify_not_dirty_region(r);
5876     }
5877     return false;
5878   }
5879 };
5880 
5881 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
5882   // All of the region should be clean.
5883   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
5884   MemRegion mr(hr->bottom(), hr->end());
5885   ct_bs->verify_not_dirty_region(mr);
5886 }
5887 
5888 void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
5889   // We cannot guarantee that [bottom(),end()] is dirty.  Threads
5890   // dirty allocated blocks as they allocate them. The thread that
5891   // retires each region and replaces it with a new one will do a
5892   // maximal allocation to fill in [pre_dummy_top(),end()] but will
5893   // not dirty that area (one less thing to have to do while holding
5894   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
5895   // is dirty.
5896   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5897   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
5898   ct_bs->verify_dirty_region(mr);
5899 }
5900 
5901 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
5902   CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
5903   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
5904     verify_dirty_region(hr);
5905   }
5906 }
5907 
5908 void G1CollectedHeap::verify_dirty_young_regions() {
5909   verify_dirty_young_list(_young_list->first_region());
5910 }
5911 #endif
5912 
5913 void G1CollectedHeap::cleanUpCardTable() {
5914   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
5915   double start = os::elapsedTime();
5916 
5917   {
5918     // Iterate over the dirty cards region list.
5919     G1ParCleanupCTTask cleanup_task(ct_bs, this);
5920 
5921     if (G1CollectedHeap::use_parallel_gc_threads()) {
5922       set_par_threads();
5923       workers()->run_task(&cleanup_task);
5924       set_par_threads(0);
5925     } else {
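           // Walk and unlink the dirty cards region list serially. The last
           // region on the list points to itself, which is how the end of
           // the list is detected.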
5926       while (_dirty_cards_region_list) {
5927         HeapRegion* r = _dirty_cards_region_list;
5928         cleanup_task.clear_cards(r);
5929         _dirty_cards_region_list = r->get_next_dirty_cards_region();
5930         if (_dirty_cards_region_list == r) {
5931           // The last region.
5932           _dirty_cards_region_list = NULL;
5933         }
5934         r->set_next_dirty_cards_region(NULL);
5935       }
5936     }
5937 #ifndef PRODUCT
5938     if (G1VerifyCTCleanup || VerifyAfterGC) {
5939       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5940       heap_region_iterate(&cleanup_verifier);
5941     }
5942 #endif
5943   }
5944 
5945   double elapsed = os::elapsedTime() - start;
5946   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
5947 }
5948 
5949 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5950   size_t pre_used = 0;
5951   FreeRegionList local_free_list("Local List for CSet Freeing");
5952 
5953   double young_time_ms     = 0.0;
5954   double non_young_time_ms = 0.0;
5955 
5956   // Since the collection set is a superset of the young list,
5957   // all we need to do to clear the young list is clear its
5958   // head and length, and unlink any young regions in the code below.
5959   _young_list->clear();
5960 
5961   G1CollectorPolicy* policy = g1_policy();
5962 
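       // The time spent freeing the collection set is attributed separately
       // to its young and non-young regions; we switch accumulators below
       // whenever the region type changes as we walk the list.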
5963   double start_sec = os::elapsedTime();
5964   bool non_young = true;
5965 
5966   HeapRegion* cur = cs_head;
5967   int age_bound = -1;
5968   size_t rs_lengths = 0;
5969 
5970   while (cur != NULL) {
5971     assert(!is_on_master_free_list(cur), "sanity");
5972     if (non_young) {
5973       if (cur->is_young()) {
5974         double end_sec = os::elapsedTime();
5975         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5976         non_young_time_ms += elapsed_ms;
5977 
5978         start_sec = os::elapsedTime();
5979         non_young = false;
5980       }
5981     } else {
5982       if (!cur->is_young()) {
5983         double end_sec = os::elapsedTime();
5984         double elapsed_ms = (end_sec - start_sec) * 1000.0;
5985         young_time_ms += elapsed_ms;
5986 
5987         start_sec = os::elapsedTime();
5988         non_young = true;
5989       }
5990     }
5991 
5992     rs_lengths += cur->rem_set()->occupied();
5993 
5994     HeapRegion* next = cur->next_in_collection_set();
5995     assert(cur->in_collection_set(), "bad CS");
5996     cur->set_next_in_collection_set(NULL);
5997     cur->set_in_collection_set(false);
5998 
5999     if (cur->is_young()) {
6000       int index = cur->young_index_in_cset();
6001       assert(index != -1, "invariant");
6002       assert((uint) index < policy->young_cset_region_length(), "invariant");
6003       size_t words_survived = _surviving_young_words[index];
6004       cur->record_surv_words_in_group(words_survived);
6005 
6006       // At this point we have 'popped' cur from the collection set
6007       // (linked via next_in_collection_set()) but it is still in the
6008       // young list (linked via next_young_region()). Clear the
6009       // _next_young_region field.
6010       cur->set_next_young_region(NULL);
6011     } else {
6012       int index = cur->young_index_in_cset();
6013       assert(index == -1, "invariant");
6014     }
6015 
6016     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
6017             (!cur->is_young() && cur->young_index_in_cset() == -1),
6018             "invariant" );
6019 
6020     if (!cur->evacuation_failed()) {
6021       MemRegion used_mr = cur->used_region();
6022 
6023       // The region's used space should not be empty.
6024       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6025       free_region(cur, &pre_used, &local_free_list, false /* par */);
6026     } else {
6027       cur->uninstall_surv_rate_group();
6028       if (cur->is_young()) {
6029         cur->set_young_index_in_cset(-1);
6030       }
6031       cur->set_not_young();
6032       cur->set_evacuation_failed(false);
6033       // The region is now considered to be old.
6034       _old_set.add(cur);
6035     }
6036     cur = next;
6037   }
6038 
6039   policy->record_max_rs_lengths(rs_lengths);
6040   policy->cset_regions_freed();
6041 
6042   double end_sec = os::elapsedTime();
6043   double elapsed_ms = (end_sec - start_sec) * 1000.0;
6044 
6045   if (non_young) {
6046     non_young_time_ms += elapsed_ms;
6047   } else {
6048     young_time_ms += elapsed_ms;
6049   }
6050 
6051   update_sets_after_freeing_regions(pre_used, &local_free_list,
6052                                     NULL /* old_proxy_set */,
6053                                     NULL /* humongous_proxy_set */,
6054                                     false /* par */);
6055   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6056   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6057 }
6058 
6059 // This routine is similar to the above but does not record
6060 // any policy statistics or update free lists; we are abandoning
6061 // the current incremental collection set in preparation for a
6062 // full collection. After the full GC we will start to build up
6063 // the incremental collection set again.
6064 // This is only called when we're doing a full collection
6065 // and is immediately followed by the tearing down of the young list.
6066 
6067 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6068   HeapRegion* cur = cs_head;
6069 
6070   while (cur != NULL) {
6071     HeapRegion* next = cur->next_in_collection_set();
6072     assert(cur->in_collection_set(), "bad CS");
6073     cur->set_next_in_collection_set(NULL);
6074     cur->set_in_collection_set(false);
6075     cur->set_young_index_in_cset(-1);
6076     cur = next;
6077   }
6078 }
6079 
6080 void G1CollectedHeap::set_free_regions_coming() {
6081   if (G1ConcRegionFreeingVerbose) {
6082     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6083                            "setting free regions coming");
6084   }
6085 
6086   assert(!free_regions_coming(), "pre-condition");
6087   _free_regions_coming = true;
6088 }
6089 
6090 void G1CollectedHeap::reset_free_regions_coming() {
6091   assert(free_regions_coming(), "pre-condition");
6092 
6093   {
6094     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6095     _free_regions_coming = false;
6096     SecondaryFreeList_lock->notify_all();
6097   }
6098 
6099   if (G1ConcRegionFreeingVerbose) {
6100     gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
6101                            "reset free regions coming");
6102   }
6103 }
6104 
6105 void G1CollectedHeap::wait_while_free_regions_coming() {
6106   // Most of the time we won't have to wait, so let's do a quick test
6107   // first before we take the lock.
6108   if (!free_regions_coming()) {
6109     return;
6110   }
6111 
6112   if (G1ConcRegionFreeingVerbose) {
6113     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6114                            "waiting for free regions");
6115   }
6116 
6117   {
6118     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6119     while (free_regions_coming()) {
6120       SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
6121     }
6122   }
6123 
6124   if (G1ConcRegionFreeingVerbose) {
6125     gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
6126                            "done waiting for free regions");
6127   }
6128 }
6129 
6130 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
6131   assert(heap_lock_held_for_gc(),
6132               "the heap lock should already be held by or for this thread");
6133   _young_list->push_region(hr);
6134 }
6135 
6136 class NoYoungRegionsClosure: public HeapRegionClosure {
6137 private:
6138   bool _success;
6139 public:
6140   NoYoungRegionsClosure() : _success(true) { }
6141   bool doHeapRegion(HeapRegion* r) {
6142     if (r->is_young()) {
6143       gclog_or_tty->print_cr("Region ["PTR_FORMAT", "PTR_FORMAT") tagged as young",
6144                              r->bottom(), r->end());
6145       _success = false;
6146     }
6147     return false;
6148   }
6149   bool success() { return _success; }
6150 };
6151 
6152 bool G1CollectedHeap::check_young_list_empty(bool check_heap, bool check_sample) {
6153   bool ret = _young_list->check_list_empty(check_sample);
6154 
6155   if (check_heap) {
6156     NoYoungRegionsClosure closure;
6157     heap_region_iterate(&closure);
6158     ret = ret && closure.success();
6159   }
6160 
6161   return ret;
6162 }
6163 
6164 class TearDownRegionSetsClosure : public HeapRegionClosure {
6165 private:
6166   OldRegionSet *_old_set;
6167 
6168 public:
6169   TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { }
6170 
6171   bool doHeapRegion(HeapRegion* r) {
6172     if (r->is_empty()) {
6173       // We ignore empty regions, we'll empty the free list afterwards
6174     } else if (r->is_young()) {
6175       // We ignore young regions, we'll empty the young list afterwards
6176     } else if (r->isHumongous()) {
6177       // We ignore humongous regions, we're not tearing down the
6178       // humongous region set
6179     } else {
6180       // The rest should be old
6181       _old_set->remove(r);
6182     }
6183     return false;
6184   }
6185 
6186   ~TearDownRegionSetsClosure() {
6187     assert(_old_set->is_empty(), "post-condition");
6188   }
6189 };
6190 
6191 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6192   assert_at_safepoint(true /* should_be_vm_thread */);
6193 
6194   if (!free_list_only) {
6195     TearDownRegionSetsClosure cl(&_old_set);
6196     heap_region_iterate(&cl);
6197 
6198     // Need to do this after the heap iteration to be able to
6199     // recognize the young regions and ignore them during the iteration.
6200     _young_list->empty_list();
6201   }
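       // The master free list is always emptied, whether or not we are
       // also tearing down the other region sets.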
6202   _free_list.remove_all();
6203 }
6204 
6205 class RebuildRegionSetsClosure : public HeapRegionClosure {
6206 private:
6207   bool            _free_list_only;
6208   OldRegionSet*   _old_set;
6209   FreeRegionList* _free_list;
6210   size_t          _total_used;
6211 
6212 public:
6213   RebuildRegionSetsClosure(bool free_list_only,
6214                            OldRegionSet* old_set, FreeRegionList* free_list) :
6215     _free_list_only(free_list_only),
6216     _old_set(old_set), _free_list(free_list), _total_used(0) {
6217     assert(_free_list->is_empty(), "pre-condition");
6218     if (!free_list_only) {
6219       assert(_old_set->is_empty(), "pre-condition");
6220     }
6221   }
6222 
6223   bool doHeapRegion(HeapRegion* r) {
6224     if (r->continuesHumongous()) {
6225       return false;
6226     }
6227 
6228     if (r->is_empty()) {
6229       // Add free regions to the free list
6230       _free_list->add_as_tail(r);
6231     } else if (!_free_list_only) {
6232       assert(!r->is_young(), "we should not come across young regions");
6233 
6234       if (r->isHumongous()) {
6235         // We ignore humongous regions, we left the humongous set unchanged
6236       } else {
6237         // The rest should be old, add them to the old set
6238         _old_set->add(r);
6239       }
6240       _total_used += r->used();
6241     }
6242 
6243     return false;
6244   }
6245 
6246   size_t total_used() {
6247     return _total_used;
6248   }
6249 };
6250 
6251 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6252   assert_at_safepoint(true /* should_be_vm_thread */);
6253 
6254   RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list);
6255   heap_region_iterate(&cl);
6256 
6257   if (!free_list_only) {
6258     _summary_bytes_used = cl.total_used();
6259   }
6260   assert(_summary_bytes_used == recalculate_used(),
6261          err_msg("inconsistent _summary_bytes_used, "
6262                  "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6263                  _summary_bytes_used, recalculate_used()));
6264 }
6265 
6266 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6267   _refine_cte_cl->set_concurrent(concurrent);
6268 }
6269 
6270 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6271   HeapRegion* hr = heap_region_containing(p);
6272   if (hr == NULL) {
6273     return false;
6274   } else {
6275     return hr->is_in(p);
6276   }
6277 }
6278 
6279 // Methods for the mutator alloc region
6280 
6281 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6282                                                       bool force) {
6283   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6284   assert(!force || g1_policy()->can_expand_young_list(),
6285          "if force is true we should be able to expand the young list");
6286   bool young_list_full = g1_policy()->is_young_list_full();
6287   if (force || !young_list_full) {
6288     HeapRegion* new_alloc_region = new_region(word_size,
6289                                               false /* do_expand */);
6290     if (new_alloc_region != NULL) {
6291       set_region_short_lived_locked(new_alloc_region);
6292       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6293       return new_alloc_region;
6294     }
6295   }
6296   return NULL;
6297 }
6298 
6299 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6300                                                   size_t allocated_bytes) {
6301   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6302   assert(alloc_region->is_young(), "all mutator alloc regions should be young");
6303 
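       // Retired mutator (eden) regions are added to the left-hand side of
       // the incremental collection set.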
6304   g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6305   _summary_bytes_used += allocated_bytes;
6306   _hr_printer.retire(alloc_region);
6307   // We update the eden sizes here, when the region is retired,
6308   // instead of when it's allocated, since this is the point at which
6309   // its used space has been recorded in _summary_bytes_used.
6310   g1mm()->update_eden_size();
6311 }
6312 
6313 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
6314                                                     bool force) {
6315   return _g1h->new_mutator_alloc_region(word_size, force);
6316 }
6317 
6318 void G1CollectedHeap::set_par_threads() {
6319   // Don't change the number of workers.  Use the value previously set
6320   // in the workgroup.
6321   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
6322   uint n_workers = workers()->active_workers();
6323   assert(UseDynamicNumberOfGCThreads ||
6324            n_workers == workers()->total_workers(),
6325       "Otherwise should be using the total number of workers");
6326   if (n_workers == 0) {
6327     assert(false, "Should have been set in prior evacuation pause.");
6328     n_workers = ParallelGCThreads;
6329     workers()->set_active_workers(n_workers);
6330   }
6331   set_par_threads(n_workers);
6332 }
6333 
6334 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
6335                                        size_t allocated_bytes) {
6336   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
6337 }
6338 
6339 // Methods for the GC alloc regions
6340 
6341 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6342                                                  uint count,
6343                                                  GCAllocPurpose ap) {
6344   assert(FreeList_lock->owned_by_self(), "pre-condition");
6345 
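       // Only allocate a new GC alloc region if we are still below the
       // per-purpose limit on the number of such regions.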
6346   if (count < g1_policy()->max_regions(ap)) {
6347     HeapRegion* new_alloc_region = new_region(word_size,
6348                                               true /* do_expand */);
6349     if (new_alloc_region != NULL) {
6350       // We really only need to do this for old regions given that we
6351       // should never scan survivors. But it doesn't hurt to do it
6352       // for survivors too.
6353       new_alloc_region->set_saved_mark();
6354       if (ap == GCAllocForSurvived) {
6355         new_alloc_region->set_survivor();
6356         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6357       } else {
6358         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6359       }
6360       bool during_im = g1_policy()->during_initial_mark_pause();
6361       new_alloc_region->note_start_of_copying(during_im);
6362       return new_alloc_region;
6363     } else {
6364       g1_policy()->note_alloc_region_limit_reached(ap);
6365     }
6366   }
6367   return NULL;
6368 }
6369 
6370 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6371                                              size_t allocated_bytes,
6372                                              GCAllocPurpose ap) {
6373   bool during_im = g1_policy()->during_initial_mark_pause();
6374   alloc_region->note_end_of_copying(during_im);
6375   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6376   if (ap == GCAllocForSurvived) {
6377     young_list()->add_survivor_region(alloc_region);
6378   } else {
6379     _old_set.add(alloc_region);
6380   }
6381   _hr_printer.retire(alloc_region);
6382 }
6383 
6384 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
6385                                                        bool force) {
6386   assert(!force, "not supported for GC alloc regions");
6387   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
6388 }
6389 
6390 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
6391                                           size_t allocated_bytes) {
6392   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6393                                GCAllocForSurvived);
6394 }
6395 
6396 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
6397                                                   bool force) {
6398   assert(!force, "not supported for GC alloc regions");
6399   return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
6400 }
6401 
6402 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
6403                                      size_t allocated_bytes) {
6404   _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
6405                                GCAllocForTenured);
6406 }

6407 // Heap region set verification
6408 
6409 class VerifyRegionListsClosure : public HeapRegionClosure {
6410 private:
6411   FreeRegionList*     _free_list;
6412   OldRegionSet*       _old_set;
6413   HumongousRegionSet* _humongous_set;
6414   uint                _region_count;
6415 
6416 public:
6417   VerifyRegionListsClosure(OldRegionSet* old_set,
6418                            HumongousRegionSet* humongous_set,
6419                            FreeRegionList* free_list) :
6420     _old_set(old_set), _humongous_set(humongous_set),
6421     _free_list(free_list), _region_count(0) { }
6422 
6423   uint region_count() { return _region_count; }
6424 
6425   bool doHeapRegion(HeapRegion* hr) {
6426     _region_count += 1;
6427 
6428     if (hr->continuesHumongous()) {
6429       return false;
6430     }
6431 
6432     if (hr->is_young()) {
6433       // TODO
6434     } else if (hr->startsHumongous()) {
6435       _humongous_set->verify_next_region(hr);
6436     } else if (hr->is_empty()) {
6437       _free_list->verify_next_region(hr);
6438     } else {
6439       _old_set->verify_next_region(hr);
6440     }
6441     return false;
6442   }
6443 };
6444 
6445 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6446                                              HeapWord* bottom) {
6447   HeapWord* end = bottom + HeapRegion::GrainWords;
6448   MemRegion mr(bottom, end);
6449   assert(_g1_reserved.contains(mr), "invariant");
6450   // This might return NULL if the allocation fails
6451   return new HeapRegion(hrs_index, _bot_shared, mr);
6452 }
6453 
6454 void G1CollectedHeap::verify_region_sets() {
6455   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6456 
6457   // First, check the explicit lists.
6458   _free_list.verify();
6459   {
6460     // Given that a concurrent operation might be adding regions to
6461     // the secondary free list we have to take the lock before
6462     // verifying it.
6463     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6464     _secondary_free_list.verify();
6465   }
6466   _old_set.verify();
6467   _humongous_set.verify();
6468 
6469   // If a concurrent region freeing operation is in progress it will
6470   // be difficult to correctly attribute any free regions we come
6471   // across to the correct free list given that they might belong to
6472   // one of several (free_list, secondary_free_list, any local lists,
6473   // etc.). So, if that's the case we will skip the rest of the
6474   // verification operation. Alternatively, waiting for the concurrent
6475   // operation to complete will have a non-trivial effect on the GC's
6476   // operation (no concurrent operation will last longer than the
6477   // interval between two calls to verification) and it might hide
6478   // any issues that we would like to catch during testing.
6479   if (free_regions_coming()) {
6480     return;
6481   }
6482 
6483   // Make sure we append the secondary_free_list on the free_list so
6484   // that all free regions we will come across can be safely
6485   // attributed to the free_list.
6486   append_secondary_free_list_if_not_empty_with_lock();
6487 
6488   // Finally, make sure that the region accounting in the lists is
6489   // consistent with what we see in the heap.
6490   _old_set.verify_start();
6491   _humongous_set.verify_start();
6492   _free_list.verify_start();
6493 
6494   VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list);
6495   heap_region_iterate(&cl);
6496 
6497   _old_set.verify_end();
6498   _humongous_set.verify_end();
6499   _free_list.verify_end();
6500 }