src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"

  41 #include "gc_implementation/g1/heapRegion.inline.hpp"
  42 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  43 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  44 #include "gc_implementation/g1/vm_operations_g1.hpp"
  45 #include "gc_implementation/shared/isGCActiveMark.hpp"
  46 #include "memory/gcLocker.inline.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/generationSpec.hpp"
  49 #include "memory/referenceProcessor.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "oops/oop.pcgc.inline.hpp"
  52 #include "runtime/aprofiler.hpp"
  53 #include "runtime/vmThread.hpp"
  54 
  55 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  56 
  57 // Turn this on so that the contents of the young list (scan-only /
  58 // to-be-collected) are printed at "strategic" points before, during,
  59 // and after the collection --- this is useful for debugging.
  60 #define YOUNG_LIST_VERBOSE 0
  61 // CURRENT STATUS
  62 // This file is under construction.  Search for "FIXME".
  63 
  64 // INVARIANTS/NOTES
  65 //
  66 // All allocation activity covered by the G1CollectedHeap interface is
  67 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  68 // and allocate_new_tlab, which are the "entry" points to the
  69 // allocation code from the rest of the JVM.  (Note that this does not
  70 // apply to TLAB allocation, which is not part of this interface: it
  71 // is done by clients of this interface.)
  72 
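A minimal standalone sketch of the serialization invariant described above, using std::mutex in place of the VM's Heap_lock; ToyHeap, heap_lock_ and the malloc-backed allocation are invented for the illustration and are not HotSpot API:

#include <cstddef>
#include <cstdlib>
#include <mutex>

// Illustrative only: every heap-level allocation entry point takes the
// same lock, so all allocation covered by the interface is serialized.
// (Allocation *inside* a TLAB is done by the client and takes no lock;
// only TLAB refills come through here.)
class ToyHeap {
  std::mutex heap_lock_;                      // stands in for Heap_lock

  void* allocate_locked(size_t word_size) {   // toy backing allocation
    return std::malloc(word_size * sizeof(void*));
  }

public:
  void* mem_allocate(size_t word_size) {
    std::lock_guard<std::mutex> guard(heap_lock_);
    return allocate_locked(word_size);
  }
  void* allocate_new_tlab(size_t word_size) {
    std::lock_guard<std::mutex> guard(heap_lock_);
    return allocate_locked(word_size);
  }
};
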
  73 // Notes on implementation of parallelism in different tasks.
  74 //
  75 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  76 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  77 // It does use run_task() which sets _n_workers in the task.
  78 // G1ParTask executes g1_process_strong_roots() ->
  79 // SharedHeap::process_strong_roots() which eventually calls into
  80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  81 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  83 //
  84 
  85 // Local to this file.
  86 
  87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  88   SuspendibleThreadSet* _sts;
  89   G1RemSet* _g1rs;
  90   ConcurrentG1Refine* _cg1r;
  91   bool _concurrent;
  92 public:
  93   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  94                               G1RemSet* g1rs,
  95                               ConcurrentG1Refine* cg1r) :
  96     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
  97   {}
  98   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
  99     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);


 440 }
 441 
 442 #ifdef ASSERT
 443 // A region is added to the collection set as it is retired
 444 // so an address p can point to a region which will be in the
 445 // collection set but has not yet been retired.  This method
 446 // therefore is only accurate during a GC pause after all
 447 // regions have been retired.  It is used for debugging
 448 // to check if an nmethod has references to objects that can
 449 // be moved during a partial collection.  Though it can be
 450 // inaccurate, it is sufficient for G1 because the conservative
 451 // implementation of is_scavengable() for G1 will indicate that
 452 // all nmethods must be scanned during a partial collection.
 453 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 454   HeapRegion* hr = heap_region_containing(p);
 455   return hr != NULL && hr->in_collection_set();
 456 }
 457 #endif
 458 
 459 // Returns true if the reference points to an object that
 460 // can move in an incremental collection.
 461 bool G1CollectedHeap::is_scavengable(const void* p) {
 462   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 463   G1CollectorPolicy* g1p = g1h->g1_policy();
 464   HeapRegion* hr = heap_region_containing(p);
 465   if (hr == NULL) {
 466      // null
 467      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
 468      return false;
 469   } else {
 470     return !hr->isHumongous();
 471   }
 472 }
 473 
 474 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 475   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 476   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 477 
 478   // Count the dirty cards at the start.
 479   CountNonCleanMemRegionClosure count1(this);
 480   ct_bs->mod_card_iterate(&count1);


 531         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 532                                "secondary_free_list has %u entries",
 533                                _secondary_free_list.length());
 534       }
 535       // It looks as if there are free regions available on the
 536       // secondary_free_list. Let's move them to the free_list and try
 537       // again to allocate from it.
 538       append_secondary_free_list();
 539 
 540       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
 541              "empty we should have moved at least one entry to the free_list");
 542       HeapRegion* res = _free_list.remove_head();
 543       if (G1ConcRegionFreeingVerbose) {
 544         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 545                                "allocated "HR_FORMAT" from secondary_free_list",
 546                                HR_FORMAT_PARAMS(res));
 547       }
 548       return res;
 549     }
 550 
 551     // Wait here until we get notified either when (a) there are no
 552     // more free regions coming or (b) some regions have been moved onto
 553     // the secondary_free_list.
 554     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 555   }
 556 
 557   if (G1ConcRegionFreeingVerbose) {
 558     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 559                            "could not allocate from secondary_free_list");
 560   }
 561   return NULL;
 562 }
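The loop above is the classic monitor pattern: check the condition, wait, and re-check after every wakeup. A self-contained sketch of the same shape with std::condition_variable; ToyRegion, secondary_free_list and free_regions_coming are simplified stand-ins for the VM's structures, not its actual types:

#include <condition_variable>
#include <deque>
#include <mutex>

struct ToyRegion {};

std::mutex              secondary_free_list_lock;   // plays the role of SecondaryFreeList_lock
std::condition_variable secondary_free_list_cv;
std::deque<ToyRegion*>  secondary_free_list;
bool                    free_regions_coming = true; // cleared once cleanup is done

// Take a region from the secondary free list, or return nullptr once no
// more regions will be coming; mirrors the wait/re-check loop above.
ToyRegion* take_from_secondary_free_list() {
  std::unique_lock<std::mutex> lock(secondary_free_list_lock);
  for (;;) {
    if (!secondary_free_list.empty()) {
      ToyRegion* res = secondary_free_list.front();
      secondary_free_list.pop_front();
      return res;
    }
    if (!free_regions_coming) {
      return nullptr;   // could not allocate from the secondary free list
    }
    // Wait until (a) no more regions are coming or (b) some regions
    // have been moved onto the secondary free list.
    secondary_free_list_cv.wait(lock);
  }
}
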
 563 
 564 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
 565   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 566          "the only time we use this to allocate a humongous region is "
 567          "when we are allocating a single humongous region");
 568 
 569   HeapRegion* res;
 570   if (G1StressConcRegionFreeing) {
 571     if (!_secondary_free_list.is_empty()) {


 606       // it would probably be OK to use remove_head(). But the extra
 607       // check for NULL is unlikely to be a performance issue here (we
 608       // just expanded the heap!) so let's just be conservative and
 609       // use remove_head_or_null().
 610       res = _free_list.remove_head_or_null();
 611     } else {
 612       _expand_heap_after_alloc_failure = false;
 613     }
 614   }
 615   return res;
 616 }
 617 
 618 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
 619                                                         size_t word_size) {
 620   assert(isHumongous(word_size), "word_size should be humongous");
 621   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 622 
 623   uint first = G1_NULL_HRS_INDEX;
 624   if (num_regions == 1) {
 625     // Only one region to allocate, no need to go through the slower
 626     // path. The caller will attempt the expansion if this fails, so
 627     // let's not try to expand here too.
 628     HeapRegion* hr = new_region(word_size, false /* do_expand */);
 629     if (hr != NULL) {
 630       first = hr->hrs_index();
 631     } else {
 632       first = G1_NULL_HRS_INDEX;
 633     }
 634   } else {
 635     // We can't allocate humongous regions while cleanupComplete() is
 636     // running, since some of the regions we find to be empty might not
 637     // yet be added to the free list and it is not straightforward to
 638     // know which list they are on so that we can remove them. Note
 639     // that we only need to do this if we need to allocate more than
 640     // one region to satisfy the current humongous allocation
 641     // request. If we are only allocating one region we use the common
 642     // region allocation code (see above).
 643     wait_while_free_regions_coming();
 644     append_secondary_free_list_if_not_empty_with_lock();
 645 
 646     if (free_regions() >= num_regions) {


 671   uint last = first + num_regions;
 672 
 673   // We need to initialize the region(s) we just discovered. This is
 674   // a bit tricky given that it can happen concurrently with
 675   // refinement threads refining cards on these regions and
 676   // potentially wanting to refine the BOT as they are scanning
 677   // those cards (this can happen shortly after a cleanup; see CR
 678   // 6991377). So we have to set up the region(s) carefully and in
 679   // a specific order.
 680 
 681   // The word size sum of all the regions we will allocate.
 682   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 683   assert(word_size <= word_size_sum, "sanity");
 684 
 685   // This will be the "starts humongous" region.
 686   HeapRegion* first_hr = region_at(first);
 687   // The header of the new object will be placed at the bottom of
 688   // the first region.
 689   HeapWord* new_obj = first_hr->bottom();
 690   // This will be the new end of the first region in the series that
 691   // should also match the end of the last region in the series.
 692   HeapWord* new_end = new_obj + word_size_sum;
 693   // This will be the new top of the first region that will reflect
 694   // this allocation.
 695   HeapWord* new_top = new_obj + word_size;
 696 
 697   // First, we need to zero the header of the space that we will be
 698   // allocating. When we update top further down, some refinement
 699   // threads might try to scan the region. By zeroing the header we
 700   // ensure that any thread that will try to scan the region will
 701   // come across the zero klass word and bail out.
 702   //
 703   // NOTE: It would not have been correct to have used
 704   // CollectedHeap::fill_with_object() and make the space look like
 705   // an int array. The thread that is doing the allocation will
 706   // later update the object header to a potentially different array
 707   // type and, for a very short period of time, the klass and length
 708   // fields will be inconsistent. This could cause a refinement
 709   // thread to calculate the object size incorrectly.
 710   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 711 
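A hedged, standalone illustration of the publication order argued for above: a scanner that loads the klass word first and bails out on zero can never compute a size from a half-initialized header. ToyHeader and its fields are invented for this sketch; the real code works on object headers, not this struct:

#include <atomic>
#include <cstddef>
#include <cstdint>

struct ToyHeader {
  std::atomic<uintptr_t> klass;    // 0 means "not yet a parsable object"
  std::atomic<size_t>    length;   // only meaningful once klass is non-zero
};

// Refinement-style reader: only derives a size if the header has been
// fully published; a zero klass word makes it bail out.
size_t object_size_or_zero(const ToyHeader* h) {
  uintptr_t k = h->klass.load(std::memory_order_acquire);
  if (k == 0) {
    return 0;                      // header not published yet: bail out
  }
  return h->length.load(std::memory_order_relaxed);
}

// Allocator side: zero the klass word first, fill in the rest, then
// publish the real klass last so readers see a consistent pair.
void publish(ToyHeader* h, uintptr_t klass, size_t length) {
  h->klass.store(0, std::memory_order_relaxed);
  h->length.store(length, std::memory_order_relaxed);
  h->klass.store(klass, std::memory_order_release);
}
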


 846 
 847   verify_region_sets_optional();
 848 
 849   return result;
 850 }
 851 
 852 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 853   assert_heap_not_locked_and_not_at_safepoint();
 854   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 855 
 856   unsigned int dummy_gc_count_before;
 857   int dummy_gclocker_retry_count = 0;
 858   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 859 }
 860 
 861 HeapWord*
 862 G1CollectedHeap::mem_allocate(size_t word_size,
 863                               bool*  gc_overhead_limit_was_exceeded) {
 864   assert_heap_not_locked_and_not_at_safepoint();
 865 
 866   // Loop until the allocation is satisfied, or unsatisfied after GC.
 867   for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 868     unsigned int gc_count_before;
 869 
 870     HeapWord* result = NULL;
 871     if (!isHumongous(word_size)) {
 872       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
 873     } else {
 874       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
 875     }
 876     if (result != NULL) {
 877       return result;
 878     }
 879 
 880     // Create the garbage collection operation...
 881     VM_G1CollectForAllocation op(gc_count_before, word_size);
 882     // ...and get the VM thread to execute it.
 883     VMThread::execute(&op);
 884 
 885     if (op.prologue_succeeded() && op.pause_succeeded()) {
 886       // If the operation was successful we'll return the result even


 986         // If we get here we successfully scheduled a collection which
 987         // failed to allocate. No point in trying to allocate
 988         // further. We'll just return NULL.
 989         MutexLockerEx x(Heap_lock);
 990         *gc_count_before_ret = total_collections();
 991         return NULL;
 992       }
 993     } else {
 994       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
 995         MutexLockerEx x(Heap_lock);
 996         *gc_count_before_ret = total_collections();
 997         return NULL;
 998       }
 999       // The GCLocker is either active or the GCLocker initiated
1000       // GC has not yet been performed. Stall until it is and
1001       // then retry the allocation.
1002       GC_locker::stall_until_clear();
1003       (*gclocker_retry_count_ret) += 1;
1004     }
1005 
1006     // We can reach here if we were unsuccessful in scheduling a
1007     // collection (because another thread beat us to it) or if we were
1008     // stalled due to the GC locker. In either case we should retry the
1009     // allocation attempt in case another thread successfully
1010     // performed a collection and reclaimed enough space. We do the
1011     // first attempt (without holding the Heap_lock) here and the
1012     // follow-on attempt will be at the start of the next loop
1013     // iteration (after taking the Heap_lock).
1014     result = _mutator_alloc_region.attempt_allocation(word_size,
1015                                                       false /* bot_updates */);
1016     if (result != NULL) {
1017       return result;
1018     }
1019 
1020     // Give a warning if we seem to be looping forever.
1021     if ((QueuedAllocationWarningCount > 0) &&
1022         (try_count % QueuedAllocationWarningCount == 0)) {
1023       warning("G1CollectedHeap::attempt_allocation_slow() "
1024               "retries %d times", try_count);
1025     }
1026   }
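The slow path above is an instance of a common shape: attempt the allocation, schedule a collection keyed by the GC count that was read under the lock, stall if the GC locker interfered, and retry with a periodic warning. A simplified, self-contained sketch of that control flow; every name below (try_fast_allocate, schedule_collection, kWarnEvery, kGCLockerRetries, ...) is an illustrative stand-in, and the real code additionally distinguishes humongous requests and bot_updates:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

static unsigned gc_count = 0;
static bool     gc_locker_is_active = false;

static void* try_fast_allocate(size_t bytes)      { return std::malloc(bytes); }
static bool  schedule_collection(unsigned before) { if (before != gc_count) return false; ++gc_count; return true; }
static void  stall_until_gc_locker_clear()        { gc_locker_is_active = false; }

static const int kWarnEvery       = 4;  // plays the role of QueuedAllocationWarningCount
static const int kGCLockerRetries = 2;  // plays the role of GCLockerRetryAllocationCount

void* attempt_allocation_slow_sketch(size_t bytes) {
  int gclocker_retry_count = 0;
  for (int try_count = 1; /* we'll return */; ++try_count) {
    unsigned gc_count_before = gc_count;  // read while "holding the Heap_lock"

    void* result = try_fast_allocate(bytes);
    if (result != NULL) {
      return result;
    }

    if (!gc_locker_is_active) {
      // Schedule a collection keyed by the count we read. If it ran and
      // still could not satisfy the allocation there is no point retrying.
      if (schedule_collection(gc_count_before)) {
        return NULL;
      }
      // Another thread beat us to the collection: fall through and retry.
    } else {
      if (gclocker_retry_count > kGCLockerRetries) {
        return NULL;
      }
      stall_until_gc_locker_clear();      // wait for the locker-initiated GC
      ++gclocker_retry_count;
    }

    if (kWarnEvery > 0 && try_count % kWarnEvery == 0) {
      std::fprintf(stderr, "allocation retried %d times\n", try_count);
    }
  }
}
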


1111         // If we get here we successfully scheduled a collection which
1112         // failed to allocate. No point in trying to allocate
1113         // further. We'll just return NULL.
1114         MutexLockerEx x(Heap_lock);
1115         *gc_count_before_ret = total_collections();
1116         return NULL;
1117       }
1118     } else {
1119       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1120         MutexLockerEx x(Heap_lock);
1121         *gc_count_before_ret = total_collections();
1122         return NULL;
1123       }
1124       // The GCLocker is either active or the GCLocker initiated
1125       // GC has not yet been performed. Stall until it is and
1126       // then retry the allocation.
1127       GC_locker::stall_until_clear();
1128       (*gclocker_retry_count_ret) += 1;
1129     }
1130 
1131     // We can reach here if we were unsuccessful in scheduling a
1132     // collection (because another thread beat us to it) or if we were
1133     // stalled due to the GC locker. In either case we should retry the
1134     // allocation attempt in case another thread successfully
1135     // performed a collection and reclaimed enough space.  Give a
1136     // warning if we seem to be looping forever.
1137 
1138     if ((QueuedAllocationWarningCount > 0) &&
1139         (try_count % QueuedAllocationWarningCount == 0)) {
1140       warning("G1CollectedHeap::attempt_allocation_humongous() "
1141               "retries %d times", try_count);
1142     }
1143   }
1144 
1145   ShouldNotReachHere();
1146   return NULL;
1147 }
1148 
1149 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1150                                        bool expect_null_mutator_alloc_region) {
1151   assert_at_safepoint(true /* should_be_vm_thread */);


1281 
1282 void G1CollectedHeap::verify_before_gc() {
1283   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1284   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1285 }
1286 
1287 void G1CollectedHeap::verify_after_gc() {
1288   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1289   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1290 }
1291 
1292 bool G1CollectedHeap::do_collection(bool explicit_gc,
1293                                     bool clear_all_soft_refs,
1294                                     size_t word_size) {
1295   assert_at_safepoint(true /* should_be_vm_thread */);
1296 
1297   if (GC_locker::check_active_before_gc()) {
1298     return false;
1299   }
1300 

1301   SvcGCMarker sgcm(SvcGCMarker::FULL);
1302   ResourceMark rm;
1303 
1304   print_heap_before_gc();

1305 
1306   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1307 
1308   HRSPhaseSetter x(HRSPhaseFullGC);
1309   verify_region_sets_optional();
1310 
1311   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1312                            collector_policy()->should_clear_all_soft_refs();
1313 
1314   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1315 
1316   {
1317     IsGCActiveMark x;
1318 
1319     // Timing
1320     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1321     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1322     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1323 
1324     {
1325       TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
1326       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1327       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1328 
1329       double start = os::elapsedTime();
1330       g1_policy()->record_full_collection_start();
1331 
1332       // Note: When we have a more flexible GC logging framework that
1333       // allows us to add optional attributes to a GC log record we
1334       // could consider timing and reporting how long we wait in the
1335       // following two methods.
1336       wait_while_free_regions_coming();
1337       // If we start the compaction before the CM threads finish
1338       // scanning the root regions we might trip them over as we'll
1339       // be moving objects / updating references. So let's wait until
1340       // they are done. By telling them to abort, they should complete
1341       // early.
1342       _cm->root_regions()->abort();
1343       _cm->root_regions()->wait_until_scan_finished();
1344       append_secondary_free_list_if_not_empty_with_lock();
1345 
1346       gc_prologue(true);
1347       increment_total_collections(true /* full gc */);
1348       increment_old_marking_cycles_started();
1349 
1350       assert(used() == recalculate_used(), "Should be equal");
1351 
1352       verify_before_gc();
1353 
1354       pre_full_gc_dump();
1355 
1356       COMPILER2_PRESENT(DerivedPointerTable::clear());
1357 
1358       // Disable discovery and empty the discovered lists
1359       // for the CM ref processor.
1360       ref_processor_cm()->disable_discovery();
1361       ref_processor_cm()->abandon_partial_discovery();
1362       ref_processor_cm()->verify_no_references_recorded();
1363 
1364       // Abandon current iterations of concurrent marking and concurrent
1365       // refinement, if any are in progress. We have to do this before
1366       // wait_until_scan_finished() below.
1367       concurrent_mark()->abort();
1368 
1369       // Make sure we'll choose a new allocation region afterwards.
1370       release_mutator_alloc_region();
1371       abandon_gc_alloc_regions();
1372       g1_rem_set()->cleanupHRRS();
1373 
1374       // We should call this after we retire any currently active alloc


1418       MemoryService::track_memory_usage();
1419 
1420       verify_after_gc();
1421 
1422       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1423       ref_processor_stw()->verify_no_references_recorded();
1424 
1425       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1426       ClassLoaderDataGraph::purge();
1427       MetaspaceAux::verify_metrics();
1428 
1429       // Note: since we've just done a full GC, concurrent
1430       // marking is no longer active. Therefore we need not
1431       // re-enable reference discovery for the CM ref processor.
1432       // That will be done at the start of the next marking cycle.
1433       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1434       ref_processor_cm()->verify_no_references_recorded();
1435 
1436       reset_gc_time_stamp();
1437       // Since everything potentially moved, we will clear all remembered
1438       // sets, and clear all cards.  Later we will rebuild remembered
1439       // sets. We will also reset the GC time stamps of the regions.
1440       clear_rsets_post_compaction();
1441       check_gc_time_stamps();
1442 
1443       // Resize the heap if necessary.
1444       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1445 
1446       if (_hr_printer.is_active()) {
1447         // We should do this after we potentially resize the heap so
1448         // that all the COMMIT / UNCOMMIT events are generated before
1449         // the end GC event.
1450 
1451         print_hrs_post_compaction();
1452         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1453       }
1454 
1455       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1456       if (hot_card_cache->use_cache()) {
1457         hot_card_cache->reset_card_counts();
1458         hot_card_cache->reset_hot_cache();


1536       g1_policy()->record_full_collection_end();
1537 
1538       if (G1Log::fine()) {
1539         g1_policy()->print_heap_transition();
1540       }
1541 
1542       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1543       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1544       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1545       // before any GC notifications are raised.
1546       g1mm()->update_sizes();
1547 
1548       gc_epilogue(true);
1549     }
1550 
1551     if (G1Log::finer()) {
1552       g1_policy()->print_detailed_heap_transition(true /* full */);
1553     }
1554 
1555     print_heap_after_gc();

1556 
1557     post_full_gc_dump();

1558   }
1559 
1560   return true;
1561 }
1562 
1563 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1564   // do_collection() will return whether it succeeded in performing
1565   // the GC. Currently, there is no facility on the
1566   // do_full_collection() API to notify the caller that the collection
1567   // did not succeed (e.g., because it was locked out by the GC
1568   // locker). So, right now, we'll ignore the return value.
1569   bool dummy = do_collection(true,                /* explicit_gc */
1570                              clear_all_soft_refs,
1571                              0                    /* word_size */);
1572 }
1573 
1574 // This code is mostly copied from TenuredGeneration.
1575 void
1576 G1CollectedHeap::
1577 resize_if_necessary_after_full_collection(size_t word_size) {


1902 }
1903 
1904 // Public methods.
1905 
1906 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1907 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1908 #endif // _MSC_VER
1909 
1910 
1911 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1912   SharedHeap(policy_),
1913   _g1_policy(policy_),
1914   _dirty_card_queue_set(false),
1915   _into_cset_dirty_card_queue_set(false),
1916   _is_alive_closure_cm(this),
1917   _is_alive_closure_stw(this),
1918   _ref_processor_cm(NULL),
1919   _ref_processor_stw(NULL),
1920   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1921   _bot_shared(NULL),
1922   _evac_failure_scan_stack(NULL) ,
1923   _mark_in_progress(false),
1924   _cg1r(NULL), _summary_bytes_used(0),
1925   _g1mm(NULL),
1926   _refine_cte_cl(NULL),
1927   _full_collection(false),
1928   _free_list("Master Free List"),
1929   _secondary_free_list("Secondary Free List"),
1930   _old_set("Old Set"),
1931   _humongous_set("Master Humongous Set"),
1932   _free_regions_coming(false),
1933   _young_list(new YoungList(this)),
1934   _gc_time_stamp(0),
1935   _retained_old_gc_alloc_region(NULL),
1936   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1937   _old_plab_stats(OldPLABSize, PLABWeight),
1938   _expand_heap_after_alloc_failure(true),
1939   _surviving_young_words(NULL),
1940   _old_marking_cycles_started(0),
1941   _old_marking_cycles_completed(0),

1942   _in_cset_fast_test(NULL),
1943   _in_cset_fast_test_base(NULL),
1944   _dirty_cards_region_list(NULL),
1945   _worker_cset_start_region(NULL),
1946   _worker_cset_start_region_time_stamp(NULL) {
1947   _g1h = this; // To catch bugs.

1948   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1949     vm_exit_during_initialization("Failed necessary allocation.");
1950   }
1951 
1952   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1953 
1954   int n_queues = MAX2((int)ParallelGCThreads, 1);
1955   _task_queues = new RefToScanQueueSet(n_queues);
1956 
1957   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1958   assert(n_rem_sets > 0, "Invariant.");
1959 
1960   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1961   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);

1962 
1963   for (int i = 0; i < n_queues; i++) {
1964     RefToScanQueue* q = new RefToScanQueue();
1965     q->initialize();
1966     _task_queues->register_queue(i, q);

1967   }
1968 
1969   clear_cset_start_regions();
1970 
1971   // Initialize the G1EvacuationFailureALot counters and flags.
1972   NOT_PRODUCT(reset_evacuation_should_fail();)
1973 
1974   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1975 }
1976 
1977 jint G1CollectedHeap::initialize() {
1978   CollectedHeap::pre_initialize();
1979   os::enable_vtime();
1980 
1981   G1Log::init();
1982 
1983   // Necessary to satisfy locking discipline assertions.
1984 
1985   MutexLocker x(Heap_lock);
1986 
1987   // We have to initialize the printer before committing the heap, as
1988   // it will be used then.


2008 
2009   // When compressed oops are enabled, the preferred heap base
2010   // is calculated by subtracting the requested size from the
2011   // 32Gb boundary and using the result as the base address for
2012   // heap reservation. If the requested size is not aligned to
2013   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2014   // into the ReservedHeapSpace constructor) then the actual
2015   // base of the reserved heap may end up differing from the
2016   // address that was requested (i.e. the preferred heap base).
2017   // If this happens then we could end up using a non-optimal
2018   // compressed oops mode.
2019 
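As a concrete illustration of the paragraph above (the sizes are made up for the example): with a 4G maximum heap the preferred base would be 32G - 4G = 28G, i.e. 0x700000000, which keeps the whole heap below the boundary that still allows compressed oops. A tiny sketch of the arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t GB             = 1024ull * 1024 * 1024;
  const uint64_t oops_boundary  = 32 * GB;   // compressed oops limit assumed here
  const uint64_t max_heap_bytes = 4 * GB;    // example heap size

  // Preferred base: subtract the requested size from the 32Gb boundary.
  uint64_t preferred_base = oops_boundary - max_heap_bytes;
  std::printf("preferred heap base = 0x%llx\n",
              (unsigned long long) preferred_base);   // prints 0x700000000
  return 0;
}
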
2020   // max_byte_size is aligned to the size of a heap region (this was
2021   // checked above).
2022   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2023 
2024   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2025                                                  HeapRegion::GrainBytes);
2026 
2027   // It is important to do this in a way such that concurrent readers can't
2028   // temporarily think something is in the heap.  (I've actually seen this
2029   // happen in asserts: DLD.)
2030   _reserved.set_word_size(0);
2031   _reserved.set_start((HeapWord*)heap_rs.base());
2032   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2033 
2034   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2035 
2036   // Create the gen rem set (and barrier set) for the entire reserved region.
2037   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2038   set_barrier_set(rem_set()->bs());
2039   if (barrier_set()->is_a(BarrierSet::ModRef)) {
2040     _mr_bs = (ModRefBarrierSet*)_barrier_set;
2041   } else {
2042     vm_exit_during_initialization("G1 requires a mod ref bs.");
2043     return JNI_ENOMEM;
2044   }
2045 
2046   // Also create a G1 rem set.
2047   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2048     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());


2445   assert(concurrent ||
2446          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2447          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2448          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2449                  "is inconsistent with _old_marking_cycles_completed = %u",
2450                  _old_marking_cycles_started, _old_marking_cycles_completed));
2451 
2452   // This is the case for the outer caller, i.e. the concurrent cycle.
2453   assert(!concurrent ||
2454          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2455          err_msg("for outer caller (concurrent cycle): "
2456                  "_old_marking_cycles_started = %u "
2457                  "is inconsistent with _old_marking_cycles_completed = %u",
2458                  _old_marking_cycles_started, _old_marking_cycles_completed));
2459 
2460   _old_marking_cycles_completed += 1;
2461 
2462   // We need to clear the "in_progress" flag in the CM thread before
2463   // we wake up any waiters (especially when ExplicitInvokesConcurrent
2464   // is set) so that if a waiter requests another System.gc() it doesn't
2465   // incorrectly see that a marking cycle is still in progress.
2466   if (concurrent) {
2467     _cmThread->clear_in_progress();
2468   }
2469 
2470   // This notify_all() will ensure that a thread that called
2471   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2472   // and is waiting for a full GC to finish will be woken up. It is
2473   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2474   FullGCCount_lock->notify_all();
2475 }
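For context, the waiter this notify_all() targets (VM_G1IncCollectionPause::doit_epilogue()) follows the usual monitor handshake: it records the "started" count before requesting the pause and waits until the "completed" count catches up. A standalone sketch of that handshake with std::condition_variable; the names below are simplified stand-ins, not the VM's Monitor API:

#include <condition_variable>
#include <mutex>

std::mutex              full_gc_count_lock;  // plays the role of FullGCCount_lock
std::condition_variable full_gc_count_cv;
unsigned old_marking_cycles_started   = 0;
unsigned old_marking_cycles_completed = 0;

// GC side: mark a cycle completed and wake any System.gc() waiters.
void increment_old_marking_cycles_completed_sketch() {
  std::lock_guard<std::mutex> guard(full_gc_count_lock);
  ++old_marking_cycles_completed;
  full_gc_count_cv.notify_all();
}

// Waiter side (roughly what doit_epilogue() does): wait until the cycle
// observed as "started" has been reported as completed.
void wait_for_cycle_to_complete(unsigned started_before) {
  std::unique_lock<std::mutex> lock(full_gc_count_lock);
  full_gc_count_cv.wait(lock, [&] {
    return old_marking_cycles_completed >= started_before;
  });
}
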
2476 
2477 void G1CollectedHeap::collect(GCCause::Cause cause) {
2478   assert_heap_not_locked();
2479 
2480   unsigned int gc_count_before;
2481   unsigned int old_marking_count_before;
2482   bool retry_gc;
2483 
2484   do {
2485     retry_gc = false;
2486 
2487     {
2488       MutexLocker ml(Heap_lock);
2489 
2490       // Read the GC count while holding the Heap_lock
2491       gc_count_before = total_collections();
2492       old_marking_count_before = _old_marking_cycles_started;
2493     }
2494 
2495     if (should_do_concurrent_full_gc(cause)) {
2496       // Schedule an initial-mark evacuation pause that will start a


2659       assert(!r->continuesHumongous(), "sanity");
2660       if (r->startsHumongous()) {
2661         // If the region is "starts humongous" we'll iterate over its
2662         // "continues humongous" first; in fact we'll do them
 2663         // first. The order is important. In one case, calling the
2664         // closure on the "starts humongous" region might de-allocate
2665         // and clear all its "continues humongous" regions and, as a
2666         // result, we might end up processing them twice. So, we'll do
2667         // them first (notice: most closures will ignore them anyway) and
2668         // then we'll do the "starts humongous" region.
2669         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2670           HeapRegion* chr = region_at(ch_index);
2671 
2672           // if the region has already been claimed or it's not
2673           // "continues humongous" we're done
2674           if (chr->claim_value() == claim_value ||
2675               !chr->continuesHumongous()) {
2676             break;
2677           }
2678 
 2679           // No one should have claimed it directly, given that we
 2680           // claimed its "starts humongous" region.
2681           assert(chr->claim_value() != claim_value, "sanity");
2682           assert(chr->humongous_start_region() == r, "sanity");
2683 
2684           if (chr->claimHeapRegion(claim_value)) {
 2685             // we should always be able to claim it; no one else should
2686             // be trying to claim this region
2687 
2688             bool res2 = cl->doHeapRegion(chr);
2689             assert(!res2, "Should not abort");
2690 
2691             // Right now, this holds (i.e., no closure that actually
2692             // does something with "continues humongous" regions
2693             // clears them). We might have to weaken it in the future,
2694             // but let's leave these two asserts here for extra safety.
2695             assert(chr->continuesHumongous(), "should still be the case");
2696             assert(chr->humongous_start_region() == r, "sanity");
2697           } else {
2698             guarantee(false, "we should not reach here");
2699           }
2700         }
2701       }
2702 
2703       assert(!r->continuesHumongous(), "sanity");
2704       bool res = cl->doHeapRegion(r);
2705       assert(!res, "Should not abort");
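The claim protocol relied on here can be sketched on its own: each region carries a claim value and a worker claims it with a single compare-and-swap, so exactly one worker per iteration round processes any given region. The field and function names below are illustrative, not the HeapRegion API:

#include <atomic>
#include <cstdint>

struct ToyRegion {
  std::atomic<uint32_t> claim_value{0};   // 0 == unclaimed
};

// Returns true for exactly one caller per (region, claim_value) pair,
// which is what lets several workers walk the same region array without
// processing a region twice.
bool claim_region(ToyRegion* r, uint32_t claim_value) {
  uint32_t expected = r->claim_value.load(std::memory_order_relaxed);
  if (expected == claim_value) {
    return false;                         // already claimed in this round
  }
  return r->claim_value.compare_exchange_strong(expected, claim_value,
                                                std::memory_order_acq_rel);
}
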


2959 }
2960 
2961 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2962   Space* sp = space_containing(addr);
2963   return sp->block_is_obj(addr);
2964 }
2965 
2966 bool G1CollectedHeap::supports_tlab_allocation() const {
2967   return true;
2968 }
2969 
2970 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2971   return HeapRegion::GrainBytes;
2972 }
2973 
2974 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2975   // Return the remaining space in the cur alloc region, but not less than
2976   // the min TLAB size.
2977 
2978   // Also, this value can be at most the humongous object threshold,
 2979   // since we can't allow tlabs to grow big enough to accommodate
2980   // humongous objects.
2981 
2982   HeapRegion* hr = _mutator_alloc_region.get();
2983   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
2984   if (hr == NULL) {
2985     return max_tlab_size;
2986   } else {
2987     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
2988   }
2989 }
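The clamp above is min(max(free, MinTLABSize), humongous_threshold_in_words * wordSize). A tiny worked example with made-up figures, just to show the two-sided clamp; none of the constants below are the real flag values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t word_size          = sizeof(void*);
  const size_t threshold_in_words = 512 * 1024;      // example humongous threshold
  const size_t max_tlab_bytes     = threshold_in_words * word_size;
  const size_t min_tlab_bytes     = 2 * 1024;        // plays the role of MinTLABSize
  const size_t free_in_region     = 1408;            // nearly full current alloc region

  // MIN2(MAX2(hr->free(), MinTLABSize), max_tlab_size) from the code above.
  size_t tlab = std::min(std::max(free_in_region, min_tlab_bytes), max_tlab_bytes);
  std::printf("unsafe_max_tlab_alloc = %zu bytes\n", tlab);  // prints 2048
  return 0;
}
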
2990 
2991 size_t G1CollectedHeap::max_capacity() const {
2992   return _g1_reserved.byte_size();
2993 }
2994 
2995 jlong G1CollectedHeap::millis_since_last_gc() {
2996   // assert(false, "NYI");
2997   return 0;
2998 }
2999 


3718     g1_policy()->print_detailed_heap_transition();
3719   } else {
3720     if (evacuation_failed()) {
3721       gclog_or_tty->print("--");
3722     }
3723     g1_policy()->print_heap_transition();
3724     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3725   }
3726   gclog_or_tty->flush();
3727 }
3728 
3729 bool
3730 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3731   assert_at_safepoint(true /* should_be_vm_thread */);
3732   guarantee(!is_gc_active(), "collection is not reentrant");
3733 
3734   if (GC_locker::check_active_before_gc()) {
3735     return false;
3736   }
3737 

3738   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3739   ResourceMark rm;
3740 
3741   print_heap_before_gc();

3742 
3743   HRSPhaseSetter x(HRSPhaseEvacuation);
3744   verify_region_sets_optional();
3745   verify_dirty_young_regions();
3746 
3747   // This call will decide whether this pause is an initial-mark
3748   // pause. If it is, during_initial_mark_pause() will return true
3749   // for the duration of this pause.
3750   g1_policy()->decide_on_conc_mark_initiation();
3751 
3752   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3753   assert(!g1_policy()->during_initial_mark_pause() ||
3754           g1_policy()->gcs_are_young(), "sanity");
3755 
3756   // We also do not allow mixed GCs during marking.
3757   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3758 
3759   // Record whether this pause is an initial mark. When the current
3760   // thread has completed its logging output and it's safe to signal
3761   // the CM thread, the flag's value in the policy has been reset.
3762   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3763 
3764   // Inner scope for scope based logging, timers, and stats collection
3765   {
3766     if (g1_policy()->during_initial_mark_pause()) {
3767       // We are about to start a marking cycle, so we increment the
3768       // full collection counter.
3769       increment_old_marking_cycles_started();

3770     }
3771     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3772 
3773     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3774                                 workers()->active_workers() : 1);
3775     double pause_start_sec = os::elapsedTime();
3776     g1_policy()->phase_times()->note_gc_start(active_workers);
3777     log_gc_header();
3778 
3779     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3780     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3781 
3782     // If the secondary_free_list is not empty, append it to the
3783     // free_list. No need to wait for the cleanup operation to finish;
3784     // the region allocation code will check the secondary_free_list
3785     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3786     // set, skip this step so that the region allocation code has to
3787     // get entries from the secondary_free_list.
3788     if (!G1StressConcRegionFreeing) {
3789       append_secondary_free_list_if_not_empty_with_lock();
3790     }


3860           double scan_wait_end = os::elapsedTime();
3861           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3862         }
3863         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3864 
3865 #if YOUNG_LIST_VERBOSE
3866         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3867         _young_list->print();
3868 #endif // YOUNG_LIST_VERBOSE
3869 
3870         if (g1_policy()->during_initial_mark_pause()) {
3871           concurrent_mark()->checkpointRootsInitialPre();
3872         }
3873 
3874 #if YOUNG_LIST_VERBOSE
3875         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3876         _young_list->print();
3877         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3878 #endif // YOUNG_LIST_VERBOSE
3879 
3880         g1_policy()->finalize_cset(target_pause_time_ms);
3881 
3882         _cm->note_start_of_gc();
3883         // We should not verify the per-thread SATB buffers given that
3884         // we have not filtered them yet (we'll do so during the
3885         // GC). We also call this after finalize_cset() to
3886         // ensure that the CSet has been finalized.
3887         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3888                                  true  /* verify_enqueued_buffers */,
3889                                  false /* verify_thread_buffers */,
3890                                  true  /* verify_fingers */);
3891 
3892         if (_hr_printer.is_active()) {
3893           HeapRegion* hr = g1_policy()->collection_set();
3894           while (hr != NULL) {
3895             G1HRPrinter::RegionType type;
3896             if (!hr->is_young()) {
3897               type = G1HRPrinter::Old;
3898             } else if (hr->is_survivor()) {
3899               type = G1HRPrinter::Survivor;
3900             } else {
3901               type = G1HRPrinter::Eden;
3902             }
3903             _hr_printer.cset(hr);
3904             hr = hr->next_in_collection_set();
3905           }
3906         }
3907 
3908 #ifdef ASSERT
3909         VerifyCSetClosure cl;
3910         collection_set_iterate(&cl);
3911 #endif // ASSERT
3912 
3913         setup_surviving_young_words();
3914 
3915         // Initialize the GC alloc regions.
3916         init_gc_alloc_regions();
3917 
3918         // Actually do the work...
3919         evacuate_collection_set();
3920 
3921         // We do this mainly to verify the per-thread SATB buffers
3922         // (which have been filtered by now) since we didn't verify
3923         // them earlier. No point in re-checking the stacks / enqueued
3924         // buffers given that the CSet has not changed since last time
3925         // we checked.
3926         _cm->verify_no_cset_oops(false /* verify_stacks */,
3927                                  false /* verify_enqueued_buffers */,
3928                                  true  /* verify_thread_buffers */,
3929                                  true  /* verify_fingers */);
3930 
3931         free_collection_set(g1_policy()->collection_set());
3932         g1_policy()->clear_collection_set();
3933 
3934         cleanup_surviving_young_words();
3935 
3936         // Start a new incremental collection set for the next pause.
3937         g1_policy()->start_incremental_cset_building();
3938 
3939         // Clear the _cset_fast_test bitmap in anticipation of adding
3940         // regions to the incremental collection set for the next
3941         // evacuation pause.
3942         clear_cset_fast_test();
3943 
3944         _young_list->reset_sampled_info();
3945 
3946         // Don't check the whole heap at this point as the
3947         // GC alloc regions from this pause have been tagged
3948         // as survivors and moved on to the survivor list.
3949         // Survivor regions will fail the !is_young() check.
3950         assert(check_young_list_empty(false /* check_heap */),
3951           "young list should be empty");
3952 
3953 #if YOUNG_LIST_VERBOSE
3954         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3955         _young_list->print();
3956 #endif // YOUNG_LIST_VERBOSE
3957 
3958         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3959                                             _young_list->first_survivor_region(),
3960                                             _young_list->last_survivor_region());
3961 
3962         _young_list->reset_auxilary_lists();
3963 
3964         if (evacuation_failed()) {
3965           _summary_bytes_used = recalculate_used();
3966         } else {
3967           // The "used" of the collection set has already been subtracted
3968           // when they were freed.  Add in the bytes evacuated.
3969           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
3970         }
3971 
3972         if (g1_policy()->during_initial_mark_pause()) {
3973           // We have to do this before we notify the CM threads that
3974           // they can start working to make sure that all the
3975           // appropriate initialization is done on the CM object.
3976           concurrent_mark()->checkpointRootsInitialPost();
3977           set_marking_started();
3978           // Note that we don't actually trigger the CM thread at
3979           // this point. We do that later when we're sure that
3980           // the current thread has completed its logging output.
3981         }
3982 
3983         allocate_dummy_regions();
3984 
3985 #if YOUNG_LIST_VERBOSE


3988         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3989 #endif // YOUNG_LIST_VERBOSE
3990 
3991         init_mutator_alloc_region();
3992 
3993         {
3994           size_t expand_bytes = g1_policy()->expansion_amount();
3995           if (expand_bytes > 0) {
3996             size_t bytes_before = capacity();
3997             // No need for an ergo verbose message here,
3998             // expansion_amount() does this when it returns a value > 0.
3999             if (!expand(expand_bytes)) {
4000               // We failed to expand the heap so let's verify that
4001               // committed/uncommitted amount match the backing store
4002               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
4003               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
4004             }
4005           }
4006         }
4007 
4008         // We redo the verification but now with respect to the new CSet
4009         // which has just been initialized after the previous CSet was freed.
4010         _cm->verify_no_cset_oops(true  /* verify_stacks */,
4011                                  true  /* verify_enqueued_buffers */,
4012                                  true  /* verify_thread_buffers */,
4013                                  true  /* verify_fingers */);
4014         _cm->note_end_of_gc();
4015 
4016         // This timing is only used by the ergonomics to handle our pause target.
4017         // It is unclear why this should not include the full pause. We will
4018         // investigate this in CR 7178365.
4019         double sample_end_time_sec = os::elapsedTime();
4020         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4021         g1_policy()->record_collection_pause_end(pause_time_ms);
4022 
4023         MemoryService::track_memory_usage();
4024 
4025         // In prepare_for_verify() below we'll need to scan the deferred
4026         // update buffers to bring the RSets up-to-date if
4027         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4028         // the update buffers we'll probably need to scan cards on the
4029         // regions we just allocated to (i.e., the GC alloc
4030         // regions). However, during the last GC we called
4031         // set_saved_mark() on all the GC alloc regions, so card
4032         // scanning might skip the [saved_mark_word()...top()] area of
4033         // those regions (i.e., the area we allocated objects into
4034         // during the last GC). But it shouldn't. Given that
4035         // saved_mark_word() is conditional on whether the GC time stamp
4036         // on the region is current or not, by incrementing the GC time
4037         // stamp here we invalidate all the GC time stamps on all the
4038         // regions and saved_mark_word() will simply return top() for
4039         // all the regions. This is a nicer way of ensuring this rather
4040         // than iterating over the regions and fixing them. In fact, the
4041         // GC time stamp increment here also ensures that


4068 #endif
4069 
4070       gc_epilogue(false);
4071     }
4072 
4073     // Print the remainder of the GC log output.
4074     log_gc_footer(os::elapsedTime() - pause_start_sec);
4075 
4076     // It is not yet safe to tell the concurrent mark to
4077     // start as we have some optional output below. We don't want the
4078     // output from the concurrent mark thread interfering with this
4079     // logging output either.
4080 
4081     _hrs.verify_optional();
4082     verify_region_sets_optional();
4083 
4084     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4085     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4086 
4087     print_heap_after_gc();

4088 
4089     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4090     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4091     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4092     // before any GC notifications are raised.
4093     g1mm()->update_sizes();

4094   }
4095 
4096   if (G1SummarizeRSetStats &&
4097       (G1SummarizeRSetStatsPeriod > 0) &&
4098       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
4099     g1_rem_set()->print_summary_info();
4100   }
4101 
4102   // It should now be safe to tell the concurrent mark thread to start
4103   // without its logging output interfering with the logging output
4104   // that came from the pause.
4105 
4106   if (should_start_conc_mark) {
4107     // CAUTION: after the doConcurrentMark() call below,
4108     // the concurrent marking thread(s) could be running
4109     // concurrently with us. Make sure that anything after
4110     // this point does not assume that we are the only GC thread
4111     // running. Note: of course, the actual marking work will
4112     // not start until the safepoint itself is released in
4113     // ConcurrentGCThread::safepoint_desynchronize().
4114     doConcurrentMark();
4115   }
4116 
4117   return true;
4118 }
4119 
4120 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4121 {


4133       break;
4134   }
4135 
4136   // Prevent humongous PLAB sizes for two reasons:
4137   // * PLABs are allocated using similar paths as oops, but should
4138   //   never be in a humongous region
4139   // * Allowing humongous PLABs needlessly churns the region free lists
4140   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4141 }
4142 
4143 void G1CollectedHeap::init_mutator_alloc_region() {
4144   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4145   _mutator_alloc_region.init();
4146 }
4147 
4148 void G1CollectedHeap::release_mutator_alloc_region() {
4149   _mutator_alloc_region.release();
4150   assert(_mutator_alloc_region.get() == NULL, "post-condition");
4151 }
4152 
4153 void G1CollectedHeap::init_gc_alloc_regions() {
4154   assert_at_safepoint(true /* should_be_vm_thread */);
4155 
4156   _survivor_gc_alloc_region.init();
4157   _old_gc_alloc_region.init();
4158   HeapRegion* retained_region = _retained_old_gc_alloc_region;
4159   _retained_old_gc_alloc_region = NULL;
4160 
4161   // We will discard the current GC alloc region if:
4162   // a) it's in the collection set (it can happen!),
4163   // b) it's already full (no point in using it),
4164   // c) it's empty (this means that it was emptied during
4165   // a cleanup and it should be on the free list now), or
4166   // d) it's humongous (this means that it was emptied
4167   // during a cleanup and was added to the free list, but
4168   // has been subsequently used to allocate a humongous
4169   // object that may be less than the region size).
4170   if (retained_region != NULL &&
4171       !retained_region->in_collection_set() &&
4172       !(retained_region->top() == retained_region->end()) &&
4173       !retained_region->is_empty() &&
4174       !retained_region->isHumongous()) {
4175     retained_region->set_saved_mark();
4176     // The retained region was added to the old region set when it was
4177     // retired. We have to remove it now, since we don't allow regions
4178     // we allocate to in the region sets. We'll re-add it later, when
4179     // it's retired again.
4180     _old_set.remove(retained_region);
4181     bool during_im = g1_policy()->during_initial_mark_pause();
4182     retained_region->note_start_of_copying(during_im);
4183     _old_gc_alloc_region.set(retained_region);
4184     _hr_printer.reuse(retained_region);

4185   }
4186 }
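The retention test above can be read as a single predicate over conditions (a)-(d) from the comment. A small sketch with a toy region type standing in for HeapRegion, just to make the polarity of each test explicit:

struct ToyRetainedRegion {
  bool in_collection_set;
  bool full;        // top() == end() in the real code
  bool empty;
  bool humongous;
};

// Re-use the retained old GC alloc region only if none of the discard
// conditions (a)-(d) listed above apply.
bool should_reuse_retained_region(const ToyRetainedRegion* r) {
  return r != nullptr &&
         !r->in_collection_set &&   // (a) not in the collection set
         !r->full &&                // (b) not already full
         !r->empty &&               // (c) not emptied by a cleanup
         !r->humongous;             // (d) not re-used for a humongous object
}
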
4187 
4188 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
4189   _survivor_gc_alloc_region.release();
4190   // If we have an old GC alloc region to release, we'll save it in
4191   // _retained_old_gc_alloc_region. If we don't
4192   // _retained_old_gc_alloc_region will become NULL. This is what we
4193   // want either way so no reason to check explicitly for either
4194   // condition.
4195   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4196 
4197   if (ResizePLAB) {
4198     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4199     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4200   }
4201 }
4202 
4203 void G1CollectedHeap::abandon_gc_alloc_regions() {
4204   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4205   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4206   _retained_old_gc_alloc_region = NULL;
4207 }
4208 


4251   }
4252   _objs_with_preserved_marks.clear(true);
4253   _preserved_marks_of_objs.clear(true);
4254 }
4255 
4256 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4257   _evac_failure_scan_stack->push(obj);
4258 }
4259 
4260 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4261   assert(_evac_failure_scan_stack != NULL, "precondition");
4262 
4263   while (_evac_failure_scan_stack->length() > 0) {
4264      oop obj = _evac_failure_scan_stack->pop();
4265      _evac_failure_closure->set_region(heap_region_containing(obj));
4266      obj->oop_iterate_backwards(_evac_failure_closure);
4267   }
4268 }
4269 
4270 oop
4271 G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
4272                                                oop old) {
4273   assert(obj_in_cs(old),
4274          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4275                  (HeapWord*) old));
4276   markOop m = old->mark();
4277   oop forward_ptr = old->forward_to_atomic(old);
4278   if (forward_ptr == NULL) {
4279     // Forward-to-self succeeded.
 4280 
4281     if (_evac_failure_closure != cl) {
4282       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4283       assert(!_drain_in_progress,
4284              "Should only be true while someone holds the lock.");
4285       // Set the global evac-failure closure to the current thread's.
4286       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4287       set_evac_failure_closure(cl);
4288       // Now do the common part.
4289       handle_evacuation_failure_common(old, m);
4290       // Reset to NULL.
4291       set_evac_failure_closure(NULL);
4292     } else {
4293       // The lock is already held, and this is recursive.
4294       assert(_drain_in_progress, "This should only be the recursive case.");
4295       handle_evacuation_failure_common(old, m);
4296     }
4297     return old;
4298   } else {
4299     // Forward-to-self failed. Either someone else managed to allocate
4300     // space for this object (old != forward_ptr) or they beat us in
4301     // self-forwarding it (old == forward_ptr).
4302     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4303            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4304                    "should not be in the CSet",
4305                    (HeapWord*) old, (HeapWord*) forward_ptr));
4306     return forward_ptr;
4307   }
4308 }
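forward_to_atomic() above is essentially a compare-and-swap on the object's mark/forwarding word: the winner installs a pointer (here the object itself, i.e. forward-to-self), and every loser gets back whatever the winner installed. A standalone sketch of that race with std::atomic; ToyObj and its field are invented for the example:

#include <atomic>

struct ToyObj {
  std::atomic<ToyObj*> forwardee{nullptr};   // plays the role of the mark word
};

// Try to forward 'old_obj' to 'to' (which may be old_obj itself in the
// forward-to-self case). Returns nullptr if this thread won the race,
// otherwise the forwarding pointer another thread installed first --
// exactly the two outcomes handle_evacuation_failure_par() distinguishes.
ToyObj* forward_to_atomic_sketch(ToyObj* old_obj, ToyObj* to) {
  ToyObj* expected = nullptr;
  if (old_obj->forwardee.compare_exchange_strong(expected, to,
                                                 std::memory_order_acq_rel)) {
    return nullptr;       // we installed the forwarding pointer
  }
  return expected;        // someone else did; 'expected' now holds their value
}
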
4309 
4310 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
4311   set_evacuation_failed(true);
4312 
4313   preserve_mark_if_necessary(old, m);
4314 
4315   HeapRegion* r = heap_region_containing(old);
4316   if (!r->evacuation_failed()) {
4317     r->set_evacuation_failed(true);
4318     _hr_printer.evac_failure(r);
4319   }
4320 
4321   push_on_evac_failure_scan_stack(old);
4322 
4323   if (!_drain_in_progress) {
4324     // prevent recursion in copy_to_survivor_space()
4325     _drain_in_progress = true;
4326     drain_evac_failure_scan_stack();
4327     _drain_in_progress = false;
4328   }
4329 }
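// handle_evacuation_failure_common() above bounds recursion by always pushing
// the failed object on a scan stack and letting only the outermost caller
// drain it, guarded by _drain_in_progress. A standalone sketch of that guard
// pattern with hypothetical names (single-threaded for clarity; the real code
// serializes callers with EvacFailureStack_lock):

#include <vector>

class DrainGuardSketch {
  std::vector<int> _scan_stack;
  bool _drain_in_progress;
public:
  DrainGuardSketch() : _drain_in_progress(false) {}

  void handle(int item) {
    _scan_stack.push_back(item);          // always enqueue the work
    if (!_drain_in_progress) {            // only the outermost call drains
      _drain_in_progress = true;
      while (!_scan_stack.empty()) {
        int cur = _scan_stack.back();
        _scan_stack.pop_back();
        process(cur);                     // may call handle() again; it just pushes
      }
      _drain_in_progress = false;
    }
  }

private:
  void process(int item) {
    if (item > 0) {
      handle(item - 1);                   // "recursive" case: no nested drain
    }
  }
};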
4330 
4331 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4332   assert(evacuation_failed(), "Oversaving!");


4542   G1CollectorPolicy* g1p = _g1->g1_policy();
4543   markOop m = old->mark();
4544   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4545                                            : m->age();
4546   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4547                                                              word_sz);
4548   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4549 #ifndef PRODUCT
4550   // Should this evacuation fail?
4551   if (_g1->evacuation_should_fail()) {
4552     if (obj_ptr != NULL) {
4553       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4554       obj_ptr = NULL;
4555     }
4556   }
4557 #endif // !PRODUCT
4558 
4559   if (obj_ptr == NULL) {
4560     // This will either forward-to-self, or detect that someone else has
4561     // installed a forwarding pointer.
4562     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4563     return _g1->handle_evacuation_failure_par(cl, old);
4564   }
4565 
4566   oop obj = oop(obj_ptr);
4567 
4568   // We're going to allocate linearly, so might as well prefetch ahead.
4569   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4570 
4571   oop forward_ptr = old->forward_to_atomic(obj);
4572   if (forward_ptr == NULL) {
4573     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4574     if (g1p->track_object_age(alloc_purpose)) {
4575       // We could simply do obj->incr_age(). However, this causes a
4576       // performance issue. obj->incr_age() will first check whether
4577       // the object has a displaced mark by checking its mark word;
4578       // getting the mark word from the new location of the object
4579       // stalls. So, given that we already have the mark word and we
4580       // are about to install it anyway, it's better to increase the
4581       // age on the mark word, when the object does not have a
4582       // displaced mark word. We're not expecting many objects to have
4583   // a displaced mark word, so that case is not optimized


5147 
5148   template <class T> void do_oop_work(T* p) {
5149     oop obj = oopDesc::load_decode_heap_oop(p);
5150 
5151     if (_g1h->obj_in_cs(obj)) {
5152       // If the referent object has been forwarded (either copied
5153       // to a new location or to itself in the event of an
5154       // evacuation failure) then we need to update the reference
5155       // field and, if both reference and referent are in the G1
5156       // heap, update the RSet for the referent.
5157       //
5158       // If the referent has not been forwarded then we have to keep
5159   // it alive by policy. Therefore we have to copy the referent.
5160       //
5161       // If the reference field is in the G1 heap then we can push
5162   // it on the PSS queue. When the queue is drained (after each
5163   // phase of reference processing) the object and its followers
5164       // will be copied, the reference field set to point to the
5165       // new location, and the RSet updated. Otherwise we need to
5166   // use the non-heap or metadata closures directly to copy
5167   // the referent object and update the pointer, while avoiding
5168       // updating the RSet.
5169 
5170       if (_g1h->is_in_g1_reserved(p)) {
5171         _par_scan_state->push_on_queue(p);
5172       } else {
5173         assert(!ClassLoaderDataGraph::contains((address)p),
5174                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5175                               PTR_FORMAT, p));
5176         _copy_non_heap_obj_cl->do_oop(p);
5177       }
5178     }
5179   }
5180 };
5181 
5182 // Serial drain queue closure. Called as the 'complete_gc'
5183 // closure for each discovered list in some of the
5184 // reference processing phases.
5185 
5186 class G1STWDrainQueueClosure: public VoidClosure {
5187 protected:


5315   _g1h->set_par_threads(0);
5316 }
5317 
5318 // Gang task for parallel reference enqueueing.
5319 
5320 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5321   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5322   EnqueueTask& _enq_task;
5323 
5324 public:
5325   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5326     AbstractGangTask("Enqueue reference objects in parallel"),
5327     _enq_task(enq_task)
5328   { }
5329 
5330   virtual void work(uint worker_id) {
5331     _enq_task.work(worker_id);
5332   }
5333 };
5334 
5335 // Driver routine for parallel reference enqueueing.
5336 // Creates an instance of the ref enqueueing gang
5337 // task and has the worker threads execute it.
5338 
5339 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5340   assert(_workers != NULL, "Need parallel worker threads.");
5341 
5342   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5343 
5344   _g1h->set_par_threads(_active_workers);
5345   _workers->run_task(&enq_task_proxy);
5346   _g1h->set_par_threads(0);
5347 }
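// The proxies above only adapt interfaces: the work gang hands each worker an
// id and the proxy forwards it to the wrapped task's work(worker_id). A
// standalone sketch of that fan-out using std::thread in place of the HotSpot
// work gang (hypothetical types, illustration only):

#include <thread>
#include <vector>

struct GangTaskSketch {
  virtual void work(unsigned worker_id) = 0;
  virtual ~GangTaskSketch() {}
};

static void run_task_sketch(GangTaskSketch& task, unsigned n_workers) {
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < n_workers; i++) {
    gang.push_back(std::thread(&GangTaskSketch::work, &task, i));  // worker i runs work(i)
  }
  for (size_t i = 0; i < gang.size(); i++) {
    gang[i].join();                                                // wait for the whole gang
  }
}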
5348 
5349 // End of weak reference support closures
5350 
5351 // Abstract task used to preserve (i.e. copy) any referent objects
5352 // that are in the collection set and are pointed to by reference
5353 // objects discovered by the CM ref processor.
5354 
5355 class G1ParPreserveCMReferentsTask: public AbstractGangTask {


5444   }
5445 };
5446 
5447 // Weak Reference processing during an evacuation pause (part 1).
5448 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5449   double ref_proc_start = os::elapsedTime();
5450 
5451   ReferenceProcessor* rp = _ref_processor_stw;
5452   assert(rp->discovery_enabled(), "should have been enabled");
5453 
5454   // Any reference objects, in the collection set, that were 'discovered'
5455   // by the CM ref processor should have already been copied (either by
5456   // applying the external root copy closure to the discovered lists, or
5457   // by following an RSet entry).
5458   //
5459   // But some of the referents in the collection set that these
5460   // reference objects point to may not have been copied: the STW ref
5461   // processor would have seen that the reference object had already
5462   // been 'discovered' and would have skipped discovering the reference,
5463   // but would not have treated the reference object as a regular oop.
5464   // As a result the copy closure would not have been applied to the
5465   // referent object.
5466   //
5467   // We need to explicitly copy these referent objects - the references
5468   // will be processed at the end of remarking.
5469   //
5470   // We also need to do this copying before we process the reference
5471   // objects discovered by the STW ref processor in case one of these
5472   // referents points to another object which is also referenced by an
5473   // object discovered by the STW ref processor.
5474 
5475   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5476            no_of_gc_workers == workers()->active_workers(),
5477            "Need to reset active GC workers");
5478 
5479   set_par_threads(no_of_gc_workers);
5480   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5481                                                  no_of_gc_workers,
5482                                                  _task_queues);
5483 
5484   if (G1CollectedHeap::use_parallel_gc_threads()) {


5520   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5521 
5522   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5523   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5524 
5525   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5526     // We also need to mark copied objects.
5527     copy_non_heap_cl = &copy_mark_non_heap_cl;
5528     copy_metadata_cl = &copy_mark_metadata_cl;
5529   }
5530 
5531   // Keep alive closure.
5532   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5533 
5534   // Serial Complete GC closure
5535   G1STWDrainQueueClosure drain_queue(this, &pss);
5536 
5537   // Setup the soft refs policy...
5538   rp->setup_policy(false);
5539 

5540   if (!rp->processing_is_mt()) {
5541     // Serial reference processing...
5542     rp->process_discovered_references(&is_alive,
5543                                       &keep_alive,
5544                                       &drain_queue,
5545                                       NULL);

5546   } else {
5547     // Parallel reference processing
5548     assert(rp->num_q() == no_of_gc_workers, "sanity");
5549     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5550 
5551     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5552     rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);




5553   }
5554 

5555   // We have completed copying any necessary live referent objects
5556   // (that were not copied during the actual pause) so we can
5557   // retire any active alloc buffers
5558   pss.retire_alloc_buffers();
5559   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5560 
5561   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5562   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5563 }
5564 
5565 // Weak Reference processing during an evacuation pause (part 2).
5566 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5567   double ref_enq_start = os::elapsedTime();
5568 
5569   ReferenceProcessor* rp = _ref_processor_stw;
5570   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5571 
5572   // Now enqueue any remaining on the discovered lists on to
5573   // the pending list.
5574   if (!rp->processing_is_mt()) {
5575     // Serial reference processing...
5576     rp->enqueue_discovered_references();
5577   } else {
5578     // Parallel reference enqueuing
5579 
5580     assert(no_of_gc_workers == workers()->active_workers(),
5581            "Need to reset active workers");
5582     assert(rp->num_q() == no_of_gc_workers, "sanity");
5583     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5584 
5585     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5586     rp->enqueue_discovered_references(&par_task_executor);
5587   }
5588 
5589   rp->verify_no_references_recorded();
5590   assert(!rp->discovery_enabled(), "should have been disabled");
5591 
5592   // FIXME
5593   // CM's reference processing also cleans up the string and symbol tables.
5594   // Should we do that here also? We could, but it is a serial operation
5595   // and could significantly increase the pause time.
5596 
5597   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5598   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5599 }
5600 
5601 void G1CollectedHeap::evacuate_collection_set() {
5602   _expand_heap_after_alloc_failure = true;
5603   set_evacuation_failed(false);
5604 
5605   // Should G1EvacuationFailureALot be in effect for this GC?
5606   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5607 
5608   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5609 
5610   // Disable the hot card cache.
5611   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5612   hot_card_cache->reset_hot_cache_claimed_index();
5613   hot_card_cache->set_use_cache(false);
5614 
5615   uint n_workers;
5616   if (G1CollectedHeap::use_parallel_gc_threads()) {
5617     n_workers =
5618       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5619                                      workers()->active_workers(),
5620                                      Threads::number_of_non_daemon_threads());
5621     assert(UseDynamicNumberOfGCThreads ||
5622            n_workers == workers()->total_workers(),
5623            "If not dynamic should be using all the workers");


5672 
5673   set_par_threads(0);
5674 
5675   // Process any discovered reference objects - we have
5676   // to do this _before_ we retire the GC alloc regions
5677   // as we may have to copy some 'reachable' referent
5678   // objects (and their reachable sub-graphs) that were
5679   // not copied during the pause.
5680   process_discovered_references(n_workers);
5681 
5682   // Weak root processing.
5683   // Note: when JSR 292 is enabled and code blobs can contain
5684   // non-perm oops then we will need to process the code blobs
5685   // here too.
5686   {
5687     G1STWIsAliveClosure is_alive(this);
5688     G1KeepAliveClosure keep_alive(this);
5689     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5690   }
5691 
5692   release_gc_alloc_regions(n_workers);
5693   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5694 
5695   // Reset and re-enable the hot card cache.
5696   // Note the counts for the cards in the regions in the
5697   // collection set are reset when the collection set is freed.
5698   hot_card_cache->reset_hot_cache();
5699   hot_card_cache->set_use_cache(true);
5700 
5701   finalize_for_evac_failure();
5702 
5703   if (evacuation_failed()) {
5704     remove_self_forwarding_pointers();
5705 
5706     // Reset the G1EvacuationFailureALot counters and flags
5707     // Note: the values are reset only when an actual
5708     // evacuation failure occurs.
5709     NOT_PRODUCT(reset_evacuation_should_fail();)
5710   }
5711 
5712   // Enqueue any references remaining on the STW
5713   // reference processor's discovered lists. We need to do
5714   // this after the card table is cleaned (and verified) as
5715   // the act of enqueuing entries on to the pending list
5716   // will log these updates (and dirty their associated
5717   // cards). We need these updates logged to update any
5718   // RSets.
5719   enqueue_discovered_references(n_workers);
5720 
5721   if (G1DeferredRSUpdate) {
5722     RedirtyLoggedCardTableEntryFastClosure redirty;
5723     dirty_card_queue_set().set_closure(&redirty);
5724     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5725 
5726     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5727     dcq.merge_bufferlists(&dirty_card_queue_set());
5728     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5729   }
5730   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5731 }
5732 
5733 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5734                                      size_t* pre_used,
5735                                      FreeRegionList* free_list,


5923         _dirty_cards_region_list = r->get_next_dirty_cards_region();
5924         if (_dirty_cards_region_list == r) {
5925           // The last region.
5926           _dirty_cards_region_list = NULL;
5927         }
5928         r->set_next_dirty_cards_region(NULL);
5929       }
5930     }
5931 #ifndef PRODUCT
5932     if (G1VerifyCTCleanup || VerifyAfterGC) {
5933       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5934       heap_region_iterate(&cleanup_verifier);
5935     }
5936 #endif
5937   }
5938 
5939   double elapsed = os::elapsedTime() - start;
5940   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
5941 }
5942 
5943 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5944   size_t pre_used = 0;
5945   FreeRegionList local_free_list("Local List for CSet Freeing");
5946 
5947   double young_time_ms     = 0.0;
5948   double non_young_time_ms = 0.0;
5949 
5950   // Since the collection set is a superset of the young list,
5951   // all we need to do to clear the young list is clear its
5952   // head and length, and unlink any young regions in the code below
5953   _young_list->clear();
5954 
5955   G1CollectorPolicy* policy = g1_policy();
5956 
5957   double start_sec = os::elapsedTime();
5958   bool non_young = true;
5959 
5960   HeapRegion* cur = cs_head;
5961   int age_bound = -1;
5962   size_t rs_lengths = 0;
5963 


6009 
6010     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
6011             (!cur->is_young() && cur->young_index_in_cset() == -1),
6012             "invariant" );
6013 
6014     if (!cur->evacuation_failed()) {
6015       MemRegion used_mr = cur->used_region();
6016 
6017       // The region should not be empty at this point.
6018       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6019       free_region(cur, &pre_used, &local_free_list, false /* par */);
6020     } else {
6021       cur->uninstall_surv_rate_group();
6022       if (cur->is_young()) {
6023         cur->set_young_index_in_cset(-1);
6024       }
6025       cur->set_not_young();
6026       cur->set_evacuation_failed(false);
6027       // The region is now considered to be old.
6028       _old_set.add(cur);

6029     }
6030     cur = next;
6031   }
6032 

6033   policy->record_max_rs_lengths(rs_lengths);
6034   policy->cset_regions_freed();
6035 
6036   double end_sec = os::elapsedTime();
6037   double elapsed_ms = (end_sec - start_sec) * 1000.0;
6038 
6039   if (non_young) {
6040     non_young_time_ms += elapsed_ms;
6041   } else {
6042     young_time_ms += elapsed_ms;
6043   }
6044 
6045   update_sets_after_freeing_regions(pre_used, &local_free_list,
6046                                     NULL /* old_proxy_set */,
6047                                     NULL /* humongous_proxy_set */,
6048                                     false /* par */);
6049   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6050   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6051 }
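// free_collection_set() above walks the collection set as an intrusive singly
// linked list: read the successor before touching the current region, then
// either free the region or hand it to the old set. A standalone sketch of
// that detach-as-you-go walk with a hypothetical node type:

#include <cstddef>

struct CSetNodeSketch {
  CSetNodeSketch* next_in_cset;
  bool evacuation_failed;
};

// Counts (stands in for freeing) regions that evacuated cleanly and pushes the
// evacuation-failed ones onto *retained_list, mirroring the move to _old_set.
static int walk_cset_sketch(CSetNodeSketch* head, CSetNodeSketch** retained_list) {
  int freed = 0;
  CSetNodeSketch* cur = head;
  while (cur != NULL) {
    CSetNodeSketch* next = cur->next_in_cset;   // read before unlinking
    cur->next_in_cset = NULL;
    if (!cur->evacuation_failed) {
      freed++;                                  // stands in for free_region()
    } else {
      cur->next_in_cset = *retained_list;       // retained: becomes an old region
      *retained_list = cur;
    }
    cur = next;
  }
  return freed;
}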
6052 




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/icBuffer.hpp"
  27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  35 #include "gc_implementation/g1/g1EvacFailure.hpp"
  36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  37 #include "gc_implementation/g1/g1Log.hpp"
  38 #include "gc_implementation/g1/g1MarkSweep.hpp"
  39 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  40 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  41 #include "gc_implementation/g1/g1YCTypes.hpp"
  42 #include "gc_implementation/g1/heapRegion.inline.hpp"
  43 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  44 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  45 #include "gc_implementation/g1/vm_operations_g1.hpp"
  46 #include "gc_implementation/shared/gcHeapSummary.hpp"
  47 #include "gc_implementation/shared/gcTimer.hpp"
  48 #include "gc_implementation/shared/gcTrace.hpp"
  49 #include "gc_implementation/shared/gcTraceTime.hpp"
  50 #include "gc_implementation/shared/isGCActiveMark.hpp"
  51 #include "memory/gcLocker.inline.hpp"
  52 #include "memory/genOopClosures.inline.hpp"
  53 #include "memory/generationSpec.hpp"
  54 #include "memory/referenceProcessor.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "oops/oop.pcgc.inline.hpp"
  57 #include "runtime/aprofiler.hpp"
  58 #include "runtime/vmThread.hpp"
  59 
  60 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  61 
  62 // turn it on so that the contents of the young list (scan-only /
  63 // to-be-collected) are printed at "strategic" points before / during
  64 // / after the collection --- this is useful for debugging
  65 #define YOUNG_LIST_VERBOSE 0
  66 // CURRENT STATUS
  67 // This file is under construction.  Search for "FIXME".
  68 
  69 // INVARIANTS/NOTES
  70 //
  71 // All allocation activity covered by the G1CollectedHeap interface is
  72 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  73 // and allocate_new_tlab, which are the "entry" points to the
  74 // allocation code from the rest of the JVM.  (Note that this does not
  75 // apply to TLAB allocation, which is not part of this interface: it
  76 // is done by clients of this interface.)
  77 
  78 // Notes on implementation of parallelism in different tasks.
  79 //
  80 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  81 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  82 // It does use run_task() which sets _n_workers in the task.
  83 // G1ParTask executes g1_process_strong_roots() ->
  84 // SharedHeap::process_strong_roots() which calls eventually to
  85 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  86 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  87 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  88 //
  89 
  90 // Local to this file.
  91 
  92 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  93   SuspendibleThreadSet* _sts;
  94   G1RemSet* _g1rs;
  95   ConcurrentG1Refine* _cg1r;
  96   bool _concurrent;
  97 public:
  98   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  99                               G1RemSet* g1rs,
 100                               ConcurrentG1Refine* cg1r) :
 101     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
 102   {}
 103   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 104     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);


 445 }
 446 
 447 #ifdef ASSERT
 448 // A region is added to the collection set as it is retired
 449 // so an address p can point to a region which will be in the
 450 // collection set but has not yet been retired.  This method
 451 // therefore is only accurate during a GC pause after all
 452 // regions have been retired.  It is used for debugging
 453 // to check if an nmethod has references to objects that can
 454 // be moved during a partial collection.  Though it can be
 455 // inaccurate, it is sufficient for G1 because the conservative
 456 // implementation of is_scavengable() for G1 will indicate that
 457 // all nmethods must be scanned during a partial collection.
 458 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 459   HeapRegion* hr = heap_region_containing(p);
 460   return hr != NULL && hr->in_collection_set();
 461 }
 462 #endif
 463 
 464 // Returns true if the reference points to an object that
 465 // can move in an incremental collection.
 466 bool G1CollectedHeap::is_scavengable(const void* p) {
 467   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 468   G1CollectorPolicy* g1p = g1h->g1_policy();
 469   HeapRegion* hr = heap_region_containing(p);
 470   if (hr == NULL) {
 471      // null
 472      assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
 473      return false;
 474   } else {
 475     return !hr->isHumongous();
 476   }
 477 }
 478 
 479 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 480   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 481   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
 482 
 483   // Count the dirty cards at the start.
 484   CountNonCleanMemRegionClosure count1(this);
 485   ct_bs->mod_card_iterate(&count1);


 536         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 537                                "secondary_free_list has %u entries",
 538                                _secondary_free_list.length());
 539       }
 540       // It looks as if there are free regions available on the
 541       // secondary_free_list. Let's move them to the free_list and try
 542       // again to allocate from it.
 543       append_secondary_free_list();
 544 
 545       assert(!_free_list.is_empty(), "if the secondary_free_list was not "
 546              "empty we should have moved at least one entry to the free_list");
 547       HeapRegion* res = _free_list.remove_head();
 548       if (G1ConcRegionFreeingVerbose) {
 549         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 550                                "allocated "HR_FORMAT" from secondary_free_list",
 551                                HR_FORMAT_PARAMS(res));
 552       }
 553       return res;
 554     }
 555 
 556     // Wait here until we get notified either when (a) there are no
 557     // more free regions coming or (b) some regions have been moved onto
 558     // the secondary_free_list.
 559     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
 560   }
 561 
 562   if (G1ConcRegionFreeingVerbose) {
 563     gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 564                            "could not allocate from secondary_free_list");
 565   }
 566   return NULL;
 567 }
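// The allocation path above is a classic monitor wait: take the lock, re-check
// "regions available" and "more regions coming", and block otherwise until the
// concurrent cleanup side notifies. A standalone sketch of that shape with
// std::mutex/std::condition_variable and hypothetical names (the real code
// waits on SecondaryFreeList_lock with no safepoint check):

#include <condition_variable>
#include <deque>
#include <mutex>

class SecondaryFreeListSketch {
  std::mutex              _mu;
  std::condition_variable _cv;
  std::deque<int>         _regions;             // stand-in for the secondary free list
  bool                    _free_regions_coming;
public:
  SecondaryFreeListSketch() : _free_regions_coming(true) {}

  // Returns a region index, or -1 once no more regions will ever arrive.
  int take_or_wait() {
    std::unique_lock<std::mutex> lock(_mu);
    while (_regions.empty() && _free_regions_coming) {
      _cv.wait(lock);                           // woken by add_region() or no_more_regions()
    }
    if (_regions.empty()) return -1;
    int r = _regions.front();
    _regions.pop_front();
    return r;
  }

  void add_region(int r) {                      // called by the "cleanup" side
    std::unique_lock<std::mutex> lock(_mu);
    _regions.push_back(r);
    _cv.notify_all();
  }

  void no_more_regions() {                      // cleanup finished for this cycle
    std::unique_lock<std::mutex> lock(_mu);
    _free_regions_coming = false;
    _cv.notify_all();
  }
};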
 568 
 569 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
 570   assert(!isHumongous(word_size) || word_size <= HeapRegion::GrainWords,
 571          "the only time we use this to allocate a humongous region is "
 572          "when we are allocating a single humongous region");
 573 
 574   HeapRegion* res;
 575   if (G1StressConcRegionFreeing) {
 576     if (!_secondary_free_list.is_empty()) {


 611       // it would probably be OK to use remove_head(). But the extra
 612       // check for NULL is unlikely to be a performance issue here (we
 613       // just expanded the heap!) so let's just be conservative and
 614       // use remove_head_or_null().
 615       res = _free_list.remove_head_or_null();
 616     } else {
 617       _expand_heap_after_alloc_failure = false;
 618     }
 619   }
 620   return res;
 621 }
 622 
 623 uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
 624                                                         size_t word_size) {
 625   assert(isHumongous(word_size), "word_size should be humongous");
 626   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 627 
 628   uint first = G1_NULL_HRS_INDEX;
 629   if (num_regions == 1) {
 630     // Only one region to allocate, no need to go through the slower
 631     // path. The caller will attempt the expansion if this fails, so
 632     // let's not try to expand here too.
 633     HeapRegion* hr = new_region(word_size, false /* do_expand */);
 634     if (hr != NULL) {
 635       first = hr->hrs_index();
 636     } else {
 637       first = G1_NULL_HRS_INDEX;
 638     }
 639   } else {
 640     // We can't allocate humongous regions while cleanupComplete() is
 641     // running, since some of the regions we find to be empty might not
 642     // yet be added to the free list and it is not straightforward to
 643     // know which list they are on so that we can remove them. Note
 644     // that we only need to do this if we need to allocate more than
 645     // one region to satisfy the current humongous allocation
 646     // request. If we are only allocating one region we use the common
 647     // region allocation code (see above).
 648     wait_while_free_regions_coming();
 649     append_secondary_free_list_if_not_empty_with_lock();
 650 
 651     if (free_regions() >= num_regions) {


 676   uint last = first + num_regions;
 677 
 678   // We need to initialize the region(s) we just discovered. This is
 679   // a bit tricky given that it can happen concurrently with
 680   // refinement threads refining cards on these regions and
 681   // potentially wanting to refine the BOT as they are scanning
 682   // those cards (this can happen shortly after a cleanup; see CR
 683   // 6991377). So we have to set up the region(s) carefully and in
 684   // a specific order.
 685 
 686   // The word size sum of all the regions we will allocate.
 687   size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
 688   assert(word_size <= word_size_sum, "sanity");
 689 
 690   // This will be the "starts humongous" region.
 691   HeapRegion* first_hr = region_at(first);
 692   // The header of the new object will be placed at the bottom of
 693   // the first region.
 694   HeapWord* new_obj = first_hr->bottom();
 695   // This will be the new end of the first region in the series that
 696   // should also match the end of the last region in the series.
 697   HeapWord* new_end = new_obj + word_size_sum;
 698   // This will be the new top of the first region that will reflect
 699   // this allocation.
 700   HeapWord* new_top = new_obj + word_size;
 701 
 702   // First, we need to zero the header of the space that we will be
 703   // allocating. When we update top further down, some refinement
 704   // threads might try to scan the region. By zeroing the header we
 705   // ensure that any thread that will try to scan the region will
 706   // come across the zero klass word and bail out.
 707   //
 708   // NOTE: It would not have been correct to have used
 709   // CollectedHeap::fill_with_object() and make the space look like
 710   // an int array. The thread that is doing the allocation will
 711   // later update the object header to a potentially different array
 712   // type and, for a very short period of time, the klass and length
 713   // fields will be inconsistent. This could cause a refinement
 714   // thread to calculate the object size incorrectly.
 715   Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
 716 


 851 
 852   verify_region_sets_optional();
 853 
 854   return result;
 855 }
 856 
 857 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
 858   assert_heap_not_locked_and_not_at_safepoint();
 859   assert(!isHumongous(word_size), "we do not allow humongous TLABs");
 860 
 861   unsigned int dummy_gc_count_before;
 862   int dummy_gclocker_retry_count = 0;
 863   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
 864 }
 865 
 866 HeapWord*
 867 G1CollectedHeap::mem_allocate(size_t word_size,
 868                               bool*  gc_overhead_limit_was_exceeded) {
 869   assert_heap_not_locked_and_not_at_safepoint();
 870 
 871   // Loop until the allocation is satisfied, or unsatisfied after GC.
 872   for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
 873     unsigned int gc_count_before;
 874 
 875     HeapWord* result = NULL;
 876     if (!isHumongous(word_size)) {
 877       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
 878     } else {
 879       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
 880     }
 881     if (result != NULL) {
 882       return result;
 883     }
 884 
 885     // Create the garbage collection operation...
 886     VM_G1CollectForAllocation op(gc_count_before, word_size);
 887     // ...and get the VM thread to execute it.
 888     VMThread::execute(&op);
 889 
 890     if (op.prologue_succeeded() && op.pause_succeeded()) {
 891       // If the operation was successful we'll return the result even


 991         // If we get here we successfully scheduled a collection which
 992         // failed to allocate. No point in trying to allocate
 993         // further. We'll just return NULL.
 994         MutexLockerEx x(Heap_lock);
 995         *gc_count_before_ret = total_collections();
 996         return NULL;
 997       }
 998     } else {
 999       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1000         MutexLockerEx x(Heap_lock);
1001         *gc_count_before_ret = total_collections();
1002         return NULL;
1003       }
1004       // The GCLocker is either active or the GCLocker initiated
1005       // GC has not yet been performed. Stall until it is and
1006       // then retry the allocation.
1007       GC_locker::stall_until_clear();
1008       (*gclocker_retry_count_ret) += 1;
1009     }
1010 
1011     // We can reach here if we were unsuccessful in scheduling a
1012     // collection (because another thread beat us to it) or if we were
1013     // stalled due to the GC locker. In either case we should retry the
1014     // allocation attempt in case another thread successfully
1015     // performed a collection and reclaimed enough space. We do the
1016     // first attempt (without holding the Heap_lock) here and the
1017     // follow-on attempt will be at the start of the next loop
1018     // iteration (after taking the Heap_lock).
1019     result = _mutator_alloc_region.attempt_allocation(word_size,
1020                                                       false /* bot_updates */);
1021     if (result != NULL) {
1022       return result;
1023     }
1024 
1025     // Give a warning if we seem to be looping forever.
1026     if ((QueuedAllocationWarningCount > 0) &&
1027         (try_count % QueuedAllocationWarningCount == 0)) {
1028       warning("G1CollectedHeap::attempt_allocation_slow() "
1029               "retries %d times", try_count);
1030     }
1031   }


1116         // If we get here we successfully scheduled a collection which
1117         // failed to allocate. No point in trying to allocate
1118         // further. We'll just return NULL.
1119         MutexLockerEx x(Heap_lock);
1120         *gc_count_before_ret = total_collections();
1121         return NULL;
1122       }
1123     } else {
1124       if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1125         MutexLockerEx x(Heap_lock);
1126         *gc_count_before_ret = total_collections();
1127         return NULL;
1128       }
1129       // The GCLocker is either active or the GCLocker initiated
1130       // GC has not yet been performed. Stall until it is and
1131       // then retry the allocation.
1132       GC_locker::stall_until_clear();
1133       (*gclocker_retry_count_ret) += 1;
1134     }
1135 
1136     // We can reach here if we were unsuccessful in scheduling a
1137     // collection (because another thread beat us to it) or if we were
1138     // stalled due to the GC locker. In either case we should retry the
1139     // allocation attempt in case another thread successfully
1140     // performed a collection and reclaimed enough space.  Give a
1141     // warning if we seem to be looping forever.
1142 
1143     if ((QueuedAllocationWarningCount > 0) &&
1144         (try_count % QueuedAllocationWarningCount == 0)) {
1145       warning("G1CollectedHeap::attempt_allocation_humongous() "
1146               "retries %d times", try_count);
1147     }
1148   }
1149 
1150   ShouldNotReachHere();
1151   return NULL;
1152 }
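// Both slow-path allocators above share the same retry shape: attempt the
// allocation, schedule a collection when it fails, and if the GC locker is the
// obstacle, stall until its collection has happened, giving up after a bounded
// number of locker-induced retries. A standalone sketch of that loop with a
// hypothetical environment interface (the real code also re-reads the
// collection count under the Heap_lock before returning NULL):

#include <cstddef>

class SlowAllocEnvSketch {
public:
  virtual void* try_allocate(size_t word_size) = 0;        // fast attempt, NULL on failure
  virtual bool  collection_ran(size_t word_size) = 0;      // schedule a pause; true if it ran
  virtual bool  gc_locker_is_active() = 0;
  virtual void  stall_until_gc_locker_clear() = 0;
  virtual ~SlowAllocEnvSketch() {}
};

static void* attempt_allocation_sketch(SlowAllocEnvSketch& env, size_t word_size,
                                       unsigned max_locker_retries) {
  unsigned locker_retries = 0;
  for (;;) {
    void* result = env.try_allocate(word_size);
    if (result != NULL) {
      return result;
    }
    if (env.collection_ran(word_size)) {
      // A pause ran and still could not make room for us: give up.
      return NULL;
    }
    if (env.gc_locker_is_active()) {
      if (locker_retries++ > max_locker_retries) {
        return NULL;                          // mirrors GCLockerRetryAllocationCount
      }
      env.stall_until_gc_locker_clear();      // wait for the locker-initiated GC
    }
    // Otherwise another thread beat us to the collection; loop and retry.
  }
}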
1153 
1154 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1155                                        bool expect_null_mutator_alloc_region) {
1156   assert_at_safepoint(true /* should_be_vm_thread */);


1286 
1287 void G1CollectedHeap::verify_before_gc() {
1288   double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:");
1289   g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
1290 }
1291 
1292 void G1CollectedHeap::verify_after_gc() {
1293   double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
1294   g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
1295 }
1296 
1297 bool G1CollectedHeap::do_collection(bool explicit_gc,
1298                                     bool clear_all_soft_refs,
1299                                     size_t word_size) {
1300   assert_at_safepoint(true /* should_be_vm_thread */);
1301 
1302   if (GC_locker::check_active_before_gc()) {
1303     return false;
1304   }
1305 
1306   STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1307   gc_timer->register_gc_start(os::elapsed_counter());
1308 
1309   SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1310   gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1311 
1312   SvcGCMarker sgcm(SvcGCMarker::FULL);
1313   ResourceMark rm;
1314 
1315   print_heap_before_gc();
1316   trace_heap_before_gc(gc_tracer);
1317 
1318   size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
1319 
1320   HRSPhaseSetter x(HRSPhaseFullGC);
1321   verify_region_sets_optional();
1322 
1323   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1324                            collector_policy()->should_clear_all_soft_refs();
1325 
1326   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1327 
1328   {
1329     IsGCActiveMark x;
1330 
1331     // Timing
1332     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
1333     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
1334     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
1335 
1336     {
1337       GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
1338       TraceCollectorStats tcs(g1mm()->full_collection_counters());
1339       TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1340 
1341       double start = os::elapsedTime();
1342       g1_policy()->record_full_collection_start();
1343 
1344       // Note: When we have a more flexible GC logging framework that
1345       // allows us to add optional attributes to a GC log record we
1346       // could consider timing and reporting how long we wait in the
1347       // following two methods.
1348       wait_while_free_regions_coming();
1349       // If we start the compaction before the CM threads finish
1350       // scanning the root regions we might trip them over as we'll
1351       // be moving objects / updating references. So let's wait until
1352       // they are done. By telling them to abort, they should complete
1353       // early.
1354       _cm->root_regions()->abort();
1355       _cm->root_regions()->wait_until_scan_finished();
1356       append_secondary_free_list_if_not_empty_with_lock();
1357 
1358       gc_prologue(true);
1359       increment_total_collections(true /* full gc */);
1360       increment_old_marking_cycles_started();
1361 
1362       assert(used() == recalculate_used(), "Should be equal");
1363 
1364       verify_before_gc();
1365 
1366       pre_full_gc_dump(gc_timer);
1367 
1368       COMPILER2_PRESENT(DerivedPointerTable::clear());
1369 
1370       // Disable discovery and empty the discovered lists
1371       // for the CM ref processor.
1372       ref_processor_cm()->disable_discovery();
1373       ref_processor_cm()->abandon_partial_discovery();
1374       ref_processor_cm()->verify_no_references_recorded();
1375 
1376       // Abandon current iterations of concurrent marking and concurrent
1377       // refinement, if any are in progress. We have to do this before
1378       // wait_until_scan_finished() below.
1379       concurrent_mark()->abort();
1380 
1381       // Make sure we'll choose a new allocation region afterwards.
1382       release_mutator_alloc_region();
1383       abandon_gc_alloc_regions();
1384       g1_rem_set()->cleanupHRRS();
1385 
1386       // We should call this after we retire any currently active alloc


1430       MemoryService::track_memory_usage();
1431 
1432       verify_after_gc();
1433 
1434       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1435       ref_processor_stw()->verify_no_references_recorded();
1436 
1437       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1438       ClassLoaderDataGraph::purge();
1439       MetaspaceAux::verify_metrics();
1440 
1441       // Note: since we've just done a full GC, concurrent
1442       // marking is no longer active. Therefore we need not
1443       // re-enable reference discovery for the CM ref processor.
1444       // That will be done at the start of the next marking cycle.
1445       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1446       ref_processor_cm()->verify_no_references_recorded();
1447 
1448       reset_gc_time_stamp();
1449       // Since everything potentially moved, we will clear all remembered
1450       // sets, and clear all cards.  Later we will rebuild remembered
1451       // sets. We will also reset the GC time stamps of the regions.
1452       clear_rsets_post_compaction();
1453       check_gc_time_stamps();
1454 
1455       // Resize the heap if necessary.
1456       resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1457 
1458       if (_hr_printer.is_active()) {
1459         // We should do this after we potentially resize the heap so
1460         // that all the COMMIT / UNCOMMIT events are generated before
1461         // the end GC event.
1462 
1463         print_hrs_post_compaction();
1464         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1465       }
1466 
1467       G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1468       if (hot_card_cache->use_cache()) {
1469         hot_card_cache->reset_card_counts();
1470         hot_card_cache->reset_hot_cache();


1548       g1_policy()->record_full_collection_end();
1549 
1550       if (G1Log::fine()) {
1551         g1_policy()->print_heap_transition();
1552       }
1553 
1554       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1555       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1556       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1557       // before any GC notifications are raised.
1558       g1mm()->update_sizes();
1559 
1560       gc_epilogue(true);
1561     }
1562 
1563     if (G1Log::finer()) {
1564       g1_policy()->print_detailed_heap_transition(true /* full */);
1565     }
1566 
1567     print_heap_after_gc();
1568     trace_heap_after_gc(gc_tracer);
1569 
1570     post_full_gc_dump(gc_timer);
1571 
1572     gc_timer->register_gc_end(os::elapsed_counter());
1573     gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1574   }
1575 
1576   return true;
1577 }
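// For the event-based tracing, do_collection() above brackets the full GC:
// register_gc_start()/report_gc_start() before any work, and
// register_gc_end()/report_gc_end() once the pause is complete, so the timer
// interval and the trace event always pair up. A standalone sketch of that
// pairing with hypothetical timer/tracer types (the real STWGCTimer and tracer
// classes come from the gcTimer.hpp/gcTrace.hpp includes):

#include <cstdio>
#include <ctime>

struct GCTimerSketch {
  std::clock_t start_ticks;
  std::clock_t end_ticks;
  void register_gc_start() { start_ticks = std::clock(); }
  void register_gc_end()   { end_ticks   = std::clock(); }
};

struct GCTracerSketch {
  void report_gc_start(const char* cause) {
    std::printf("GC start (%s)\n", cause);
  }
  void report_gc_end(const GCTimerSketch& timer) {
    double secs = double(timer.end_ticks - timer.start_ticks) / CLOCKS_PER_SEC;
    std::printf("GC end, %.3f s\n", secs);
  }
};

static void traced_full_gc_sketch(GCTimerSketch& timer, GCTracerSketch& tracer) {
  timer.register_gc_start();
  tracer.report_gc_start("sketch");
  // ... the collection itself would run here ...
  timer.register_gc_end();
  tracer.report_gc_end(timer);
}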
1578 
1579 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1580   // do_collection() will return whether it succeeded in performing
1581   // the GC. Currently, there is no facility on the
1582   // do_full_collection() API to notify the caller that the collection
1583   // did not succeed (e.g., because it was locked out by the GC
1584   // locker). So, right now, we'll ignore the return value.
1585   bool dummy = do_collection(true,                /* explicit_gc */
1586                              clear_all_soft_refs,
1587                              0                    /* word_size */);
1588 }
1589 
1590 // This code is mostly copied from TenuredGeneration.
1591 void
1592 G1CollectedHeap::
1593 resize_if_necessary_after_full_collection(size_t word_size) {


1918 }
1919 
1920 // Public methods.
1921 
1922 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1923 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1924 #endif // _MSC_VER
1925 
1926 
1927 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1928   SharedHeap(policy_),
1929   _g1_policy(policy_),
1930   _dirty_card_queue_set(false),
1931   _into_cset_dirty_card_queue_set(false),
1932   _is_alive_closure_cm(this),
1933   _is_alive_closure_stw(this),
1934   _ref_processor_cm(NULL),
1935   _ref_processor_stw(NULL),
1936   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1937   _bot_shared(NULL),
1938   _evac_failure_scan_stack(NULL),
1939   _mark_in_progress(false),
1940   _cg1r(NULL), _summary_bytes_used(0),
1941   _g1mm(NULL),
1942   _refine_cte_cl(NULL),
1943   _full_collection(false),
1944   _free_list("Master Free List"),
1945   _secondary_free_list("Secondary Free List"),
1946   _old_set("Old Set"),
1947   _humongous_set("Master Humongous Set"),
1948   _free_regions_coming(false),
1949   _young_list(new YoungList(this)),
1950   _gc_time_stamp(0),
1951   _retained_old_gc_alloc_region(NULL),
1952   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1953   _old_plab_stats(OldPLABSize, PLABWeight),
1954   _expand_heap_after_alloc_failure(true),
1955   _surviving_young_words(NULL),
1956   _old_marking_cycles_started(0),
1957   _old_marking_cycles_completed(0),
1958   _concurrent_cycle_started(false),
1959   _in_cset_fast_test(NULL),
1960   _in_cset_fast_test_base(NULL),
1961   _dirty_cards_region_list(NULL),
1962   _worker_cset_start_region(NULL),
1963   _worker_cset_start_region_time_stamp(NULL),
1964   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1965   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1966   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1967   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1968 
1969   _g1h = this;
1970   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1971     vm_exit_during_initialization("Failed necessary allocation.");
1972   }
1973 
1974   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1975 
1976   int n_queues = MAX2((int)ParallelGCThreads, 1);
1977   _task_queues = new RefToScanQueueSet(n_queues);
1978 
1979   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1980   assert(n_rem_sets > 0, "Invariant.");
1981 
1982   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1983   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1984   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1985 
1986   for (int i = 0; i < n_queues; i++) {
1987     RefToScanQueue* q = new RefToScanQueue();
1988     q->initialize();
1989     _task_queues->register_queue(i, q);
1990     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1991   }

1992   clear_cset_start_regions();
1993 
1994   // Initialize the G1EvacuationFailureALot counters and flags.
1995   NOT_PRODUCT(reset_evacuation_should_fail();)
1996 
1997   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1998 }
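// The constructor above allocates the per-worker arrays as raw C-heap storage
// (NEW_C_HEAP_ARRAY) and then constructs each element in place with placement
// new. A standalone sketch of that idiom, assuming malloc in place of the
// HotSpot allocator and a hypothetical element type:

#include <cstdlib>
#include <new>

struct WorkerInfoSketch {
  unsigned failed_count;
  WorkerInfoSketch() : failed_count(0) {}
};

// Allocates raw storage for n elements and runs each constructor explicitly,
// in the style of ::new (&array[i]) EvacuationFailedInfo() above.
static WorkerInfoSketch* new_worker_info_array_sketch(int n) {
  void* raw = std::malloc(sizeof(WorkerInfoSketch) * n);
  if (raw == NULL) {
    return NULL;                                   // the real allocator aborts instead
  }
  WorkerInfoSketch* arr = static_cast<WorkerInfoSketch*>(raw);
  for (int i = 0; i < n; i++) {
    ::new (&arr[i]) WorkerInfoSketch();            // construct element i in place
  }
  return arr;
}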
1999 
2000 jint G1CollectedHeap::initialize() {
2001   CollectedHeap::pre_initialize();
2002   os::enable_vtime();
2003 
2004   G1Log::init();
2005 
2006   // Necessary to satisfy locking discipline assertions.
2007 
2008   MutexLocker x(Heap_lock);
2009 
2010   // We have to initialize the printer before committing the heap, as
2011   // it will be used then.


2031 
2032   // When compressed oops are enabled, the preferred heap base
2033   // is calculated by subtracting the requested size from the
2034   // 32Gb boundary and using the result as the base address for
2035   // heap reservation. If the requested size is not aligned to
2036   // HeapRegion::GrainBytes (i.e. the alignment that is passed
2037   // into the ReservedHeapSpace constructor) then the actual
2038   // base of the reserved heap may end up differing from the
2039   // address that was requested (i.e. the preferred heap base).
2040   // If this happens then we could end up using a non-optimal
2041   // compressed oops mode.
2042 
2043   // Since max_byte_size is aligned to the size of a heap region (checked
2044   // above).
2045   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2046 
2047   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2048                                                  HeapRegion::GrainBytes);
2049 
2050   // It is important to do this in a way such that concurrent readers can't
2051   // temporarily think something is in the heap.  (I've actually seen this
2052   // happen in asserts: DLD.)
2053   _reserved.set_word_size(0);
2054   _reserved.set_start((HeapWord*)heap_rs.base());
2055   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
2056 
2057   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
2058 
2059   // Create the gen rem set (and barrier set) for the entire reserved region.
2060   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
2061   set_barrier_set(rem_set()->bs());
2062   if (barrier_set()->is_a(BarrierSet::ModRef)) {
2063     _mr_bs = (ModRefBarrierSet*)_barrier_set;
2064   } else {
2065     vm_exit_during_initialization("G1 requires a mod ref bs.");
2066     return JNI_ENOMEM;
2067   }
2068 
2069   // Also create a G1 rem set.
2070   if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2071     _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());


2468   assert(concurrent ||
2469          (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2470          (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2471          err_msg("for inner caller (Full GC): _old_marking_cycles_started = %u "
2472                  "is inconsistent with _old_marking_cycles_completed = %u",
2473                  _old_marking_cycles_started, _old_marking_cycles_completed));
2474 
2475   // This is the case for the outer caller, i.e. the concurrent cycle.
2476   assert(!concurrent ||
2477          (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2478          err_msg("for outer caller (concurrent cycle): "
2479                  "_old_marking_cycles_started = %u "
2480                  "is inconsistent with _old_marking_cycles_completed = %u",
2481                  _old_marking_cycles_started, _old_marking_cycles_completed));
2482 
2483   _old_marking_cycles_completed += 1;
2484 
2485   // We need to clear the "in_progress" flag in the CM thread before
2486   // we wake up any waiters (especially when ExplicitInvokesConcurrent
2487   // is set) so that if a waiter requests another System.gc() it doesn't
2488   // incorrectly see that a marking cycle is still in progress.
2489   if (concurrent) {
2490     _cmThread->clear_in_progress();
2491   }
2492 
2493   // This notify_all() will ensure that a thread that called
2494   // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
2495   // and it's waiting for a full GC to finish will be woken up. It is
2496   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2497   FullGCCount_lock->notify_all();
2498 }
2499 
2500 void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
2501   _concurrent_cycle_started = true;
2502   _gc_timer_cm->register_gc_start(start_time);
2503 
2504   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2505   trace_heap_before_gc(_gc_tracer_cm);
2506 }
2507 
2508 void G1CollectedHeap::register_concurrent_cycle_end() {
2509   if (_concurrent_cycle_started) {
2510     _gc_timer_cm->register_gc_end(os::elapsed_counter());
2511 
2512     if (_cm->has_aborted()) {
2513       _gc_tracer_cm->report_concurrent_mode_failure();
2514     }
2515     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2516 
2517     _concurrent_cycle_started = false;
2518   }
2519 }
2520 
2521 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2522   if (_concurrent_cycle_started) {
2523     trace_heap_after_gc(_gc_tracer_cm);
2524   }
2525 }
2526 
2527 G1YCType G1CollectedHeap::yc_type() {
2528   bool is_young = g1_policy()->gcs_are_young();
2529   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2530   bool is_during_mark = mark_in_progress();
2531 
2532   if (is_initial_mark) {
2533     return InitialMark;
2534   } else if (is_during_mark) {
2535     return DuringMark;
2536   } else if (is_young) {
2537     return Normal;
2538   } else {
2539     return Mixed;
2540   }
2541 }
2542 
2543 void G1CollectedHeap::collect(GCCause::Cause cause) {
2544   assert_heap_not_locked();
2545 
2546   unsigned int gc_count_before;
2547   unsigned int old_marking_count_before;
2548   bool retry_gc;
2549 
2550   do {
2551     retry_gc = false;
2552 
2553     {
2554       MutexLocker ml(Heap_lock);
2555 
2556       // Read the GC count while holding the Heap_lock
2557       gc_count_before = total_collections();
2558       old_marking_count_before = _old_marking_cycles_started;
2559     }
2560 
2561     if (should_do_concurrent_full_gc(cause)) {
2562       // Schedule an initial-mark evacuation pause that will start a


2725       assert(!r->continuesHumongous(), "sanity");
2726       if (r->startsHumongous()) {
2727         // If the region is "starts humongous" we'll iterate over its
2728         // "continues humongous" regions first; in fact we'll do them
2729         // first. The order is important. In one case, calling the
2730         // closure on the "starts humongous" region might de-allocate
2731         // and clear all its "continues humongous" regions and, as a
2732         // result, we might end up processing them twice. So, we'll do
2733         // them first (notice: most closures will ignore them anyway) and
2734         // then we'll do the "starts humongous" region.
2735         for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
2736           HeapRegion* chr = region_at(ch_index);
2737 
2738           // if the region has already been claimed or it's not
2739           // "continues humongous" we're done
2740           if (chr->claim_value() == claim_value ||
2741               !chr->continuesHumongous()) {
2742             break;
2743           }
2744 
2745           // No one should have claimed it directly. We can assert
2746           // this given that we claimed its "starts humongous" region.
2747           assert(chr->claim_value() != claim_value, "sanity");
2748           assert(chr->humongous_start_region() == r, "sanity");
2749 
2750           if (chr->claimHeapRegion(claim_value)) {
2751             // we should always be able to claim it; no one else should
2752             // be trying to claim this region
2753 
2754             bool res2 = cl->doHeapRegion(chr);
2755             assert(!res2, "Should not abort");
2756 
2757             // Right now, this holds (i.e., no closure that actually
2758             // does something with "continues humongous" regions
2759             // clears them). We might have to weaken it in the future,
2760             // but let's leave these two asserts here for extra safety.
2761             assert(chr->continuesHumongous(), "should still be the case");
2762             assert(chr->humongous_start_region() == r, "sanity");
2763           } else {
2764             guarantee(false, "we should not reach here");
2765           }
2766         }
2767       }
2768 
2769       assert(!r->continuesHumongous(), "sanity");
2770       bool res = cl->doHeapRegion(r);
2771       assert(!res, "Should not abort");


3025 }
3026 
3027 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
3028   Space* sp = space_containing(addr);
3029   return sp->block_is_obj(addr);
3030 }
3031 
3032 bool G1CollectedHeap::supports_tlab_allocation() const {
3033   return true;
3034 }
3035 
3036 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
3037   return HeapRegion::GrainBytes;
3038 }
3039 
3040 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
3041   // Return the remaining space in the cur alloc region, but not less than
3042   // the min TLAB size.
3043 
3044   // Also, this value can be at most the humongous object threshold,
3045   // since we can't allow tlabs to grow big enough to accommodate
3046   // humongous objects.
3047 
3048   HeapRegion* hr = _mutator_alloc_region.get();
3049   size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
3050   if (hr == NULL) {
3051     return max_tlab_size;
3052   } else {
3053     return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
3054   }
3055 }
3056 
3057 size_t G1CollectedHeap::max_capacity() const {
3058   return _g1_reserved.byte_size();
3059 }
3060 
3061 jlong G1CollectedHeap::millis_since_last_gc() {
3062   // assert(false, "NYI");
3063   return 0;
3064 }
3065 


3784     g1_policy()->print_detailed_heap_transition();
3785   } else {
3786     if (evacuation_failed()) {
3787       gclog_or_tty->print("--");
3788     }
3789     g1_policy()->print_heap_transition();
3790     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3791   }
3792   gclog_or_tty->flush();
3793 }
3794 
3795 bool
3796 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3797   assert_at_safepoint(true /* should_be_vm_thread */);
3798   guarantee(!is_gc_active(), "collection is not reentrant");
3799 
3800   if (GC_locker::check_active_before_gc()) {
3801     return false;
3802   }
3803 
3804   _gc_timer_stw->register_gc_start(os::elapsed_counter());
3805 
3806   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3807 
3808   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3809   ResourceMark rm;
3810 
3811   print_heap_before_gc();
3812   trace_heap_before_gc(_gc_tracer_stw);
3813 
3814   HRSPhaseSetter x(HRSPhaseEvacuation);
3815   verify_region_sets_optional();
3816   verify_dirty_young_regions();
3817 
3818   // This call will decide whether this pause is an initial-mark
3819   // pause. If it is, during_initial_mark_pause() will return true
3820   // for the duration of this pause.
3821   g1_policy()->decide_on_conc_mark_initiation();
3822 
3823   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3824   assert(!g1_policy()->during_initial_mark_pause() ||
3825           g1_policy()->gcs_are_young(), "sanity");
3826 
3827   // We also do not allow mixed GCs during marking.
3828   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3829 
3830   // Record whether this pause is an initial mark. When the current
3831   // thread has completed its logging output and it's safe to signal
3832   // the CM thread, the flag's value in the policy will have been reset.
3833   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3834 
3835   // Inner scope for scope based logging, timers, and stats collection
3836   {
3837     EvacuationInfo evacuation_info;
3838 
3839     if (g1_policy()->during_initial_mark_pause()) {
3840       // We are about to start a marking cycle, so we increment the
3841       // full collection counter.
3842       increment_old_marking_cycles_started();
3843       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3844     }
3845 
3846     _gc_tracer_stw->report_yc_type(yc_type());
3847 
3848     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3849 
3850     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3851                                 workers()->active_workers() : 1);
3852     double pause_start_sec = os::elapsedTime();
3853     g1_policy()->phase_times()->note_gc_start(active_workers);
3854     log_gc_header();
3855 
3856     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3857     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3858 
3859     // If the secondary_free_list is not empty, append it to the
3860     // free_list. No need to wait for the cleanup operation to finish;
3861     // the region allocation code will check the secondary_free_list
3862     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3863     // set, skip this step so that the region allocation code has to
3864     // get entries from the secondary_free_list.
3865     if (!G1StressConcRegionFreeing) {
3866       append_secondary_free_list_if_not_empty_with_lock();
3867     }


3937           double scan_wait_end = os::elapsedTime();
3938           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3939         }
3940         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3941 
3942 #if YOUNG_LIST_VERBOSE
3943         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3944         _young_list->print();
3945 #endif // YOUNG_LIST_VERBOSE
3946 
3947         if (g1_policy()->during_initial_mark_pause()) {
3948           concurrent_mark()->checkpointRootsInitialPre();
3949         }
3950 
3951 #if YOUNG_LIST_VERBOSE
3952         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3953         _young_list->print();
3954         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3955 #endif // YOUNG_LIST_VERBOSE
3956 
3957         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3958 
3959         _cm->note_start_of_gc();
3960         // We should not verify the per-thread SATB buffers given that
3961         // we have not filtered them yet (we'll do so during the
3962         // GC). We also call this after finalize_cset() to
3963         // ensure that the CSet has been finalized.
3964         _cm->verify_no_cset_oops(true  /* verify_stacks */,
3965                                  true  /* verify_enqueued_buffers */,
3966                                  false /* verify_thread_buffers */,
3967                                  true  /* verify_fingers */);
3968 
3969         if (_hr_printer.is_active()) {
3970           HeapRegion* hr = g1_policy()->collection_set();
3971           while (hr != NULL) {
3972             G1HRPrinter::RegionType type;
3973             if (!hr->is_young()) {
3974               type = G1HRPrinter::Old;
3975             } else if (hr->is_survivor()) {
3976               type = G1HRPrinter::Survivor;
3977             } else {
3978               type = G1HRPrinter::Eden;
3979             }
3980             _hr_printer.cset(hr);
3981             hr = hr->next_in_collection_set();
3982           }
3983         }
3984 
3985 #ifdef ASSERT
3986         VerifyCSetClosure cl;
3987         collection_set_iterate(&cl);
3988 #endif // ASSERT
3989 
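             // Set up the per-region counters used to record how many words
             // survive the evacuation of each young CSet region; the policy
             // later uses these for its survivor rate predictions.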
3990         setup_surviving_young_words();
3991 
3992         // Initialize the GC alloc regions.
3993         init_gc_alloc_regions(evacuation_info);
3994 
3995         // Actually do the work...
3996         evacuate_collection_set(evacuation_info);
3997 
3998         // We do this to mainly verify the per-thread SATB buffers
3999         // (which have been filtered by now) since we didn't verify
4000         // them earlier. No point in re-checking the stacks / enqueued
4001         // buffers given that the CSet has not changed since last time
4002         // we checked.
4003         _cm->verify_no_cset_oops(false /* verify_stacks */,
4004                                  false /* verify_enqueued_buffers */,
4005                                  true  /* verify_thread_buffers */,
4006                                  true  /* verify_fingers */);
4007 
4008         free_collection_set(g1_policy()->collection_set(), evacuation_info);
4009         g1_policy()->clear_collection_set();
4010 
4011         cleanup_surviving_young_words();
4012 
4013         // Start a new incremental collection set for the next pause.
4014         g1_policy()->start_incremental_cset_building();
4015 
4016         // Clear the _cset_fast_test bitmap in anticipation of adding
4017         // regions to the incremental collection set for the next
4018         // evacuation pause.
4019         clear_cset_fast_test();
4020 
4021         _young_list->reset_sampled_info();
4022 
4023         // Don't check the whole heap at this point as the
4024         // GC alloc regions from this pause have been tagged
4025         // as survivors and moved on to the survivor list.
4026         // Survivor regions will fail the !is_young() check.
4027         assert(check_young_list_empty(false /* check_heap */),
4028           "young list should be empty");
4029 
4030 #if YOUNG_LIST_VERBOSE
4031         gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4032         _young_list->print();
4033 #endif // YOUNG_LIST_VERBOSE
4034 
4035         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
4036                                              _young_list->first_survivor_region(),
4037                                              _young_list->last_survivor_region());
4038 
4039         _young_list->reset_auxilary_lists();
4040 
4041         if (evacuation_failed()) {
4042           _summary_bytes_used = recalculate_used();
4043           uint n_queues = MAX2((int)ParallelGCThreads, 1);
4044           for (uint i = 0; i < n_queues; i++) {
4045             if (_evacuation_failed_info_array[i].has_failed()) {
4046               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
4047             }
4048           }
4049         } else {
4050           // The "used" of the regions in the collection set has already been
4051           // subtracted when they were freed.  Add in the bytes evacuated.
4052           _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
4053         }
4054 
4055         if (g1_policy()->during_initial_mark_pause()) {
4056           // We have to do this before we notify the CM threads that
4057           // they can start working to make sure that all the
4058           // appropriate initialization is done on the CM object.
4059           concurrent_mark()->checkpointRootsInitialPost();
4060           set_marking_started();
4061           // Note that we don't actually trigger the CM thread at
4062           // this point. We do that later when we're sure that
4063           // the current thread has completed its logging output.
4064         }
4065 
4066         allocate_dummy_regions();
4067 
4068 #if YOUNG_LIST_VERBOSE


4071         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4072 #endif // YOUNG_LIST_VERBOSE
4073 
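             // Re-initialize the mutator alloc region (released earlier in
             // the pause) so that mutator allocations can resume once the
             // safepoint ends.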
4074         init_mutator_alloc_region();
4075 
4076         {
4077           size_t expand_bytes = g1_policy()->expansion_amount();
4078           if (expand_bytes > 0) {
4079             size_t bytes_before = capacity();
4080             // No need for an ergo verbose message here,
4081             // expansion_amount() does this when it returns a value > 0.
4082             if (!expand(expand_bytes)) {
4083               // We failed to expand the heap so let's verify that
4084               // committed/uncommitted amount match the backing store
4085               assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
4086               assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
4087             }
4088           }
4089         }
4090 
4091         // We redo the verification but now wrt the new CSet which
4092         // has just got initialized after the previous CSet was freed.
4093         _cm->verify_no_cset_oops(true  /* verify_stacks */,
4094                                  true  /* verify_enqueued_buffers */,
4095                                  true  /* verify_thread_buffers */,
4096                                  true  /* verify_fingers */);
4097         _cm->note_end_of_gc();
4098 
4099         // This timing is only used by the ergonomics to handle our pause target.
4100         // It is unclear why this should not include the full pause. We will
4101         // investigate this in CR 7178365.
4102         double sample_end_time_sec = os::elapsedTime();
4103         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
4104         g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
4105 
4106         MemoryService::track_memory_usage();
4107 
4108         // In prepare_for_verify() below we'll need to scan the deferred
4109         // update buffers to bring the RSets up-to-date if
4110         // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
4111         // the update buffers we'll probably need to scan cards on the
4112         // regions we just allocated to (i.e., the GC alloc
4113         // regions). However, during the last GC we called
4114         // set_saved_mark() on all the GC alloc regions, so card
4115         // scanning might skip the [saved_mark_word()...top()] area of
4116         // those regions (i.e., the area we allocated objects into
4117         // during the last GC). But it shouldn't. Given that
4118         // saved_mark_word() is conditional on whether the GC time stamp
4119         // on the region is current or not, by incrementing the GC time
4120         // stamp here we invalidate all the GC time stamps on all the
4121         // regions and saved_mark_word() will simply return top() for
4122         // all the regions. This is a nicer way of ensuring this rather
4123         // than iterating over the regions and fixing them. In fact, the
4124         // GC time stamp increment here also ensures that


4151 #endif
4152 
4153       gc_epilogue(false);
4154     }
4155 
4156     // Print the remainder of the GC log output.
4157     log_gc_footer(os::elapsedTime() - pause_start_sec);
4158 
4159     // It is not yet safe to tell the concurrent mark thread to
4160     // start as we have some optional output below. We don't want the
4161     // output from the concurrent mark thread interfering with this
4162     // logging output either.
4163 
4164     _hrs.verify_optional();
4165     verify_region_sets_optional();
4166 
4167     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());
4168     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
4169 
4170     print_heap_after_gc();
4171     trace_heap_after_gc(_gc_tracer_stw);
4172 
4173     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
4174     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
4175     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
4176     // before any GC notifications are raised.
4177     g1mm()->update_sizes();
4178 
4179     _gc_tracer_stw->report_evacuation_info(&evacuation_info);
4180     _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
4181     _gc_timer_stw->register_gc_end(os::elapsed_counter());
4182     _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
4183   }
4184 
4185   if (G1SummarizeRSetStats &&
4186       (G1SummarizeRSetStatsPeriod > 0) &&
4187       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
4188     g1_rem_set()->print_summary_info();
4189   }

4190   // It should now be safe to tell the concurrent mark thread to start
4191   // without its logging output interfering with the logging output
4192   // that came from the pause.
4193 
4194   if (should_start_conc_mark) {
4195     // CAUTION: after the doConcurrentMark() call below,
4196     // the concurrent marking thread(s) could be running
4197     // concurrently with us. Make sure that anything after
4198     // this point does not assume that we are the only GC thread
4199     // running. Note: of course, the actual marking work will
4200     // not start until the safepoint itself is released in
4201     // ConcurrentGCThread::safepoint_desynchronize().
4202     doConcurrentMark();
4203   }
4204 
4205   return true;
4206 }
4207 
4208 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
4209 {


4221       break;
4222   }
4223 
4224   // Prevent humongous PLAB sizes for two reasons:
4225   // * PLABs are allocated using a similar path as oops, but should
4226   //   never be in a humongous region
4227   // * Allowing humongous PLABs needlessly churns the region free lists
4228   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
4229 }
4230 
4231 void G1CollectedHeap::init_mutator_alloc_region() {
4232   assert(_mutator_alloc_region.get() == NULL, "pre-condition");
4233   _mutator_alloc_region.init();
4234 }
4235 
4236 void G1CollectedHeap::release_mutator_alloc_region() {
4237   _mutator_alloc_region.release();
4238   assert(_mutator_alloc_region.get() == NULL, "post-condition");
4239 }
4240 
4241 void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
4242   assert_at_safepoint(true /* should_be_vm_thread */);
4243 
4244   _survivor_gc_alloc_region.init();
4245   _old_gc_alloc_region.init();
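       // If an old GC alloc region was retained from the previous pause, try
       // to reuse it below so that partially filled old regions keep being
       // filled across pauses instead of being retired half empty.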
4246   HeapRegion* retained_region = _retained_old_gc_alloc_region;
4247   _retained_old_gc_alloc_region = NULL;
4248 
4249   // We will discard the current GC alloc region if:
4250   // a) it's in the collection set (it can happen!),
4251   // b) it's already full (no point in using it),
4252   // c) it's empty (this means that it was emptied during
4253   // a cleanup and it should be on the free list now), or
4254   // d) it's humongous (this means that it was emptied
4255   // during a cleanup and was added to the free list, but
4256   // has been subsequently used to allocate a humongous
4257   // object that may be less than the region size).
4258   if (retained_region != NULL &&
4259       !retained_region->in_collection_set() &&
4260       !(retained_region->top() == retained_region->end()) &&
4261       !retained_region->is_empty() &&
4262       !retained_region->isHumongous()) {
4263     retained_region->set_saved_mark();
4264     // The retained region was added to the old region set when it was
4265     // retired. We have to remove it now, since regions we are currently
4266     // allocating into are not kept in the region sets. We'll re-add it
4267     // later, when it's retired again.
4268     _old_set.remove(retained_region);
4269     bool during_im = g1_policy()->during_initial_mark_pause();
4270     retained_region->note_start_of_copying(during_im);
4271     _old_gc_alloc_region.set(retained_region);
4272     _hr_printer.reuse(retained_region);
4273     evacuation_info.set_alloc_regions_used_before(retained_region->used());
4274   }
4275 }
4276 
4277 void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
4278   evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
4279                                          _old_gc_alloc_region.count());
4280   _survivor_gc_alloc_region.release();
4281   // If we have an old GC alloc region to release, we'll save it in
4282   // _retained_old_gc_alloc_region. If we don't,
4283   // _retained_old_gc_alloc_region will become NULL. This is what we
4284   // want either way, so there is no reason to check explicitly for
4285   // either condition.
4286   _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
4287 
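       // If PLAB resizing is enabled, feed the allocation and waste
       // statistics gathered during this pause back into the desired PLAB
       // sizes for the next pause.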
4288   if (ResizePLAB) {
4289     _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4290     _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
4291   }
4292 }
4293 
4294 void G1CollectedHeap::abandon_gc_alloc_regions() {
4295   assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
4296   assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
4297   _retained_old_gc_alloc_region = NULL;
4298 }
4299 


4342   }
4343   _objs_with_preserved_marks.clear(true);
4344   _preserved_marks_of_objs.clear(true);
4345 }
4346 
4347 void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
4348   _evac_failure_scan_stack->push(obj);
4349 }
4350 
4351 void G1CollectedHeap::drain_evac_failure_scan_stack() {
4352   assert(_evac_failure_scan_stack != NULL, "precondition");
4353 
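       // Each object that failed evacuation is rescanned here with the
       // registered evac-failure closure so that its fields are processed
       // as well.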
4354   while (_evac_failure_scan_stack->length() > 0) {
4355      oop obj = _evac_failure_scan_stack->pop();
4356      _evac_failure_closure->set_region(heap_region_containing(obj));
4357      obj->oop_iterate_backwards(_evac_failure_closure);
4358   }
4359 }
4360 
4361 oop
4362 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4363                                                oop old) {
4364   assert(obj_in_cs(old),
4365          err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4366                  (HeapWord*) old));
4367   markOop m = old->mark();
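       // Try to install a self-forwarding pointer: forward_to_atomic() returns
       // NULL if this thread's CAS succeeded, and the previously installed
       // forwardee otherwise (see the two branches below).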
4368   oop forward_ptr = old->forward_to_atomic(old);
4369   if (forward_ptr == NULL) {
4370     // Forward-to-self succeeded.
4371     assert(_par_scan_state != NULL, "par scan state");
4372     OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4373     uint queue_num = _par_scan_state->queue_num();
4374 
4375     _evacuation_failed = true;
4376     _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
4377     if (_evac_failure_closure != cl) {
4378       MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4379       assert(!_drain_in_progress,
4380              "Should only be true while someone holds the lock.");
4381       // Set the global evac-failure closure to the current thread's.
4382       assert(_evac_failure_closure == NULL, "Or locking has failed.");
4383       set_evac_failure_closure(cl);
4384       // Now do the common part.
4385       handle_evacuation_failure_common(old, m);
4386       // Reset to NULL.
4387       set_evac_failure_closure(NULL);
4388     } else {
4389       // The lock is already held, and this is recursive.
4390       assert(_drain_in_progress, "This should only be the recursive case.");
4391       handle_evacuation_failure_common(old, m);
4392     }
4393     return old;
4394   } else {
4395     // Forward-to-self failed. Either someone else managed to allocate
4396     // space for this object (old != forward_ptr) or they beat us in
4397     // self-forwarding it (old == forward_ptr).
4398     assert(old == forward_ptr || !obj_in_cs(forward_ptr),
4399            err_msg("obj: "PTR_FORMAT" forwarded to: "PTR_FORMAT" "
4400                    "should not be in the CSet",
4401                    (HeapWord*) old, (HeapWord*) forward_ptr));
4402     return forward_ptr;
4403   }
4404 }
4405 
4406 void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {


4407   preserve_mark_if_necessary(old, m);
4408 
4409   HeapRegion* r = heap_region_containing(old);
4410   if (!r->evacuation_failed()) {
4411     r->set_evacuation_failed(true);
4412     _hr_printer.evac_failure(r);
4413   }
4414 
4415   push_on_evac_failure_scan_stack(old);
4416 
4417   if (!_drain_in_progress) {
4418     // prevent recursion in copy_to_survivor_space()
4419     _drain_in_progress = true;
4420     drain_evac_failure_scan_stack();
4421     _drain_in_progress = false;
4422   }
4423 }
4424 
4425 void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
4426   assert(evacuation_failed(), "Oversaving!");


4636   G1CollectorPolicy* g1p = _g1->g1_policy();
4637   markOop m = old->mark();
4638   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
4639                                            : m->age();
4640   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
4641                                                              word_sz);
4642   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
4643 #ifndef PRODUCT
4644   // Should this evacuation fail?
4645   if (_g1->evacuation_should_fail()) {
4646     if (obj_ptr != NULL) {
4647       _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
4648       obj_ptr = NULL;
4649     }
4650   }
4651 #endif // !PRODUCT
4652 
4653   if (obj_ptr == NULL) {
4654     // This will either forward-to-self, or detect that someone else has
4655     // installed a forwarding pointer.
4656     return _g1->handle_evacuation_failure_par(_par_scan_state, old);

4657   }
4658 
4659   oop obj = oop(obj_ptr);
4660 
4661   // We're going to allocate linearly, so might as well prefetch ahead.
4662   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
4663 
4664   oop forward_ptr = old->forward_to_atomic(obj);
4665   if (forward_ptr == NULL) {
4666     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
4667     if (g1p->track_object_age(alloc_purpose)) {
4668       // We could simply do obj->incr_age(). However, this causes a
4669       // performance issue. obj->incr_age() will first check whether
4670       // the object has a displaced mark by checking its mark word;
4671       // getting the mark word from the new location of the object
4672       // stalls. So, given that we already have the mark word and we
4673       // are about to install it anyway, it's better to increase the
4674       // age on the mark word, when the object does not have a
4675       // displaced mark word. We're not expecting many objects to have
4676     // a displaced mark word, so that case is not optimized


5240 
5241   template <class T> void do_oop_work(T* p) {
5242     oop obj = oopDesc::load_decode_heap_oop(p);
5243 
5244     if (_g1h->obj_in_cs(obj)) {
5245       // If the referent object has been forwarded (either copied
5246       // to a new location or to itself in the event of an
5247       // evacuation failure) then we need to update the reference
5248       // field and, if both reference and referent are in the G1
5249       // heap, update the RSet for the referent.
5250       //
5251       // If the referent has not been forwarded then we have to keep
5252       // it alive by policy. Therefore we have to copy the referent.
5253       //
5254       // If the reference field is in the G1 heap then we can push
5255       // on the PSS queue. When the queue is drained (after each
5256       // phase of reference processing) the object and its followers
5257       // will be copied, the reference field set to point to the
5258       // new location, and the RSet updated. Otherwise we need to
5259       // use the the non-heap or metadata closures directly to copy
5260       // use the non-heap or metadata closures directly to copy
5261       // updating the RSet.
5262 
5263       if (_g1h->is_in_g1_reserved(p)) {
5264         _par_scan_state->push_on_queue(p);
5265       } else {
5266         assert(!ClassLoaderDataGraph::contains((address)p),
5267                err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
5268                               PTR_FORMAT, p));
5269         _copy_non_heap_obj_cl->do_oop(p);
5270       }
5271     }
5272   }
5273 };
5274 
5275 // Serial drain queue closure. Called as the 'complete_gc'
5276 // closure for each discovered list in some of the
5277 // reference processing phases.
5278 
5279 class G1STWDrainQueueClosure: public VoidClosure {
5280 protected:


5408   _g1h->set_par_threads(0);
5409 }
5410 
5411 // Gang task for parallel reference enqueueing.
5412 
5413 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5414   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5415   EnqueueTask& _enq_task;
5416 
5417 public:
5418   G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5419     AbstractGangTask("Enqueue reference objects in parallel"),
5420     _enq_task(enq_task)
5421   { }
5422 
5423   virtual void work(uint worker_id) {
5424     _enq_task.work(worker_id);
5425   }
5426 };
5427 
5428 // Driver routine for parallel reference enqueueing.
5429 // Creates an instance of the ref enqueueing gang
5430 // task and has the worker threads execute it.
5431 
5432 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5433   assert(_workers != NULL, "Need parallel worker threads.");
5434 
5435   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5436 
5437   _g1h->set_par_threads(_active_workers);
5438   _workers->run_task(&enq_task_proxy);
5439   _g1h->set_par_threads(0);
5440 }
5441 
5442 // End of weak reference support closures
5443 
5444 // Abstract task used to preserve (i.e. copy) any referent objects
5445 // that are in the collection set and are pointed to by reference
5446 // objects discovered by the CM ref processor.
5447 
5448 class G1ParPreserveCMReferentsTask: public AbstractGangTask {


5537   }
5538 };
5539 
5540 // Weak Reference processing during an evacuation pause (part 1).
5541 void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
5542   double ref_proc_start = os::elapsedTime();
5543 
5544   ReferenceProcessor* rp = _ref_processor_stw;
5545   assert(rp->discovery_enabled(), "should have been enabled");
5546 
5547   // Any reference objects, in the collection set, that were 'discovered'
5548   // by the CM ref processor should have already been copied (either by
5549   // applying the external root copy closure to the discovered lists, or
5550   // by following an RSet entry).
5551   //
5552   // But some of the referents, that are in the collection set, that these
5553   // reference objects point to may not have been copied: the STW ref
5554   // processor would have seen that the reference object had already
5555   // been 'discovered' and would have skipped discovering the reference,
5556   // but would not have treated the reference object as a regular oop.
5557   // As a result the copy closure would not have been applied to the
5558   // referent object.
5559   //
5560   // We need to explicitly copy these referent objects - the references
5561   // will be processed at the end of remarking.
5562   //
5563   // We also need to do this copying before we process the reference
5564   // objects discovered by the STW ref processor in case one of these
5565   // referents points to another object which is also referenced by an
5566   // object discovered by the STW ref processor.
5567 
5568   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
5569            no_of_gc_workers == workers()->active_workers(),
5570            "Need to reset active GC workers");
5571 
5572   set_par_threads(no_of_gc_workers);
5573   G1ParPreserveCMReferentsTask keep_cm_referents(this,
5574                                                  no_of_gc_workers,
5575                                                  _task_queues);
5576 
5577   if (G1CollectedHeap::use_parallel_gc_threads()) {


5613   G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL);
5614 
5615   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5616   OopsInHeapRegionClosure*       copy_metadata_cl = &only_copy_metadata_cl;
5617 
5618   if (_g1h->g1_policy()->during_initial_mark_pause()) {
5619     // We also need to mark copied objects.
5620     copy_non_heap_cl = &copy_mark_non_heap_cl;
5621     copy_metadata_cl = &copy_mark_metadata_cl;
5622   }
5623 
5624   // Keep alive closure.
5625   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss);
5626 
5627   // Serial Complete GC closure
5628   G1STWDrainQueueClosure drain_queue(this, &pss);
5629 
5630   // Setup the soft refs policy...
5631   rp->setup_policy(false);
5632 
5633   ReferenceProcessorStats stats;
5634   if (!rp->processing_is_mt()) {
5635     // Serial reference processing...
5636     stats = rp->process_discovered_references(&is_alive,
5637                                               &keep_alive,
5638                                               &drain_queue,
5639                                               NULL,
5640                                               _gc_timer_stw);
5641   } else {
5642     // Parallel reference processing
5643     assert(rp->num_q() == no_of_gc_workers, "sanity");
5644     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5645 
5646     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5647     stats = rp->process_discovered_references(&is_alive,
5648                                               &keep_alive,
5649                                               &drain_queue,
5650                                               &par_task_executor,
5651                                               _gc_timer_stw);
5652   }
5653 
5654   _gc_tracer_stw->report_gc_reference_stats(stats);
5655   // We have completed copying any necessary live referent objects
5656   // (that were not copied during the actual pause) so we can
5657   // retire any active alloc buffers
5658   pss.retire_alloc_buffers();
5659   assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5660 
5661   double ref_proc_time = os::elapsedTime() - ref_proc_start;
5662   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5663 }
5664 
5665 // Weak Reference processing during an evacuation pause (part 2).
5666 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5667   double ref_enq_start = os::elapsedTime();
5668 
5669   ReferenceProcessor* rp = _ref_processor_stw;
5670   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5671 
5672   // Now enqueue any remaining on the discovered lists on to
5673   // the pending list.
5674   if (!rp->processing_is_mt()) {
5675     // Serial reference processing...
5676     rp->enqueue_discovered_references();
5677   } else {
5678     // Parallel reference enqueueing
5679 
5680     assert(no_of_gc_workers == workers()->active_workers(),
5681            "Need to reset active workers");
5682     assert(rp->num_q() == no_of_gc_workers, "sanity");
5683     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5684 
5685     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
5686     rp->enqueue_discovered_references(&par_task_executor);
5687   }
5688 
5689   rp->verify_no_references_recorded();
5690   assert(!rp->discovery_enabled(), "should have been disabled");
5691 
5692   // FIXME
5693   // CM's reference processing also cleans up the string and symbol tables.
5694   // Should we do that here also? We could, but it is a serial operation
5695   // and could significantly increase the pause time.
5696 
5697   double ref_enq_time = os::elapsedTime() - ref_enq_start;
5698   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5699 }
5700 
5701 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5702   _expand_heap_after_alloc_failure = true;
5703   _evacuation_failed = false;
5704 
5705   // Should G1EvacuationFailureALot be in effect for this GC?
5706   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5707 
5708   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5709 
5710   // Disable the hot card cache.
5711   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5712   hot_card_cache->reset_hot_cache_claimed_index();
5713   hot_card_cache->set_use_cache(false);
5714 
5715   uint n_workers;
5716   if (G1CollectedHeap::use_parallel_gc_threads()) {
5717     n_workers =
5718       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5719                                      workers()->active_workers(),
5720                                      Threads::number_of_non_daemon_threads());
5721     assert(UseDynamicNumberOfGCThreads ||
5722            n_workers == workers()->total_workers(),
5723            "If not dynamic, should be using all the workers");


5772 
5773   set_par_threads(0);
5774 
5775   // Process any discovered reference objects - we have
5776   // to do this _before_ we retire the GC alloc regions
5777   // as we may have to copy some 'reachable' referent
5778   // objects (and their reachable sub-graphs) that were
5779   // not copied during the pause.
5780   process_discovered_references(n_workers);
5781 
5782   // Weak root processing.
5783   // Note: when JSR 292 is enabled and code blobs can contain
5784   // non-perm oops then we will need to process the code blobs
5785   // here too.
5786   {
5787     G1STWIsAliveClosure is_alive(this);
5788     G1KeepAliveClosure keep_alive(this);
5789     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5790   }
5791 
5792   release_gc_alloc_regions(n_workers, evacuation_info);
5793   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5794 
5795   // Reset and re-enable the hot card cache.
5796   // Note the counts for the cards in the regions in the
5797   // collection set are reset when the collection set is freed.
5798   hot_card_cache->reset_hot_cache();
5799   hot_card_cache->set_use_cache(true);
5800 
5801   finalize_for_evac_failure();
5802 
5803   if (evacuation_failed()) {
5804     remove_self_forwarding_pointers();
5805 
5806     // Reset the G1EvacuationFailureALot counters and flags
5807     // Note: the values are reset only when an actual
5808     // evacuation failure occurs.
5809     NOT_PRODUCT(reset_evacuation_should_fail();)
5810   }
5811 
5812   // Enqueue any references remaining on the STW
5813   // reference processor's discovered lists. We need to do
5814   // this after the card table is cleaned (and verified) as
5815   // the act of enqueueing entries on to the pending list
5816   // will log these updates (and dirty their associated
5817   // cards). We need these updates logged to update any
5818   // RSets.
5819   enqueue_discovered_references(n_workers);
5820 
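       // With deferred RSet updates, re-dirty the cards logged during the
       // pause and merge the buffers into the global dirty card queue set so
       // that concurrent refinement will process them.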
5821   if (G1DeferredRSUpdate) {
5822     RedirtyLoggedCardTableEntryFastClosure redirty;
5823     dirty_card_queue_set().set_closure(&redirty);
5824     dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5825 
5826     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5827     dcq.merge_bufferlists(&dirty_card_queue_set());
5828     assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5829   }
5830   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5831 }
5832 
5833 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5834                                      size_t* pre_used,
5835                                      FreeRegionList* free_list,


6023         _dirty_cards_region_list = r->get_next_dirty_cards_region();
6024         if (_dirty_cards_region_list == r) {
6025           // The last region.
6026           _dirty_cards_region_list = NULL;
6027         }
6028         r->set_next_dirty_cards_region(NULL);
6029       }
6030     }
6031 #ifndef PRODUCT
6032     if (G1VerifyCTCleanup || VerifyAfterGC) {
6033       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
6034       heap_region_iterate(&cleanup_verifier);
6035     }
6036 #endif
6037   }
6038 
6039   double elapsed = os::elapsedTime() - start;
6040   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
6041 }
6042 
6043 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
6044   size_t pre_used = 0;
6045   FreeRegionList local_free_list("Local List for CSet Freeing");
6046 
6047   double young_time_ms     = 0.0;
6048   double non_young_time_ms = 0.0;
6049 
6050   // Since the collection set is a superset of the young list,
6051   // all we need to do to clear the young list is clear its
6052   // head and length, and unlink any young regions in the code below
6053   _young_list->clear();
6054 
6055   G1CollectorPolicy* policy = g1_policy();
6056 
6057   double start_sec = os::elapsedTime();
6058   bool non_young = true;
6059 
6060   HeapRegion* cur = cs_head;
6061   int age_bound = -1;
6062   size_t rs_lengths = 0;
6063 


6109 
6110     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
6111             (!cur->is_young() && cur->young_index_in_cset() == -1),
6112             "invariant" );
6113 
6114     if (!cur->evacuation_failed()) {
6115       MemRegion used_mr = cur->used_region();
6116 
6117       // And the region is empty.
6118       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6119       free_region(cur, &pre_used, &local_free_list, false /* par */);
6120     } else {
6121       cur->uninstall_surv_rate_group();
6122       if (cur->is_young()) {
6123         cur->set_young_index_in_cset(-1);
6124       }
6125       cur->set_not_young();
6126       cur->set_evacuation_failed(false);
6127       // The region is now considered to be old.
6128       _old_set.add(cur);
6129       evacuation_info.increment_collectionset_used_after(cur->used());
6130     }
6131     cur = next;
6132   }
6133 
6134   evacuation_info.set_regions_freed(local_free_list.length());
6135   policy->record_max_rs_lengths(rs_lengths);
6136   policy->cset_regions_freed();
6137 
6138   double end_sec = os::elapsedTime();
6139   double elapsed_ms = (end_sec - start_sec) * 1000.0;
6140 
6141   if (non_young) {
6142     non_young_time_ms += elapsed_ms;
6143   } else {
6144     young_time_ms += elapsed_ms;
6145   }
6146 
6147   update_sets_after_freeing_regions(pre_used, &local_free_list,
6148                                     NULL /* old_proxy_set */,
6149                                     NULL /* humongous_proxy_set */,
6150                                     false /* par */);
6151   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6152   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6153 }
6154