src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7323 : 8069367: Eagerly reclaimed humongous objects left on mark stack
Summary: Prevent eager reclaim of objects that might be on the mark stack.
Reviewed-by: brutisso, tschatzl

Original version (before the fix):

1836 
1837 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1838   SharedHeap(policy_),
1839   _g1_policy(policy_),
1840   _dirty_card_queue_set(false),
1841   _into_cset_dirty_card_queue_set(false),
1842   _is_alive_closure_cm(this),
1843   _is_alive_closure_stw(this),
1844   _ref_processor_cm(NULL),
1845   _ref_processor_stw(NULL),
1846   _bot_shared(NULL),
1847   _evac_failure_scan_stack(NULL),
1848   _mark_in_progress(false),
1849   _cg1r(NULL),
1850   _g1mm(NULL),
1851   _refine_cte_cl(NULL),
1852   _full_collection(false),
1853   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1854   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1855   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1856   _humongous_is_live(),
1857   _has_humongous_reclaim_candidates(false),
1858   _free_regions_coming(false),
1859   _young_list(new YoungList(this)),
1860   _gc_time_stamp(0),
1861   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1862   _old_plab_stats(OldPLABSize, PLABWeight),
1863   _expand_heap_after_alloc_failure(true),
1864   _surviving_young_words(NULL),
1865   _old_marking_cycles_started(0),
1866   _old_marking_cycles_completed(0),
1867   _concurrent_cycle_started(false),
1868   _heap_summary_sent(false),
1869   _in_cset_fast_test(),
1870   _dirty_cards_region_list(NULL),
1871   _worker_cset_start_region(NULL),
1872   _worker_cset_start_region_time_stamp(NULL),
1873   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1874   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1875   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1876   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {


2031   g1_barrier_set()->initialize(cardtable_storage);
2032    // Do later initialization work for concurrent refinement.
2033   _cg1r->init(card_counts_storage);
2034 
2035   // 6843694 - ensure that the maximum region index can fit
2036   // in the remembered set structures.
2037   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2038   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2039 
2040   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2041   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2042   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2043             "too many cards per region");
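
The two guarantees above bound the region count and the cards-per-region by the widths of RegionIdx_t and CardIdx_t. A minimal stand-alone sketch of the same arithmetic, assuming for illustration a 16-bit index type (RegionIdx16 is a stand-in, not the VM's actual typedef):

    // Stand-alone sketch of the width check above; RegionIdx16 is an
    // assumed 16-bit stand-in for RegionIdx_t, not the VM's typedef.
    #include <cstdio>
    #include <cstdint>

    typedef int16_t RegionIdx16;
    const int BitsPerByte = 8;

    int main() {
      // The -1 in the shift keeps the maximum index representable in a
      // *signed* index type: 2^(16-1) - 1 = 32767 for a 16-bit type.
      const unsigned max_region_idx =
          (1U << (sizeof(RegionIdx16) * BitsPerByte - 1)) - 1;
      printf("max region index: %u\n", max_region_idx);  // 32767
      return 0;
    }
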
2044 
2045   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2046 
2047   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
2048 
2049   _g1h = this;
2050 
2051   _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
2052   _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);

2053 
2054   // Create the ConcurrentMark data structure and thread.
2055   // (Must do this late, so that "max_regions" is defined.)
2056   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2057   if (_cm == NULL || !_cm->completed_initialization()) {
2058     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2059     return JNI_ENOMEM;
2060   }
2061   _cmThread = _cm->cmThread();
2062 
2063   // Initialize the from_card cache structure of HeapRegionRemSet.
2064   HeapRegionRemSet::init_heap(max_regions());
2065 
2066   // Now expand into the initial heap size.
2067   if (!expand(init_byte_size)) {
2068     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2069     return JNI_ENOMEM;
2070   }
2071 
2072   // Perform any initialization actions delegated to the policy.


2124   // Create the monitoring and management support now so that
2125   // values in the heap have been properly initialized.
2126   _g1mm = new G1MonitoringSupport(this);
2127 
2128   G1StringDedup::initialize();
2129 
2130   return JNI_OK;
2131 }
2132 
2133 void G1CollectedHeap::stop() {
2134   // Stop all concurrent threads. We do this to make sure these threads
2135   // do not continue to execute and access resources (e.g. gclog_or_tty)
2136   // that are destroyed during shutdown.
2137   _cg1r->stop();
2138   _cmThread->stop();
2139   if (G1StringDedup::is_enabled()) {
2140     G1StringDedup::stop();
2141   }
2142 }
2143 
2144 void G1CollectedHeap::clear_humongous_is_live_table() {
2145   guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
2146   _humongous_is_live.clear();
2147 }
2148 
2149 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2150   return HeapRegion::max_region_size();
2151 }
2152 
2153 void G1CollectedHeap::ref_processing_init() {
2154   // Reference processing in G1 currently works as follows:
2155   //
2156   // * There are two reference processor instances. One is
2157   //   used to record and process discovered references
2158   //   during concurrent marking; the other is used to
2159   //   record and process references during STW pauses
2160   //   (both full and incremental).
2161   // * Both ref processors need to 'span' the entire heap as
2162   //   the regions in the collection set may be dotted around.
2163   //
2164   // * For the concurrent marking ref processor:
2165   //   * Reference discovery is enabled at initial marking.
2166   //   * Reference discovery is disabled and the discovered
2167   //     references processed etc during remarking.
2168   //   * Reference discovery is MT (see below).


3649   JavaThread *curr = Threads::first();
3650   while (curr != NULL) {
3651     DirtyCardQueue& dcq = curr->dirty_card_queue();
3652     extra_cards += dcq.size();
3653     curr = curr->next();
3654   }
3655   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3656   size_t buffer_size = dcqs.buffer_size();
3657   size_t buffer_num = dcqs.completed_buffers_num();
3658 
3659   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3660   // in bytes - not the number of 'entries'. We need to convert
3661   // into a number of cards.
3662   return (buffer_size * buffer_num + extra_cards) / oopSize;
3663 }
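
The conversion above is plain unit arithmetic: buffer_size and the per-thread queue sizes are reported in bytes, and each enqueued card pointer occupies oopSize bytes, so dividing the byte total by oopSize yields a card count. A stand-alone sketch with assumed input values:

    // Sketch of the byte-to-card-count conversion above; all input
    // values are assumptions for illustration, not VM defaults.
    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t oop_size    = sizeof(void*);   // stand-in for oopSize
      const size_t buffer_size = 256 * oop_size;  // assumed bytes per buffer
      const size_t buffer_num  = 10;              // assumed completed buffers
      const size_t extra_cards = 64 * oop_size;   // assumed per-thread bytes

      size_t cards = (buffer_size * buffer_num + extra_cards) / oop_size;
      printf("pending cards: %zu\n", cards);      // 2560 + 64 = 2624
      return 0;
    }
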
3664 
3665 size_t G1CollectedHeap::cards_scanned() {
3666   return g1_rem_set()->cardsScanned();
3667 }
3668 
3669 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3670   HeapRegion* region = region_at(index);
3671   assert(region->startsHumongous(), "Must start a humongous object");
3672   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3673 }
3674 
3675 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3676  private:
3677   size_t _total_humongous;
3678   size_t _candidate_humongous;
3679 
3680   DirtyCardQueue _dcq;
3681 
3682   bool humongous_region_is_candidate(uint index) {
3683     HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
3684     assert(region->startsHumongous(), "Must start a humongous object");
3685     HeapRegionRemSet* const rset = region->rem_set();
3686     bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs;
3687     return !oop(region->bottom())->is_objArray() &&
3688            ((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
3689             (!allow_stale_refs && rset->is_empty()));

3690   }
3691 
3692  public:
3693   RegisterHumongousWithInCSetFastTestClosure()
3694   : _total_humongous(0),
3695     _candidate_humongous(0),
3696     _dcq(&JavaThread::dirty_card_queue_set()) {
3697   }
3698 
3699   virtual bool doHeapRegion(HeapRegion* r) {
3700     if (!r->startsHumongous()) {
3701       return false;
3702     }
3703     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3704 
3705     uint region_idx = r->hrm_index();
3706     bool is_candidate = humongous_region_is_candidate(region_idx);




3707     // Is_candidate already filters out humongous objects with large remembered sets.
3708     // If we have a humongous object with a few remembered set entries, we simply
3709     // flush these entries into the DCQS. That will result in automatic
3710     // re-evaluation of these remembered set entries during the following evacuation
3711     // phase.
3712     if (is_candidate) {
3713       if (!r->rem_set()->is_empty()) {
3714         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3715                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3716         G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3717         HeapRegionRemSetIterator hrrs(r->rem_set());
3718         size_t card_index;
3719         while (hrrs.has_next(card_index)) {
3720           jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3721           if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
3722             *card_ptr = CardTableModRefBS::dirty_card_val();
3723             _dcq.enqueue(card_ptr);
3724           }
3725         }
3726         r->rem_set()->clear_locked();
3727       }
3728       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3729       g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3730       _candidate_humongous++;
3731     }
3732     _total_humongous++;
3733 
3734     return false;
3735   }
3736 
3737   size_t total_humongous() const { return _total_humongous; }
3738   size_t candidate_humongous() const { return _candidate_humongous; }
3739 
3740   void flush_rem_set_entries() { _dcq.flush(); }
3741 };
3742 
3743 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3744   if (!G1EagerReclaimHumongousObjects) {
3745     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3746     return;
3747   }
3748   double time = os::elapsed_counter();
3749 

3750   RegisterHumongousWithInCSetFastTestClosure cl;
3751   heap_region_iterate(&cl);
3752 
3753   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3754   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3755                                                                   cl.total_humongous(),
3756                                                                   cl.candidate_humongous());
3757   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3758 
3759   if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
3760     clear_humongous_is_live_table();
3761   }
3762 
3763   // Finally, flush all remembered set entries that need re-checking into the global DCQS.
3764   cl.flush_rem_set_entries();
3765 }
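
The timing wrapper above converts raw os::elapsed_counter() ticks to milliseconds by dividing by os::elapsed_frequency(). The same pattern in portable C++, as a reference for what the recorded phase-times value means:

    // Portable sketch of the tick-to-millisecond conversion used above.
    #include <chrono>
    #include <cstdio>

    int main() {
      auto start = std::chrono::steady_clock::now();
      // ... the region iteration being timed would run here ...
      double ms = std::chrono::duration<double, std::milli>(
                      std::chrono::steady_clock::now() - start).count();
      printf("fast reclaim humongous stats took %.3f ms\n", ms);
      return 0;
    }
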
3766 
3767 void
3768 G1CollectedHeap::setup_surviving_young_words() {
3769   assert(_surviving_young_words == NULL, "pre-condition");
3770   uint array_length = g1_policy()->young_cset_region_length();
3771   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3772   if (_surviving_young_words == NULL) {
3773     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3774                           "Not enough space for young surv words summary.");
3775   }
3776   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3777 #ifdef ASSERT
3778   for (uint i = 0;  i < array_length; ++i) {
3779     assert( _surviving_young_words[i] == 0, "memset above" );
3780   }
3781 #endif // ASSERT
3782 }


6304     // remembered set)
6305     // - as soon as there is a remembered set entry to the humongous starts region
6306     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6307     // until the end of a concurrent mark.
6308     //
6309     // It is not required to check whether the object has been found dead by marking
6310     // or not; in fact, that would prevent reclamation within a concurrent cycle, as
6311     // all objects allocated during that time are considered live.
6312     // SATB marking is even more conservative than the remembered set.
6313     // So if at this point in the collection there is no remembered set entry,
6314     // nobody has a reference to it.
6315     // At the start of collection we flush all refinement logs, and remembered sets
6316     // are completely up-to-date with respect to references to the humongous object.
6317     //
6318     // Other implementation considerations:
6319     // - never consider object arrays at this time because they would pose
6320     // considerable effort for cleaning up the remembered sets. This is
6321     // required because stale remembered sets might reference locations that
6322     // are currently allocated into.
6323     uint region_idx = r->hrm_index();
6324     if (g1h->humongous_is_live(region_idx) ||
6325         g1h->humongous_region_is_always_live(region_idx)) {
6326 
6327       if (G1TraceEagerReclaimHumongousObjects) {
6328         gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6329                                region_idx,
6330                                obj->size()*HeapWordSize,
6331                                r->bottom(),
6332                                r->region_num(),
6333                                r->rem_set()->occupied(),
6334                                r->rem_set()->strong_code_roots_list_length(),
6335                                next_bitmap->isMarked(r->bottom()),
6336                                g1h->humongous_is_live(region_idx),
6337                                obj->is_objArray()
6338                               );
6339       }
6340 
6341       return false;
6342     }
6343 
6344     guarantee(!obj->is_objArray(),
6345               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",

6346                       r->bottom()));
6347 
6348     if (G1TraceEagerReclaimHumongousObjects) {
6349       gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6350                              region_idx,
6351                              obj->size()*HeapWordSize,
6352                              r->bottom(),
6353                              r->region_num(),
6354                              r->rem_set()->occupied(),
6355                              r->rem_set()->strong_code_roots_list_length(),
6356                              next_bitmap->isMarked(r->bottom()),
6357                              g1h->humongous_is_live(region_idx),
6358                              obj->is_objArray()
6359                             );
6360     }
6361     // Need to clear mark bit of the humongous object if already set.
6362     if (next_bitmap->isMarked(r->bottom())) {
6363       next_bitmap->clear(r->bottom());
6364     }
6365     _freed_bytes += r->used();
6366     r->set_containing_set(NULL);
6367     _humongous_regions_removed.increment(1u, r->capacity());
6368     g1h->free_humongous_region(r, _free_region_list, false);
6369 
6370     return false;
6371   }
6372 
6373   HeapRegionSetCount& humongous_free_count() {
6374     return _humongous_regions_removed;
6375   }
6376 
6377   size_t bytes_freed() const {
6378     return _freed_bytes;

Patched version (rev 7323 applied):

1836 
1837 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1838   SharedHeap(policy_),
1839   _g1_policy(policy_),
1840   _dirty_card_queue_set(false),
1841   _into_cset_dirty_card_queue_set(false),
1842   _is_alive_closure_cm(this),
1843   _is_alive_closure_stw(this),
1844   _ref_processor_cm(NULL),
1845   _ref_processor_stw(NULL),
1846   _bot_shared(NULL),
1847   _evac_failure_scan_stack(NULL),
1848   _mark_in_progress(false),
1849   _cg1r(NULL),
1850   _g1mm(NULL),
1851   _refine_cte_cl(NULL),
1852   _full_collection(false),
1853   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1854   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1855   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1856   _humongous_reclaim_candidates(),
1857   _has_humongous_reclaim_candidates(false),
1858   _free_regions_coming(false),
1859   _young_list(new YoungList(this)),
1860   _gc_time_stamp(0),
1861   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1862   _old_plab_stats(OldPLABSize, PLABWeight),
1863   _expand_heap_after_alloc_failure(true),
1864   _surviving_young_words(NULL),
1865   _old_marking_cycles_started(0),
1866   _old_marking_cycles_completed(0),
1867   _concurrent_cycle_started(false),
1868   _heap_summary_sent(false),
1869   _in_cset_fast_test(),
1870   _dirty_cards_region_list(NULL),
1871   _worker_cset_start_region(NULL),
1872   _worker_cset_start_region_time_stamp(NULL),
1873   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1874   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1875   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1876   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {


2031   g1_barrier_set()->initialize(cardtable_storage);
2032    // Do later initialization work for concurrent refinement.
2033   _cg1r->init(card_counts_storage);
2034 
2035   // 6843694 - ensure that the maximum region index can fit
2036   // in the remembered set structures.
2037   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2038   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2039 
2040   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2041   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2042   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2043             "too many cards per region");
2044 
2045   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2046 
2047   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
2048 
2049   _g1h = this;
2050 
2051   {
2052     HeapWord* start = _hrm.reserved().start();
2053     HeapWord* end = _hrm.reserved().end();
2054     size_t granularity = HeapRegion::GrainBytes;
2055 
2056     _in_cset_fast_test.initialize(start, end, granularity);
2057     _humongous_reclaim_candidates.initialize(start, end, granularity);
2058   }
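
Both side tables cover the reserved heap at region granularity: an address maps to its entry by subtracting the reserved base and dividing by GrainBytes. A hypothetical sketch of that addressing scheme (the VM's actual implementation is a biased mapped array, not this class):

    // Hypothetical region-granularity side table illustrating the
    // initialize(start, end, granularity) calls above; not VM code.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    class RegionFlagTable {
      const char* _base;         // start of the reserved heap
      size_t      _granularity;  // bytes per entry (HeapRegion::GrainBytes)
      std::vector<bool> _flags;  // one flag per region
     public:
      RegionFlagTable(const char* start, const char* end, size_t granularity)
        : _base(start), _granularity(granularity),
          _flags((end - start) / granularity, false) {}

      size_t index_for(const void* addr) const {
        assert(static_cast<const char*>(addr) >= _base);
        return (static_cast<const char*>(addr) - _base) / _granularity;
      }
      void set(const void* addr, bool v) { _flags[index_for(addr)] = v; }
      bool get(const void* addr) const   { return _flags[index_for(addr)]; }
    };
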
2059 
2060   // Create the ConcurrentMark data structure and thread.
2061   // (Must do this late, so that "max_regions" is defined.)
2062   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2063   if (_cm == NULL || !_cm->completed_initialization()) {
2064     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2065     return JNI_ENOMEM;
2066   }
2067   _cmThread = _cm->cmThread();
2068 
2069   // Initialize the from_card cache structure of HeapRegionRemSet.
2070   HeapRegionRemSet::init_heap(max_regions());
2071 
2072   // Now expand into the initial heap size.
2073   if (!expand(init_byte_size)) {
2074     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2075     return JNI_ENOMEM;
2076   }
2077 
2078   // Perform any initialization actions delegated to the policy.


2130   // Create the monitoring and management support now so that
2131   // values in the heap have been properly initialized.
2132   _g1mm = new G1MonitoringSupport(this);
2133 
2134   G1StringDedup::initialize();
2135 
2136   return JNI_OK;
2137 }
2138 
2139 void G1CollectedHeap::stop() {
2140   // Stop all concurrent threads. We do this to make sure these threads
2141   // do not continue to execute and access resources (e.g. gclog_or_tty)
2142   // that are destroyed during shutdown.
2143   _cg1r->stop();
2144   _cmThread->stop();
2145   if (G1StringDedup::is_enabled()) {
2146     G1StringDedup::stop();
2147   }
2148 }
2149 

2150 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2151   return HeapRegion::max_region_size();
2152 }
2153 
2154 void G1CollectedHeap::ref_processing_init() {
2155   // Reference processing in G1 currently works as follows:
2156   //
2157   // * There are two reference processor instances. One is
2158   //   used to record and process discovered references
2159   //   during concurrent marking; the other is used to
2160   //   record and process references during STW pauses
2161   //   (both full and incremental).
2162   // * Both ref processors need to 'span' the entire heap as
2163   //   the regions in the collection set may be dotted around.
2164   //
2165   // * For the concurrent marking ref processor:
2166   //   * Reference discovery is enabled at initial marking.
2167   //   * Reference discovery is disabled and the discovered
2168   //     references processed etc during remarking.
2169   //   * Reference discovery is MT (see below).


3650   JavaThread *curr = Threads::first();
3651   while (curr != NULL) {
3652     DirtyCardQueue& dcq = curr->dirty_card_queue();
3653     extra_cards += dcq.size();
3654     curr = curr->next();
3655   }
3656   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3657   size_t buffer_size = dcqs.buffer_size();
3658   size_t buffer_num = dcqs.completed_buffers_num();
3659 
3660   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3661   // in bytes - not the number of 'entries'. We need to convert
3662   // into a number of cards.
3663   return (buffer_size * buffer_num + extra_cards) / oopSize;
3664 }
3665 
3666 size_t G1CollectedHeap::cards_scanned() {
3667   return g1_rem_set()->cardsScanned();
3668 }
3669 

3670 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3671  private:
3672   size_t _total_humongous;
3673   size_t _candidate_humongous;
3674 
3675   DirtyCardQueue _dcq;
3676 
3677   // We don't nominate objects with many remembered set entries, on
3678   // the assumption that such objects are likely still live.
3679   bool is_remset_small(HeapRegion* region) const {
3680     HeapRegionRemSet* const rset = region->rem_set();
3681     return G1EagerReclaimHumongousObjectsWithStaleRefs
3682       ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
3683       : rset->is_empty();
3684   }
3685 
3686   bool is_typeArray_region(HeapRegion* region) const {
3687     return oop(region->bottom())->is_typeArray();
3688   }
3689 
3690   bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
3691     assert(region->startsHumongous(), "Must start a humongous object");
3692 
3693     // Candidate selection must satisfy the following constraints
3694     // while concurrent marking is in progress:
3695     //
3696     // * In order to maintain SATB invariants, an object must not be
3697     // reclaimed if it was allocated before the start of marking and
3698     // has not had its references scanned.  Such an object must have
3699     // its references (including type metadata) scanned to ensure no
3700     // live objects are missed by the marking process.  Objects
3701     // allocated after the start of concurrent marking don't need to
3702     // be scanned.
3703     //
3704     // * An object must not be reclaimed if it is on the concurrent
3705     // mark stack.  Objects allocated after the start of concurrent
3706     // marking are never pushed on the mark stack.
3707     //
3708     // Nominating only objects allocated after the start of concurrent
3709     // marking is sufficient to meet both constraints.  This may miss
3710     // some objects that satisfy the constraints, but the marking data
3711     // structures don't support efficiently performing the needed
3712     // additional tests or scrubbing of the mark stack.
3713     //
3714     // However, we presently only nominate is_typeArray() objects.
3715     // A humongous object containing references induces remembered
3716     // set entries on other regions.  In order to reclaim such an
3717     // object, those remembered sets would need to be cleaned up.
3718     //
3719     // We also treat is_typeArray() objects specially, allowing them
3720     // to be reclaimed even if allocated before the start of
3721     // concurrent mark.  For this we rely on mark stack insertion to
3722     // exclude is_typeArray() objects, preventing reclaiming an object
3723     // that is in the mark stack.  We also rely on the metadata for
3724     // such objects to be built-in and so ensured to be kept live.
3725     // Frequent allocation and drop of large binary blobs is an
3726     // important use case for eager reclaim, and this special handling
3727     // may reduce needed headroom.
3728 
3729     return is_typeArray_region(region) && is_remset_small(region);
3730   }
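
To summarize the constraints above in one place: nomination combines the type check with the remembered set size check, and the pause-time closure further down re-checks that the remembered set is still empty. A condensed, hypothetical sketch of how the two stages compose (names are illustrative; the real logic is humongous_region_is_candidate() here and the free-humongous closure below):

    // Condensed sketch of the two-stage eager reclaim decision; names
    // are illustrative, not the VM's.
    bool nominate(bool is_type_array, bool remset_small) {
      // Type arrays hold no references and, per the comment above, are
      // kept off the mark stack, so reclaiming one cannot break SATB
      // marking.
      return is_type_array && remset_small;
    }

    bool reclaim_at_pause(bool nominated, bool remset_empty_at_pause) {
      // Any remembered set entry surviving evacuation means a live
      // object still references the humongous object: keep it.
      return nominated && remset_empty_at_pause;
    }
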
3731 
3732  public:
3733   RegisterHumongousWithInCSetFastTestClosure()
3734   : _total_humongous(0),
3735     _candidate_humongous(0),
3736     _dcq(&JavaThread::dirty_card_queue_set()) {
3737   }
3738 
3739   virtual bool doHeapRegion(HeapRegion* r) {
3740     if (!r->startsHumongous()) {
3741       return false;
3742     }
3743     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3744 
3745     bool is_candidate = humongous_region_is_candidate(g1h, r);
3746     uint rindex = r->hrm_index();
3747     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
3748     if (is_candidate) {
3749       _candidate_humongous++;
3750       g1h->register_humongous_region_with_in_cset_fast_test(rindex);
3751       // Is_candidate already filters out humongous objects with large remembered sets.
3752       // If we have a humongous object with a few remembered set entries, we simply
3753       // flush these entries into the DCQS. That will result in automatic
3754       // re-evaluation of these remembered set entries during the following evacuation
3755       // phase.

3756       if (!r->rem_set()->is_empty()) {
3757         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3758                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3759         G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3760         HeapRegionRemSetIterator hrrs(r->rem_set());
3761         size_t card_index;
3762         while (hrrs.has_next(card_index)) {
3763           jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3764           if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
3765             *card_ptr = CardTableModRefBS::dirty_card_val();
3766             _dcq.enqueue(card_ptr);
3767           }
3768         }
3769         r->rem_set()->clear_locked();
3770       }
3771       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");


3772     }
3773     _total_humongous++;
3774 
3775     return false;
3776   }
3777 
3778   size_t total_humongous() const { return _total_humongous; }
3779   size_t candidate_humongous() const { return _candidate_humongous; }
3780 
3781   void flush_rem_set_entries() { _dcq.flush(); }
3782 };
3783 
3784 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3785   if (!G1EagerReclaimHumongousObjects) {
3786     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3787     return;
3788   }
3789   double time = os::elapsed_counter();
3790 
3791   // Collect reclaim candidate information and register candidates with cset.
3792   RegisterHumongousWithInCSetFastTestClosure cl;
3793   heap_region_iterate(&cl);
3794 
3795   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3796   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3797                                                                   cl.total_humongous(),
3798                                                                   cl.candidate_humongous());
3799   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3800 

3801   // Finally, flush all remembered set entries that need re-checking into the global DCQS.
3802   cl.flush_rem_set_entries();
3803 }
3804 
3805 void
3806 G1CollectedHeap::setup_surviving_young_words() {
3807   assert(_surviving_young_words == NULL, "pre-condition");
3808   uint array_length = g1_policy()->young_cset_region_length();
3809   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3810   if (_surviving_young_words == NULL) {
3811     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3812                           "Not enough space for young surv words summary.");
3813   }
3814   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3815 #ifdef ASSERT
3816   for (uint i = 0;  i < array_length; ++i) {
3817     assert( _surviving_young_words[i] == 0, "memset above" );
3818   }
3819 #endif // ASSERT
3820 }


6342     // remembered set)
6343     // - as soon as there is a remembered set entry to the humongous starts region
6344     // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6345     // until the end of a concurrent mark.
6346     //
6347     // It is not required to check whether the object has been found dead by marking
6348     // or not; in fact, that would prevent reclamation within a concurrent cycle, as
6349     // all objects allocated during that time are considered live.
6350     // SATB marking is even more conservative than the remembered set.
6351     // So if at this point in the collection there is no remembered set entry,
6352     // nobody has a reference to it.
6353     // At the start of collection we flush all refinement logs, and remembered sets
6354     // are completely up-to-date with respect to references to the humongous object.
6355     //
6356     // Other implementation considerations:
6357     // - never consider object arrays at this time because they would pose
6358     // considerable effort for cleaning up the remembered sets. This is
6359     // required because stale remembered sets might reference locations that
6360     // are currently allocated into.
6361     uint region_idx = r->hrm_index();
6362     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
6363         !r->rem_set()->is_empty()) {
6364 
6365       if (G1TraceEagerReclaimHumongousObjects) {
6366         gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
6367                                region_idx,
6368                                obj->size()*HeapWordSize,
6369                                r->bottom(),
6370                                r->region_num(),
6371                                r->rem_set()->occupied(),
6372                                r->rem_set()->strong_code_roots_list_length(),
6373                                next_bitmap->isMarked(r->bottom()),
6374                                g1h->is_humongous_reclaim_candidate(region_idx),
6375                                obj->is_typeArray()
6376                               );
6377       }
6378 
6379       return false;
6380     }
6381 
6382     guarantee(obj->is_typeArray(),
6383               err_msg("Only eagerly reclaiming type arrays is supported, but the object "
6384                       PTR_FORMAT " is not.",
6385                       r->bottom()));
6386 
6387     if (G1TraceEagerReclaimHumongousObjects) {
6388       gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
6389                              region_idx,
6390                              obj->size()*HeapWordSize,
6391                              r->bottom(),
6392                              r->region_num(),
6393                              r->rem_set()->occupied(),
6394                              r->rem_set()->strong_code_roots_list_length(),
6395                              next_bitmap->isMarked(r->bottom()),
6396                              g1h->is_humongous_reclaim_candidate(region_idx),
6397                              obj->is_typeArray()
6398                             );
6399     }
6400     // Need to clear mark bit of the humongous object if already set.
6401     if (next_bitmap->isMarked(r->bottom())) {
6402       next_bitmap->clear(r->bottom());
6403     }
6404     _freed_bytes += r->used();
6405     r->set_containing_set(NULL);
6406     _humongous_regions_removed.increment(1u, r->capacity());
6407     g1h->free_humongous_region(r, _free_region_list, false);
6408 
6409     return false;
6410   }
6411 
6412   HeapRegionSetCount& humongous_free_count() {
6413     return _humongous_regions_removed;
6414   }
6415 
6416   size_t bytes_freed() const {
6417     return _freed_bytes;