< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page
rev 10808 : imported patch eager-reclaim-alive-check


2923   // into a number of cards.
2924   return (buffer_size * buffer_num + extra_cards) / oopSize;
2925 }
2926 
2927 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2928  private:
2929   size_t _total_humongous;
2930   size_t _candidate_humongous;
2931 
2932   DirtyCardQueue _dcq;
2933 
2934   // We don't nominate objects with many remembered set entries, on
2935   // the assumption that such objects are likely still live.
2936   bool is_remset_small(HeapRegion* region) const {
2937     HeapRegionRemSet* const rset = region->rem_set();
2938     return G1EagerReclaimHumongousObjectsWithStaleRefs
2939       ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
2940       : rset->is_empty();
2941   }
2942 
2943   bool is_typeArray_region(HeapRegion* region) const {
2944     return oop(region->bottom())->is_typeArray();
2945   }
2946 
2947   bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
2948     assert(region->is_starts_humongous(), "Must start a humongous object");
2949 








2950     // Candidate selection must satisfy the following constraints
2951     // while concurrent marking is in progress:
2952     //
2953     // * In order to maintain SATB invariants, an object must not be
2954     // reclaimed if it was allocated before the start of marking and
2955     // has not had its references scanned.  Such an object must have
2956     // its references (including type metadata) scanned to ensure no
2957     // live objects are missed by the marking process.  Objects
2958     // allocated after the start of concurrent marking don't need to
2959     // be scanned.
2960     //
2961     // * An object must not be reclaimed if it is on the concurrent
2962     // mark stack.  Objects allocated after the start of concurrent
2963     // marking are never pushed on the mark stack.
2964     //
2965     // Nominating only objects allocated after the start of concurrent
2966     // marking is sufficient to meet both constraints.  This may miss
2967     // some objects that satisfy the constraints, but the marking data
2968     // structures don't support efficiently performing the needed
2969     // additional tests or scrubbing of the mark stack.
2970     //
2971     // However, we presently only nominate is_typeArray() objects.
2972     // A humongous object containing references induces remembered
2973     // set entries on other regions.  In order to reclaim such an
2974     // object, those remembered sets would need to be cleaned up.
2975     //
2976     // We also treat is_typeArray() objects specially, allowing them
2977     // to be reclaimed even if allocated before the start of
2978     // concurrent mark.  For this we rely on mark stack insertion to
2979     // exclude is_typeArray() objects, preventing reclaiming an object
2980     // that is in the mark stack.  We also rely on the metadata for
2981     // such objects to be built-in and so ensured to be kept live.
2982     // Frequent allocation and drop of large binary blobs is an
2983     // important use case for eager reclaim, and this special handling
2984     // may reduce needed headroom.
2985 
2986     return is_typeArray_region(region) && is_remset_small(region);
2987   }
2988 
 public:
  // Starts with zero humongous regions seen/nominated; the dirty card
  // queue enqueues into the shared JavaThread dirty card queue set.
  RegisterHumongousWithInCSetFastTestClosure()
  : _total_humongous(0),
    _candidate_humongous(0),
    _dcq(&JavaThread::dirty_card_queue_set()) {
  }
2995 
2996   virtual bool doHeapRegion(HeapRegion* r) {
2997     if (!r->is_starts_humongous()) {
2998       return false;
2999     }
3000     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3001 
3002     bool is_candidate = humongous_region_is_candidate(g1h, r);
3003     uint rindex = r->hrm_index();
3004     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
3005     if (is_candidate) {
3006       _candidate_humongous++;




2923   // into a number of cards.
2924   return (buffer_size * buffer_num + extra_cards) / oopSize;
2925 }
2926 
2927 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2928  private:
2929   size_t _total_humongous;
2930   size_t _candidate_humongous;
2931 
2932   DirtyCardQueue _dcq;
2933 
  // We don't nominate objects with many remembered set entries, on
  // the assumption that such objects are likely still live.
  bool is_remset_small(HeapRegion* region) const {
    HeapRegionRemSet* const rset = region->rem_set();
    // With G1EagerReclaimHumongousObjectsWithStaleRefs a sparse-level
    // remembered set is tolerated; otherwise the set must be empty.
    return G1EagerReclaimHumongousObjectsWithStaleRefs
      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
      : rset->is_empty();
  }
2942 




  // Decide whether the humongous object starting in 'region' may be
  // nominated as an eager-reclaim candidate for this collection.
  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
    assert(region->is_starts_humongous(), "Must start a humongous object");

    oop obj = oop(region->bottom());

    // Dead objects cannot be eager reclaim candidates. Due to class
    // unloading it is unsafe to query their classes so we return early.
    if (heap->is_obj_dead(obj, region)) {
      return false;
    }

    // Candidate selection must satisfy the following constraints
    // while concurrent marking is in progress:
    //
    // * In order to maintain SATB invariants, an object must not be
    // reclaimed if it was allocated before the start of marking and
    // has not had its references scanned.  Such an object must have
    // its references (including type metadata) scanned to ensure no
    // live objects are missed by the marking process.  Objects
    // allocated after the start of concurrent marking don't need to
    // be scanned.
    //
    // * An object must not be reclaimed if it is on the concurrent
    // mark stack.  Objects allocated after the start of concurrent
    // marking are never pushed on the mark stack.
    //
    // Nominating only objects allocated after the start of concurrent
    // marking is sufficient to meet both constraints.  This may miss
    // some objects that satisfy the constraints, but the marking data
    // structures don't support efficiently performing the needed
    // additional tests or scrubbing of the mark stack.
    //
    // However, we presently only nominate is_typeArray() objects.
    // A humongous object containing references induces remembered
    // set entries on other regions.  In order to reclaim such an
    // object, those remembered sets would need to be cleaned up.
    //
    // We also treat is_typeArray() objects specially, allowing them
    // to be reclaimed even if allocated before the start of
    // concurrent mark.  For this we rely on mark stack insertion to
    // exclude is_typeArray() objects, preventing reclaiming an object
    // that is in the mark stack.  We also rely on the metadata for
    // such objects to be built-in and so ensured to be kept live.
    // Frequent allocation and drop of large binary blobs is an
    // important use case for eager reclaim, and this special handling
    // may reduce needed headroom.

    return obj->is_typeArray() && is_remset_small(region);
  }
2992 
 public:
  // Starts with zero humongous regions seen/nominated; the dirty card
  // queue enqueues into the shared JavaThread dirty card queue set.
  RegisterHumongousWithInCSetFastTestClosure()
  : _total_humongous(0),
    _candidate_humongous(0),
    _dcq(&JavaThread::dirty_card_queue_set()) {
  }
2999 
3000   virtual bool doHeapRegion(HeapRegion* r) {
3001     if (!r->is_starts_humongous()) {
3002       return false;
3003     }
3004     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3005 
3006     bool is_candidate = humongous_region_is_candidate(g1h, r);
3007     uint rindex = r->hrm_index();
3008     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
3009     if (is_candidate) {
3010       _candidate_humongous++;


< prev index next >