< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 56150 : imported patch move_iteration
rev 56155 : [mq]: simplify_ce_closure


 115 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
      // After evacuation, re-marks logged cards as dirty so they are rescanned,
      // skipping cards in regions that free_collection_set is about to free.
      // Counts how many cards were actually re-dirtied for phase reporting.
 116  private:
 117   size_t _num_dirtied;
 118   G1CollectedHeap* _g1h;
 119   G1CardTable* _g1_ct;
 120 
      // Map a card table entry pointer back to the heap region that covers
      // the address range the card describes.
 121   HeapRegion* region_for_card(CardValue* card_ptr) const {
 122     return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
 123   }
 124 
 125   bool will_become_free(HeapRegion* hr) const {
 126     // A region will be freed by free_collection_set if the region is in the
 127     // collection set and has not had an evacuation failure.
 128     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 129   }
 130 
 131  public:
 132   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
 133     _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
 134 
      // Process one logged card; always returns true so iteration continues.
 135   bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
 136     HeapRegion* hr = region_for_card(card_ptr);
 137 
 138     // Should only dirty cards in regions that won't be freed.
 139     if (!will_become_free(hr)) {
 140       *card_ptr = G1CardTable::dirty_card_val();
 141       _num_dirtied++;
 142     }
 143 
 144     return true;
 145   }
 146 
      // Number of cards this closure re-dirtied (per-worker tally).
 147   size_t num_dirtied()   const { return _num_dirtied; }
 148 };
 149 
 150 
 151 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
      // Invalidate the from-card-cache entries for the given region range; the
      // cached state is stale once the underlying region mapping has changed.
 152   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 153 }
 154 
 155 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
      // Callback for newly committed heap regions: reset cached remembered-set
      // state for the affected range.
 156   // The from card cache is not the memory that is actually committed. So we cannot
 157   // take advantage of the zero_filled parameter.
 158   reset_from_card_cache(start_idx, num_regions);
 159 }
 160 
 161 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
 162   Ticks start = Ticks::now();
 163   workers()->run_task(task, workers()->active_workers());
 164   return Ticks::now() - start;


1929                            &_is_alive_closure_stw,               // is alive closure
1930                            true);                                // allow changes to number of processing threads
1931 }
1932 
1933 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1934   return &_soft_ref_policy;  // accessor for the heap's soft-reference clearing policy
1935 }
1936 
1937 size_t G1CollectedHeap::capacity() const {
      // Committed heap capacity in bytes: number of regions times region size.
1938   return _hrm->length() * HeapRegion::GrainBytes;
1939 }
1940 
1941 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
      // Bytes committed but currently free, as tracked by the region manager.
1942   return _hrm->total_free_bytes();
1943 }
1944 
1945 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
      // Apply cl to every card buffered in the hot card cache, on behalf of
      // worker worker_i.
1946   _hot_card_cache->drain(cl, worker_i);
1947 }
1948 
1949 void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
      // Apply cl to all completed buffers of the global dirty card queue set;
      // keeps claiming buffers until none remain.
1950   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1951   while (dcqs.apply_closure_during_gc(cl, worker_i)) {}
1952   assert(dcqs.num_cards() == 0, "Completed buffers exist!");  // everything must have been consumed
1953 }
1954 
1955 // Computes the sum of the storage used by the various regions.
1956 size_t G1CollectedHeap::used() const {
1957   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1958   if (_archive_allocator != NULL) {
      // Include archive-space usage only while an archive allocator is active.
1959     result += _archive_allocator->used();
1960   }
1961   return result;
1962 }
1963 
1964 size_t G1CollectedHeap::used_unlocked() const {
      // Cheaper variant of used(): returns the cached summary only, without
      // consulting the allocator or archive allocator.
1965   return _summary_bytes_used;
1966 }
1967 
1968 class SumUsedClosure: public HeapRegionClosure {
1969   size_t _used;
1970 public:
1971   SumUsedClosure() : _used(0) {}
1972   bool do_heap_region(HeapRegion* r) {
1973     _used += r->used();
1974     return false;


3206     {
3207       G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
3208       StringDedupTable::unlink_or_oops_do(&cl, worker_id);
3209     }
3210   }
3211 };
3212 
3213 void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
3214                                             OopClosure* keep_alive,
3215                                             G1GCPhaseTimes* phase_times) {
      // Run string-deduplication table/queue cleanup in parallel across the
      // worker gang, using the given liveness and keep-alive closures.
3216   G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
3217   workers()->run_task(&cl);
3218 }
3219 
3220 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
      // Gang task that re-dirties all cards recorded in a redirty queue set.
      // Workers cooperatively pop buffer nodes from a shared lock-free list.
3221  private:
3222   G1RedirtyCardsQueueSet* _qset;
3223   G1CollectedHeap* _g1h;
3224   BufferNode* volatile _nodes;  // head of the shared list of unprocessed buffers
3225 
      // Apply cl to every card in node's buffer, from the node's current
      // index up to the fixed buffer size.
3226   void apply(G1CardTableEntryClosure* cl, BufferNode* node, uint worker_id) {
3227     void** buf = BufferNode::make_buffer_from_node(node);
3228     size_t limit = _qset->buffer_size();
3229     for (size_t i = node->index(); i < limit; ++i) {
3230       CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
3231       bool result = cl->do_card_ptr(card_ptr, worker_id);
3232       assert(result, "Closure should always return true");
3233     }
3234   }
3235 
      // Lock-free claim loop: each worker tries to pop the head node with a
      // cmpxchg from node to node->next(). On success (next == node) this
      // worker owns the node and processes it; on failure, next already holds
      // the freshly observed head, so the loop retries without reloading.
3236   void par_apply(G1CardTableEntryClosure* cl, uint worker_id) {
3237     BufferNode* next = Atomic::load(&_nodes);
3238     while (next != NULL) {
3239       BufferNode* node = next;
3240       next = Atomic::cmpxchg(node->next(), &_nodes, node);
3241       if (next == node) {
3242         apply(cl, node, worker_id);
3243         next = node->next();
3244       }
3245     }
3246   }
3247 
3248  public:
3249   G1RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* qset, G1CollectedHeap* g1h) :
3250     AbstractGangTask("Redirty Cards"),
3251     _qset(qset), _g1h(g1h), _nodes(qset->all_completed_buffers()) { }
3252 
      // Per-worker entry point: time the phase, process claimed buffers, and
      // record the per-worker count of re-dirtied cards.
3253   virtual void work(uint worker_id) {
3254     G1GCPhaseTimes* p = _g1h->phase_times();
3255     G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::RedirtyCards, worker_id);
3256 
3257     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3258     par_apply(&cl, worker_id);
3259 
3260     p->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3261   }
3262 };




 115 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
      // After evacuation, re-marks logged cards as dirty so they are rescanned,
      // skipping cards in regions that free_collection_set is about to free.
      // Counts how many cards were actually re-dirtied for phase reporting.
 116  private:
 117   size_t _num_dirtied;
 118   G1CollectedHeap* _g1h;
 119   G1CardTable* _g1_ct;
 120 
      // Map a card table entry pointer back to the heap region that covers
      // the address range the card describes.
 121   HeapRegion* region_for_card(CardValue* card_ptr) const {
 122     return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
 123   }
 124 
 125   bool will_become_free(HeapRegion* hr) const {
 126     // A region will be freed by free_collection_set if the region is in the
 127     // collection set and has not had an evacuation failure.
 128     return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
 129   }
 130 
 131  public:
 132   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
 133     _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
 134 
      // Process one logged card (void return in this revision; iteration is
      // driven externally by apply_to_buffer).
 135   void do_card_ptr(CardValue* card_ptr, uint worker_i) {
 136     HeapRegion* hr = region_for_card(card_ptr);
 137 
 138     // Should only dirty cards in regions that won't be freed.
 139     if (!will_become_free(hr)) {
 140       *card_ptr = G1CardTable::dirty_card_val();
 141       _num_dirtied++;
 142     }


 143   }
 144 
      // Number of cards this closure re-dirtied (per-worker tally).
 145   size_t num_dirtied()   const { return _num_dirtied; }
 146 };
 147 
 148 
 149 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
      // Invalidate the from-card-cache entries for the given region range; the
      // cached state is stale once the underlying region mapping has changed.
 150   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 151 }
 152 
 153 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
      // Callback for newly committed heap regions: reset cached remembered-set
      // state for the affected range.
 154   // The from card cache is not the memory that is actually committed. So we cannot
 155   // take advantage of the zero_filled parameter.
 156   reset_from_card_cache(start_idx, num_regions);
 157 }
 158 
 159 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
 160   Ticks start = Ticks::now();
 161   workers()->run_task(task, workers()->active_workers());
 162   return Ticks::now() - start;


1927                            &_is_alive_closure_stw,               // is alive closure
1928                            true);                                // allow changes to number of processing threads
1929 }
1930 
1931 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1932   return &_soft_ref_policy;  // accessor for the heap's soft-reference clearing policy
1933 }
1934 
1935 size_t G1CollectedHeap::capacity() const {
      // Committed heap capacity in bytes: number of regions times region size.
1936   return _hrm->length() * HeapRegion::GrainBytes;
1937 }
1938 
1939 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
      // Bytes committed but currently free, as tracked by the region manager.
1940   return _hrm->total_free_bytes();
1941 }
1942 
1943 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
      // Apply cl to every card buffered in the hot card cache, on behalf of
      // worker worker_i.
1944   _hot_card_cache->drain(cl, worker_i);
1945 }
1946 






1947 // Computes the sum of the storage used by the various regions.
1948 size_t G1CollectedHeap::used() const {
1949   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1950   if (_archive_allocator != NULL) {
      // Include archive-space usage only while an archive allocator is active.
1951     result += _archive_allocator->used();
1952   }
1953   return result;
1954 }
1955 
1956 size_t G1CollectedHeap::used_unlocked() const {
      // Cheaper variant of used(): returns the cached summary only, without
      // consulting the allocator or archive allocator.
1957   return _summary_bytes_used;
1958 }
1959 
1960 class SumUsedClosure: public HeapRegionClosure {
1961   size_t _used;
1962 public:
1963   SumUsedClosure() : _used(0) {}
1964   bool do_heap_region(HeapRegion* r) {
1965     _used += r->used();
1966     return false;


3198     {
3199       G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
3200       StringDedupTable::unlink_or_oops_do(&cl, worker_id);
3201     }
3202   }
3203 };
3204 
3205 void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
3206                                             OopClosure* keep_alive,
3207                                             G1GCPhaseTimes* phase_times) {
      // Run string-deduplication table/queue cleanup in parallel across the
      // worker gang, using the given liveness and keep-alive closures.
3208   G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
3209   workers()->run_task(&cl);
3210 }
3211 
3212 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
      // Gang task that re-dirties all cards recorded in a redirty queue set.
      // Workers cooperatively pop buffer nodes from a shared lock-free list;
      // in this revision buffer iteration lives in the closure's
      // apply_to_buffer rather than in a task-local helper.
3213  private:
3214   G1RedirtyCardsQueueSet* _qset;
3215   G1CollectedHeap* _g1h;
3216   BufferNode* volatile _nodes;  // head of the shared list of unprocessed buffers
3217 
      // Lock-free claim loop: each worker tries to pop the head node with a
      // cmpxchg from node to node->next(). On success (next == node) this
      // worker owns the node and applies the closure to its buffer; on
      // failure, next already holds the freshly observed head, so the loop
      // retries without reloading.
3218   void par_apply(RedirtyLoggedCardTableEntryClosure* cl, uint worker_id) {
3219     size_t buffer_size = _qset->buffer_size();  // hoisted: same for every node









3220     BufferNode* next = Atomic::load(&_nodes);
3221     while (next != NULL) {
3222       BufferNode* node = next;
3223       next = Atomic::cmpxchg(node->next(), &_nodes, node);
3224       if (next == node) {
3225         cl->apply_to_buffer(node, buffer_size, worker_id);
3226         next = node->next();
3227       }
3228     }
3229   }
3230 
3231  public:
3232   G1RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* qset, G1CollectedHeap* g1h) :
3233     AbstractGangTask("Redirty Cards"),
3234     _qset(qset), _g1h(g1h), _nodes(qset->all_completed_buffers()) { }
3235 
      // Per-worker entry point: time the phase, process claimed buffers, and
      // record the per-worker count of re-dirtied cards.
3236   virtual void work(uint worker_id) {
3237     G1GCPhaseTimes* p = _g1h->phase_times();
3238     G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::RedirtyCards, worker_id);
3239 
3240     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3241     par_apply(&cl, worker_id);
3242 
3243     p->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3244   }
3245 };


< prev index next >