// Excerpt from src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp




  83 // G1ParTask executes g1_process_strong_roots() ->
  84 // SharedHeap::process_strong_roots() which calls eventually to
  85 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  86 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  87 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  88 //
  89 
  90 // Local to this file.
  91 
  92 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  93   SuspendibleThreadSet* _sts;
  94   G1RemSet* _g1rs;
  95   ConcurrentG1Refine* _cg1r;
  96   bool _concurrent;
  97 public:
  98   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  99                               G1RemSet* g1rs,
 100                               ConcurrentG1Refine* cg1r) :
 101     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
 102   {}
 103   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 104     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
 105     // This path is executed by the concurrent refine or mutator threads,
 106     // concurrently, and so we do not care if card_ptr contains references
 107     // that point into the collection set.
 108     assert(!oops_into_cset, "should be");
 109 
 110     if (_concurrent && _sts->should_yield()) {
 111       // Caller will actually yield.
 112       return false;
 113     }
 114     // Otherwise, we finished successfully; return true.
 115     return true;
 116   }
 117   void set_concurrent(bool b) { _concurrent = b; }
 118 };
 119 
 120 
 121 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 122   int _calls;
 123   G1CollectedHeap* _g1h;
 124   CardTableModRefBS* _ctbs;
 125   int _histo[256];
 126 public:
 127   ClearLoggedCardTableEntryClosure() :
 128     _calls(0)
 129   {
 130     _g1h = G1CollectedHeap::heap();
 131     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 132     for (int i = 0; i < 256; i++) _histo[i] = 0;
 133   }
 134   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 135     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 136       _calls++;
 137       unsigned char* ujb = (unsigned char*)card_ptr;
 138       int ind = (int)(*ujb);
 139       _histo[ind]++;
 140       *card_ptr = -1;
 141     }
 142     return true;
 143   }
 144   int calls() { return _calls; }
 145   void print_histo() {
 146     gclog_or_tty->print_cr("Card table value histogram:");
 147     for (int i = 0; i < 256; i++) {
 148       if (_histo[i] != 0) {
 149         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 150       }
 151     }
 152   }
 153 };
 154 
 155 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
 156   int _calls;
 157   G1CollectedHeap* _g1h;
 158   CardTableModRefBS* _ctbs;
 159 public:
 160   RedirtyLoggedCardTableEntryClosure() :
 161     _calls(0)
 162   {
 163     _g1h = G1CollectedHeap::heap();
 164     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 165   }
 166   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
 167     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 168       _calls++;
 169       *card_ptr = 0;
 170     }
 171     return true;
 172   }
 173   int calls() { return _calls; }
 174 };
 175 
// Unconditionally redirties every logged card; "fast" because it skips
// the in-heap check and call counting that
// RedirtyLoggedCardTableEntryClosure performs.
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  // Always returns true so iteration continues over every card.
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};
 183 
// Constructs an empty young list: both the eden list (_head) and the
// survivor list start out empty, which the guarantee double-checks.
YoungList::YoungList(G1CollectedHeap* g1h) :
    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
  guarantee(check_list_empty(false), "just making sure...");
}
 189 
 190 void YoungList::push_region(HeapRegion *hr) {
 191   assert(!hr->is_young(), "should not already be young");
 192   assert(hr->get_next_young_region() == NULL, "cause it should!");
 193 
 194   hr->set_next_young_region(_head);
 195   _head = hr;
 196 
 197   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 198   ++_length;


1195     hrrs->clear();
1196     // You might think here that we could clear just the cards
1197     // corresponding to the used region.  But no: if we leave a dirty card
1198     // in a region we might allocate into, then it would prevent that card
1199     // from being enqueued, and cause it to be missed.
1200     // Re: the performance cost: we shouldn't be doing full GC anyway!
1201     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1202 
1203     return false;
1204   }
1205 };
1206 
// After full-GC compaction, iterate over all heap regions applying
// PostMCRemSetClearClosure (defined above), which clears each region's
// remembered set and the card-table cards covering the region.
void G1CollectedHeap::clear_rsets_post_compaction() {
  PostMCRemSetClearClosure rs_clear(this, mr_bs());
  heap_region_iterate(&rs_clear);
}
1211 
1212 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1213   G1CollectedHeap*   _g1h;
1214   UpdateRSOopClosure _cl;
1215   int                _worker_i;
1216 public:
1217   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
1218     _cl(g1->g1_rem_set(), worker_i),
1219     _worker_i(worker_i),
1220     _g1h(g1)
1221   { }
1222 
1223   bool doHeapRegion(HeapRegion* r) {
1224     if (!r->continuesHumongous()) {
1225       _cl.set_from(r);
1226       r->oop_iterate(&_cl);
1227     }
1228     return false;
1229   }
1230 };
1231 
1232 class ParRebuildRSTask: public AbstractGangTask {
1233   G1CollectedHeap* _g1;
1234 public:
1235   ParRebuildRSTask(G1CollectedHeap* g1)
1236     : AbstractGangTask("ParRebuildRSTask"),
1237       _g1(g1)


2306                              "expected %d", HR_FORMAT_PARAMS(hr),
2307                              region_gc_time_stamp, _gc_time_stamp);
2308       _failures = true;
2309     }
2310     return false;
2311   }
2312 
2313   bool failures() { return _failures; }
2314 };
2315 
// Debug-only (guarded by the PRODUCT #endif below): verify that every
// heap region's GC time stamp matches the heap's current _gc_time_stamp.
void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}
2321 #endif // PRODUCT
2322 
// Applies cl to every completed dirty-card buffer on behalf of worker
// worker_i, first draining the hot card cache, and records the number
// of buffers processed in the phase times.
// NOTE(review): the 'concurrent' parameter is unused in this body —
// confirm whether callers still rely on it.
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 int worker_i) {
  // Clean cards in the hot card cache
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  // Keep applying cl until no completed buffer remains.
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  // All completed buffers must have been consumed above.
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
2340 
2341 
2342 // Computes the sum of the storage used by the various regions.
2343 
2344 size_t G1CollectedHeap::used() const {
2345   assert(Heap_lock->owner() != NULL,
2346          "Should be owned on this thread's behalf.");


2850   collection_set_iterate(&cl);
2851   return cl.failures() == 0;
2852 }
2853 #endif // ASSERT
2854 
2855 // Clear the cached CSet starting regions and (more importantly)
2856 // the time stamps. Called when we reset the GC time stamp.
2857 void G1CollectedHeap::clear_cset_start_regions() {
2858   assert(_worker_cset_start_region != NULL, "sanity");
2859   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2860 
2861   int n_queues = MAX2((int)ParallelGCThreads, 1);
2862   for (int i = 0; i < n_queues; i++) {
2863     _worker_cset_start_region[i] = NULL;
2864     _worker_cset_start_region_time_stamp[i] = 0;
2865   }
2866 }
2867 
2868 // Given the id of a worker, obtain or calculate a suitable
2869 // starting region for iterating over the current collection set.
2870 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
2871   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2872 
2873   HeapRegion* result = NULL;
2874   unsigned gc_time_stamp = get_gc_time_stamp();
2875 
2876   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2877     // Cached starting region for current worker was set
2878     // during the current pause - so it's valid.
2879     // Note: the cached starting heap region may be NULL
2880     // (when the collection set is empty).
2881     result = _worker_cset_start_region[worker_i];
2882     assert(result == NULL || result->in_collection_set(), "sanity");
2883     return result;
2884   }
2885 
2886   // The cached entry was not valid so let's calculate
2887   // a suitable starting heap region for this worker.
2888 
2889   // We want the parallel threads to start their collection
2890   // set iteration at different collection set regions to


5108       // destructors are executed here and are included as part of the
5109       // "GC Worker Time".
5110     }
5111 
5112     double end_time_ms = os::elapsedTime() * 1000.0;
5113     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
5114   }
5115 };
5116 
5117 // *** Common G1 Evacuation Stuff
5118 
5119 // This method is run in a GC worker.
5120 
5121 void
5122 G1CollectedHeap::
5123 g1_process_strong_roots(bool is_scavenging,
5124                         ScanningOption so,
5125                         OopClosure* scan_non_heap_roots,
5126                         OopsInHeapRegionClosure* scan_rs,
5127                         G1KlassScanClosure* scan_klasses,
5128                         int worker_i) {
5129 
5130   // First scan the strong roots
5131   double ext_roots_start = os::elapsedTime();
5132   double closure_app_time_sec = 0.0;
5133 
5134   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5135 
5136   assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
5137   // Walk the code cache/strong code roots w/o buffering, because StarTask
5138   // cannot handle unaligned oop locations.
5139   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5140 
5141   process_strong_roots(false, // no scoping; this is parallel code
5142                        is_scavenging, so,
5143                        &buf_scan_non_heap_roots,
5144                        &eager_scan_code_roots,
5145                        scan_klasses
5146                        );
5147 
5148   // Now the CM ref_processor roots.




  83 // G1ParTask executes g1_process_strong_roots() ->
  84 // SharedHeap::process_strong_roots() which calls eventually to
  85 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  86 // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
  87 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  88 //
  89 
  90 // Local to this file.
  91 
  92 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  93   SuspendibleThreadSet* _sts;
  94   G1RemSet* _g1rs;
  95   ConcurrentG1Refine* _cg1r;
  96   bool _concurrent;
  97 public:
  98   RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
  99                               G1RemSet* g1rs,
 100                               ConcurrentG1Refine* cg1r) :
 101     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
 102   {}
 103   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 104     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
 105     // This path is executed by the concurrent refine or mutator threads,
 106     // concurrently, and so we do not care if card_ptr contains references
 107     // that point into the collection set.
 108     assert(!oops_into_cset, "should be");
 109 
 110     if (_concurrent && _sts->should_yield()) {
 111       // Caller will actually yield.
 112       return false;
 113     }
 114     // Otherwise, we finished successfully; return true.
 115     return true;
 116   }
 117   void set_concurrent(bool b) { _concurrent = b; }
 118 };
 119 
 120 
 121 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 122   int _calls;
 123   G1CollectedHeap* _g1h;
 124   CardTableModRefBS* _ctbs;
 125   int _histo[256];
 126 public:
 127   ClearLoggedCardTableEntryClosure() :
 128     _calls(0)
 129   {
 130     _g1h = G1CollectedHeap::heap();
 131     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 132     for (int i = 0; i < 256; i++) _histo[i] = 0;
 133   }
 134   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 135     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 136       _calls++;
 137       unsigned char* ujb = (unsigned char*)card_ptr;
 138       int ind = (int)(*ujb);
 139       _histo[ind]++;
 140       *card_ptr = -1;
 141     }
 142     return true;
 143   }
 144   int calls() { return _calls; }
 145   void print_histo() {
 146     gclog_or_tty->print_cr("Card table value histogram:");
 147     for (int i = 0; i < 256; i++) {
 148       if (_histo[i] != 0) {
 149         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 150       }
 151     }
 152   }
 153 };
 154 
 155 class RedirtyLoggedCardTableEntryClosure: public CardTableEntryClosure {
 156   int _calls;
 157   G1CollectedHeap* _g1h;
 158   CardTableModRefBS* _ctbs;
 159 public:
 160   RedirtyLoggedCardTableEntryClosure() :
 161     _calls(0)
 162   {
 163     _g1h = G1CollectedHeap::heap();
 164     _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
 165   }
 166   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 167     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
 168       _calls++;
 169       *card_ptr = 0;
 170     }
 171     return true;
 172   }
 173   int calls() { return _calls; }
 174 };
 175 
// Unconditionally redirties every logged card; "fast" because it skips
// the in-heap check and call counting that
// RedirtyLoggedCardTableEntryClosure performs.
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
  // Always returns true so iteration continues over every card.
  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    *card_ptr = CardTableModRefBS::dirty_card_val();
    return true;
  }
};
 183 
// Constructs an empty young list: both the eden list (_head) and the
// survivor list start out empty, which the guarantee double-checks.
YoungList::YoungList(G1CollectedHeap* g1h) :
    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
  guarantee(check_list_empty(false), "just making sure...");
}
 189 
 190 void YoungList::push_region(HeapRegion *hr) {
 191   assert(!hr->is_young(), "should not already be young");
 192   assert(hr->get_next_young_region() == NULL, "cause it should!");
 193 
 194   hr->set_next_young_region(_head);
 195   _head = hr;
 196 
 197   _g1h->g1_policy()->set_region_eden(hr, (int) _length);
 198   ++_length;


1195     hrrs->clear();
1196     // You might think here that we could clear just the cards
1197     // corresponding to the used region.  But no: if we leave a dirty card
1198     // in a region we might allocate into, then it would prevent that card
1199     // from being enqueued, and cause it to be missed.
1200     // Re: the performance cost: we shouldn't be doing full GC anyway!
1201     _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1202 
1203     return false;
1204   }
1205 };
1206 
// After full-GC compaction, iterate over all heap regions applying
// PostMCRemSetClearClosure (defined above), which clears each region's
// remembered set and the card-table cards covering the region.
void G1CollectedHeap::clear_rsets_post_compaction() {
  PostMCRemSetClearClosure rs_clear(this, mr_bs());
  heap_region_iterate(&rs_clear);
}
1211 
1212 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1213   G1CollectedHeap*   _g1h;
1214   UpdateRSOopClosure _cl;
1215   uint               _worker_i;
1216 public:
1217   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
1218     _cl(g1->g1_rem_set(), worker_i),
1219     _worker_i(worker_i),
1220     _g1h(g1)
1221   { }
1222 
1223   bool doHeapRegion(HeapRegion* r) {
1224     if (!r->continuesHumongous()) {
1225       _cl.set_from(r);
1226       r->oop_iterate(&_cl);
1227     }
1228     return false;
1229   }
1230 };
1231 
1232 class ParRebuildRSTask: public AbstractGangTask {
1233   G1CollectedHeap* _g1;
1234 public:
1235   ParRebuildRSTask(G1CollectedHeap* g1)
1236     : AbstractGangTask("ParRebuildRSTask"),
1237       _g1(g1)


2306                              "expected %d", HR_FORMAT_PARAMS(hr),
2307                              region_gc_time_stamp, _gc_time_stamp);
2308       _failures = true;
2309     }
2310     return false;
2311   }
2312 
2313   bool failures() { return _failures; }
2314 };
2315 
// Debug-only (guarded by the PRODUCT #endif below): verify that every
// heap region's GC time stamp matches the heap's current _gc_time_stamp.
void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}
2321 #endif // PRODUCT
2322 
// Applies cl to every completed dirty-card buffer on behalf of worker
// worker_i, first draining the hot card cache, and records the number
// of buffers processed in the phase times.
// NOTE(review): the 'concurrent' parameter is unused in this body —
// confirm whether callers still rely on it.
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                 DirtyCardQueue* into_cset_dcq,
                                                 bool concurrent,
                                                 uint worker_i) {
  // Clean cards in the hot card cache
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  // Keep applying cl until no completed buffer remains.
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  // All completed buffers must have been consumed above.
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
2340 
2341 
2342 // Computes the sum of the storage used by the various regions.
2343 
2344 size_t G1CollectedHeap::used() const {
2345   assert(Heap_lock->owner() != NULL,
2346          "Should be owned on this thread's behalf.");


2850   collection_set_iterate(&cl);
2851   return cl.failures() == 0;
2852 }
2853 #endif // ASSERT
2854 
2855 // Clear the cached CSet starting regions and (more importantly)
2856 // the time stamps. Called when we reset the GC time stamp.
2857 void G1CollectedHeap::clear_cset_start_regions() {
2858   assert(_worker_cset_start_region != NULL, "sanity");
2859   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2860 
2861   int n_queues = MAX2((int)ParallelGCThreads, 1);
2862   for (int i = 0; i < n_queues; i++) {
2863     _worker_cset_start_region[i] = NULL;
2864     _worker_cset_start_region_time_stamp[i] = 0;
2865   }
2866 }
2867 
2868 // Given the id of a worker, obtain or calculate a suitable
2869 // starting region for iterating over the current collection set.
2870 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
2871   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2872 
2873   HeapRegion* result = NULL;
2874   unsigned gc_time_stamp = get_gc_time_stamp();
2875 
2876   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2877     // Cached starting region for current worker was set
2878     // during the current pause - so it's valid.
2879     // Note: the cached starting heap region may be NULL
2880     // (when the collection set is empty).
2881     result = _worker_cset_start_region[worker_i];
2882     assert(result == NULL || result->in_collection_set(), "sanity");
2883     return result;
2884   }
2885 
2886   // The cached entry was not valid so let's calculate
2887   // a suitable starting heap region for this worker.
2888 
2889   // We want the parallel threads to start their collection
2890   // set iteration at different collection set regions to


5108       // destructors are executed here and are included as part of the
5109       // "GC Worker Time".
5110     }
5111 
5112     double end_time_ms = os::elapsedTime() * 1000.0;
5113     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
5114   }
5115 };
5116 
5117 // *** Common G1 Evacuation Stuff
5118 
5119 // This method is run in a GC worker.
5120 
5121 void
5122 G1CollectedHeap::
5123 g1_process_strong_roots(bool is_scavenging,
5124                         ScanningOption so,
5125                         OopClosure* scan_non_heap_roots,
5126                         OopsInHeapRegionClosure* scan_rs,
5127                         G1KlassScanClosure* scan_klasses,
5128                         uint worker_i) {
5129 
5130   // First scan the strong roots
5131   double ext_roots_start = os::elapsedTime();
5132   double closure_app_time_sec = 0.0;
5133 
5134   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
5135 
5136   assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
5137   // Walk the code cache/strong code roots w/o buffering, because StarTask
5138   // cannot handle unaligned oop locations.
5139   CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
5140 
5141   process_strong_roots(false, // no scoping; this is parallel code
5142                        is_scavenging, so,
5143                        &buf_scan_non_heap_roots,
5144                        &eager_scan_code_roots,
5145                        scan_klasses
5146                        );
5147 
5148   // Now the CM ref_processor roots.