< prev index next >

src/share/vm/gc_implementation/g1/concurrentMark.cpp

Print this page
rev 7407 : 8065227: Report allocation context stats at end of cleanup
Summary: Moved allocation context update from remark to the cleanup phase.
Reviewed-by:


2106                      " VerifyDuringGC:(after)");
2107   }
2108 
2109   g1h->check_bitmaps("Cleanup End");
2110 
2111   g1h->verify_region_sets_optional();
2112 
2113   // We need to make this be a "collection" so any collection pause that
2114   // races with it goes around and waits for completeCleanup to finish.
2115   g1h->increment_total_collections();
2116 
2117   // Clean out dead classes and update Metaspace sizes.
2118   if (ClassUnloadingWithConcurrentMark) {
2119     ClassLoaderDataGraph::purge();
2120   }
2121   MetaspaceGC::compute_new_size();
2122 
2123   // We reclaimed old regions so we should calculate the sizes to make
2124   // sure we update the old gen/space data.
2125   g1h->g1mm()->update_sizes();

2126 
2127   g1h->trace_heap_after_concurrent_cycle();
2128 }
2129 
2130 void ConcurrentMark::completeCleanup() {
2131   if (has_aborted()) return;
2132 
2133   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2134 
2135   _cleanup_list.verify_optional();
2136   FreeRegionList tmp_free_list("Tmp Free List");
2137 
2138   if (G1ConcRegionFreeingVerbose) {
2139     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2140                            "cleanup list has %u entries",
2141                            _cleanup_list.length());
2142   }
2143 
2144   // No one else should be accessing the _cleanup_list at this point,
2145   // so it is not necessary to take any locks


3226       _hrclaimer(_active_workers) {
3227   }
3228 
3229   void work(uint worker_id) {
3230     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3231 
3232     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3233   }
3234 };
3235 
3236 
// Aggregates the per-worker liveness counting data into the global card
// bitmap (_card_bm) by running G1AggregateCountDataTask in parallel over
// the heap regions (see the task's work() above). In this pre-change
// version it also updates the allocation context statistics here, i.e.
// at remark time — the change under review moves that to cleanup.
3237 void ConcurrentMark::aggregate_count_data() {
  // Number of currently active GC worker threads; the task is sized to it.
3238   int n_workers = _g1h->workers()->active_workers();
3239 
3240   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3241                                            _max_worker_id, n_workers);
3242 
  // Bracket the parallel run with set_par_threads(n)/set_par_threads(0)
  // so the heap knows how many threads participate in the task.
3243   _g1h->set_par_threads(n_workers);
3244   _g1h->workers()->run_task(&g1_par_agg_task);
3245   _g1h->set_par_threads(0);
  // Report allocation context stats at the remark pause; presumably this
  // couples stats reporting to remark timing — the changeset moves it.
3246   _g1h->allocation_context_stats().update_at_remark();
3247 }
3248 
3249 // Clear the per-worker arrays used to store the per-region counting data
3250 void ConcurrentMark::clear_all_count_data() {
3251   // Clear the global card bitmap - it will be filled during
3252   // liveness count aggregation (during remark) and the
3253   // final counting task.
3254   _card_bm.clear();
3255 
3256   // Clear the global region bitmap - it will be filled as part
3257   // of the final counting task.
3258   _region_bm.clear();
3259 
3260   uint max_regions = _g1h->max_regions();
3261   assert(_max_worker_id > 0, "uninitialized");
3262 
3263   for (uint i = 0; i < _max_worker_id; i += 1) {
3264     BitMap* task_card_bm = count_card_bitmap_for(i);
3265     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3266 




2106                      " VerifyDuringGC:(after)");
2107   }
2108 
2109   g1h->check_bitmaps("Cleanup End");
2110 
2111   g1h->verify_region_sets_optional();
2112 
2113   // We need to make this be a "collection" so any collection pause that
2114   // races with it goes around and waits for completeCleanup to finish.
2115   g1h->increment_total_collections();
2116 
2117   // Clean out dead classes and update Metaspace sizes.
2118   if (ClassUnloadingWithConcurrentMark) {
2119     ClassLoaderDataGraph::purge();
2120   }
2121   MetaspaceGC::compute_new_size();
2122 
2123   // We reclaimed old regions so we should calculate the sizes to make
2124   // sure we update the old gen/space data.
2125   g1h->g1mm()->update_sizes();
2126   g1h->allocation_context_stats().update_after_mark();
2127 
2128   g1h->trace_heap_after_concurrent_cycle();
2129 }
2130 
2131 void ConcurrentMark::completeCleanup() {
2132   if (has_aborted()) return;
2133 
2134   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2135 
2136   _cleanup_list.verify_optional();
2137   FreeRegionList tmp_free_list("Tmp Free List");
2138 
2139   if (G1ConcRegionFreeingVerbose) {
2140     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
2141                            "cleanup list has %u entries",
2142                            _cleanup_list.length());
2143   }
2144 
2145   // No one else should be accessing the _cleanup_list at this point,
2146   // so it is not necessary to take any locks


3227       _hrclaimer(_active_workers) {
3228   }
3229 
3230   void work(uint worker_id) {
3231     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
3232 
3233     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
3234   }
3235 };
3236 
3237 
// Aggregates the per-worker liveness counting data into the global card
// bitmap (_card_bm) by running G1AggregateCountDataTask in parallel over
// the heap regions (see the task's work() above). The allocation context
// stats update that used to sit at the end of this method has been moved
// to the cleanup phase by this change (8065227).
3238 void ConcurrentMark::aggregate_count_data() {
  // Number of currently active GC worker threads; the task is sized to it.
3239   int n_workers = _g1h->workers()->active_workers();
3240 
3241   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3242                                            _max_worker_id, n_workers);
3243 
  // Bracket the parallel run with set_par_threads(n)/set_par_threads(0)
  // so the heap knows how many threads participate in the task.
3244   _g1h->set_par_threads(n_workers);
3245   _g1h->workers()->run_task(&g1_par_agg_task);
3246   _g1h->set_par_threads(0);

3247 }
3248 
3249 // Clear the per-worker arrays used to store the per-region counting data
3250 void ConcurrentMark::clear_all_count_data() {
3251   // Clear the global card bitmap - it will be filled during
3252   // liveness count aggregation (during remark) and the
3253   // final counting task.
3254   _card_bm.clear();
3255 
3256   // Clear the global region bitmap - it will be filled as part
3257   // of the final counting task.
3258   _region_bm.clear();
3259 
3260   uint max_regions = _g1h->max_regions();
3261   assert(_max_worker_id > 0, "uninitialized");
3262 
3263   for (uint i = 0; i < _max_worker_id; i += 1) {
3264     BitMap* task_card_bm = count_card_bitmap_for(i);
3265     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3266 


< prev index next >