src/share/vm/gc/g1/concurrentMark.cpp





Old version (before the change):

1923   g1h->verify_region_sets_optional();
1924 
1925   if (VerifyDuringGC) {
1926     HandleMark hm;  // handle scope
1927     g1h->prepare_for_verify();
1928     Universe::verify(VerifyOption_G1UsePrevMarking,
1929                      " VerifyDuringGC:(before)");
1930   }
1931   g1h->check_bitmaps("Cleanup Start");
1932 
1933   G1CollectorPolicy* g1p = g1h->g1_policy();
1934   g1p->record_concurrent_mark_cleanup_start();
1935 
1936   double start = os::elapsedTime();
1937 
1938   HeapRegionRemSet::reset_for_cleanup_tasks();
1939 
1940   // Do counting once more with the world stopped for good measure.
1941   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1942 
1943   g1h->set_par_threads();
1944   uint n_workers = _g1h->workers()->active_workers();
1945   g1h->workers()->run_task(&g1_par_count_task);
1946   // Done with the parallel phase so reset to 0.
1947   g1h->set_par_threads(0);
1948 
1949   if (VerifyDuringGC) {
1950     // Verify that the counting data accumulated during marking matches
1951     // that calculated by walking the marking bitmap.
1952 
1953     // Bitmaps to hold expected values
1954     BitMap expected_region_bm(_region_bm.size(), true);
1955     BitMap expected_card_bm(_card_bm.size(), true);
1956 
1957     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1958                                                  &_region_bm,
1959                                                  &_card_bm,
1960                                                  &expected_region_bm,
1961                                                  &expected_card_bm);
1962 
1963     g1h->set_par_threads((int)n_workers);
1964     g1h->workers()->run_task(&g1_par_verify_task);
1965     // Done with the parallel phase so reset to 0.
1966     g1h->set_par_threads(0);
1967 
1968     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1969   }
1970 
1971   size_t start_used_bytes = g1h->used();
1972   g1h->set_marking_complete();
1973 
1974   double count_end = os::elapsedTime();
1975   double this_final_counting_time = (count_end - start);
1976   _total_counting_time += this_final_counting_time;
1977 
1978   if (G1PrintRegionLivenessInfo) {
1979     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1980     _g1h->heap_region_iterate(&cl);
1981   }
1982 
1983   // Install newly created mark bitmap as "prev".
1984   swapMarkBitMaps();
1985 
1986   g1h->reset_gc_time_stamp();
1987 


1988   // Note end of marking in all heap regions.
1989   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1990   g1h->set_par_threads((int)n_workers);
1991   g1h->workers()->run_task(&g1_par_note_end_task);
1992   g1h->set_par_threads(0);
1993   g1h->check_gc_time_stamps();
1994 
1995   if (!cleanup_list_is_empty()) {
1996     // The cleanup list is not empty, so we'll have to process it
1997     // concurrently. Notify anyone else that might be wanting free
1998     // regions that there will be more free regions coming soon.
1999     g1h->set_free_regions_coming();
2000   }
2001 
2002   // RemSet scrubbing must be done before the record_concurrent_mark_cleanup_end()
2003   // call below, since it affects the metric by which we sort the heap regions.
2004   if (G1ScrubRemSets) {
2005     double rs_scrub_start = os::elapsedTime();
2006     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
2007     g1h->set_par_threads((int)n_workers);
2008     g1h->workers()->run_task(&g1_par_scrub_rs_task);
2009     g1h->set_par_threads(0);
2010 
2011     double rs_scrub_end = os::elapsedTime();
2012     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2013     _total_rs_scrub_time += this_rs_scrub_time;
2014   }
2015 
2016   // This will also free any regions totally full of garbage objects,
2017   // and sort the regions.
2018   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2019 
2020   // Statistics.
2021   double end = os::elapsedTime();
2022   _cleanup_times.add((end - start) * 1000.0);
2023 
2024   if (G1Log::fine()) {
2025     g1h->g1_policy()->print_heap_transition(start_used_bytes);
2026   }
2027 
2028   // Clean up will have freed any regions completely full of garbage.
2029   // Update the soft reference policy with the new heap occupancy.
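
Note how the old code above brackets every run_task() call with set_par_threads(n) / set_par_threads(0); the new version further down drops that bracketing, presumably leaving the worker count to the gang's active_workers setting alone. A minimal sketch of the two calling conventions, using toy stand-in types (ToyTask, ToyWorkGang) rather than HotSpot's real WorkGang API:

#include <cstdio>

// Toy stand-ins, for illustration only; not HotSpot's API.
struct ToyTask {
  void work(unsigned worker_id) { std::printf("worker %u\n", worker_id); }
};

struct ToyWorkGang {
  unsigned active_workers;
  void run_task(ToyTask& t) {
    // The gang itself knows how many workers to dispatch.
    for (unsigned i = 0; i < active_workers; i++) {
      t.work(i);
    }
  }
};

int main() {
  ToyWorkGang gang = { 4 };
  ToyTask task;
  // Old convention: heap->set_par_threads(n); gang->run_task(&task);
  //                 heap->set_par_threads(0);
  // New convention: just run the task; active_workers drives the phase.
  gang.run_task(task);
  return 0;
}
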


2290     CMTask* task = _cm->task(worker_id);
2291     G1CMIsAliveClosure g1_is_alive(_g1h);
2292     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2293     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2294 
2295     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2296   }
2297 };
2298 
2299 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2300   assert(_workers != NULL, "Need parallel worker threads.");
2301   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2302 
2303   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2304 
2305   // We need to reset the concurrency level before each
2306   // proxy task execution, so that the termination protocol
2307   // and overflow handling in CMTask::do_marking_step() know
2308   // how many workers to wait for.
2309   _cm->set_concurrency(_active_workers);
2310   _g1h->set_par_threads(_active_workers);
2311   _workers->run_task(&proc_task_proxy);
2312   _g1h->set_par_threads(0);
2313 }
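
The comment above matters because the termination protocol waits for a fixed number of participants. A self-contained sketch of why the configured concurrency must match the real worker count; ToyTerminator is a made-up name, and this is not CMTask::do_marking_step()'s actual protocol:

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

// Toy termination protocol: no worker may finish until the expected
// number of workers has checked in, so the expected count must equal
// the number of workers actually running.
class ToyTerminator {
  std::mutex _m;
  std::condition_variable _cv;
  unsigned _expected;
  unsigned _arrived;
public:
  explicit ToyTerminator(unsigned expected) : _expected(expected), _arrived(0) {}
  void offer_termination() {
    std::unique_lock<std::mutex> l(_m);
    _arrived++;
    if (_arrived == _expected) {
      _cv.notify_all();
    } else {
      _cv.wait(l, [this] { return _arrived == _expected; });
    }
  }
};

int main() {
  const unsigned n_workers = 4;
  ToyTerminator term(n_workers);       // analogous to set_concurrency(n)
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < n_workers; i++) {
    gang.emplace_back([&term] { term.offer_termination(); });
  }
  for (std::thread& w : gang) {        // would hang if expected != n_workers
    w.join();
  }
  return 0;
}
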
2314 
2315 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2316   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2317   EnqueueTask& _enq_task;
2318 
2319 public:
2320   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2321     AbstractGangTask("Enqueue reference objects in parallel"),
2322     _enq_task(enq_task) { }
2323 
2324   virtual void work(uint worker_id) {
2325     _enq_task.work(worker_id);
2326   }
2327 };
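
G1CMRefEnqueueTaskProxy above is a thin adapter: it gives an EnqueueTask the AbstractGangTask shape the work gang expects. A self-contained sketch of the same idiom with made-up toy types:

#include <cstdio>

// Toy stand-ins for AbstractGangTask / EnqueueTask, illustration only.
struct ToyEnqueueTask {
  void work(unsigned worker_id) {
    std::printf("enqueue references, worker %u\n", worker_id);
  }
};

struct ToyGangTask {
  virtual void work(unsigned worker_id) = 0;
  virtual ~ToyGangTask() {}
};

struct ToyEnqueueProxy : public ToyGangTask {
  ToyEnqueueTask& _enq_task;
  explicit ToyEnqueueProxy(ToyEnqueueTask& t) : _enq_task(t) {}
  virtual void work(unsigned worker_id) { _enq_task.work(worker_id); }
};

int main() {
  ToyEnqueueTask enq;
  ToyEnqueueProxy proxy(enq);
  for (unsigned i = 0; i < 2; i++) {
    proxy.work(i);  // what a gang would invoke once per worker
  }
  return 0;
}
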
2328 
2329 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2330   assert(_workers != NULL, "Need parallel worker threads.");
2331   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2332 
2333   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2334 
2335   // Not strictly necessary but...
2336   //
2337   // We need to reset the concurrency level before each
2338   // proxy task execution, so that the termination protocol
2339   // and overflow handling in CMTask::do_marking_step() know
2340   // how many workers to wait for.
2341   _cm->set_concurrency(_active_workers);
2342   _g1h->set_par_threads(_active_workers);
2343   _workers->run_task(&enq_task_proxy);
2344   _g1h->set_par_threads(0);
2345 }
2346 
2347 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2348   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2349 }
2350 
2351 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2352   if (has_overflown()) {
2353     // Skip processing the discovered references if we have
2354     // overflown the global marking stack. Reference objects
2355     // only get discovered once so it is OK to not
2356     // de-populate the discovered reference lists. We could have,
2357     // but the only benefit would be that, when marking restarts,
2358     // fewer reference objects are discovered.
2359     return;
2360   }
2361 
2362   ResourceMark rm;
2363   HandleMark   hm;
2364 


2606   // This is remark, so we'll use all active threads
2607   uint active_workers = g1h->workers()->active_workers();
2608   if (active_workers == 0) {
2609     assert(active_workers > 0, "Should have been set earlier");
2610     active_workers = (uint) ParallelGCThreads;
2611     g1h->workers()->set_active_workers(active_workers);
2612   }
2613   set_concurrency_and_phase(active_workers, false /* concurrent */);
2614   // Leave _parallel_marking_threads at its
2615   // value originally calculated in the ConcurrentMark
2616   // constructor and pass the number of active
2617   // workers through the gang in the task.
2618 
2619   {
2620     StrongRootsScope srs(active_workers);
2621 
2622     CMRemarkTask remarkTask(this, active_workers);
2623     // We will start all available threads, even if we decide that the
2624     // active_workers will be fewer. The extra ones will just bail out
2625     // immediately.
2626     g1h->set_par_threads(active_workers);
2627     g1h->workers()->run_task(&remarkTask);
2628     g1h->set_par_threads(0);
2629   }
2630 
2631   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2632   guarantee(has_overflown() ||
2633             satb_mq_set.completed_buffers_num() == 0,
2634             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2635                     BOOL_TO_STR(has_overflown()),
2636                     satb_mq_set.completed_buffers_num()));
2637 
2638   print_stats();
2639 }
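
The remark comments above note that all available threads are started even when fewer are active, and that the extras "just bail out immediately". A minimal sketch of that guard, with illustrative names (this is not CMRemarkTask's exact body):

// Toy task: workers past the active count return without doing work.
struct ToyRemarkTask {
  unsigned _active_workers;
  explicit ToyRemarkTask(unsigned n) : _active_workers(n) {}
  void work(unsigned worker_id) {
    if (worker_id >= _active_workers) {
      return;  // started but not needed: bail out immediately
    }
    // ... perform remark work for worker_id ...
  }
};

int main() {
  ToyRemarkTask task(2);
  for (unsigned i = 0; i < 4; i++) {
    task.work(i);  // workers 2 and 3 bail out
  }
  return 0;
}
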
2640 
2641 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2642   // Note we are overriding the read-only view of the prev map here, via
2643   // the cast.
2644   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2645 }
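
clearRangePrevBitmap() deliberately casts away the read-only view of the prev bitmap. A compact sketch of that idiom with made-up types; ToyBitMapRO / ToyBitMap loosely mirror the read-only / mutable bitmap split:

#include <cstddef>

// Read-only base: most callers only ever see this interface.
class ToyBitMapRO {
protected:
  bool _bits[16];
public:
  ToyBitMapRO() : _bits() {}
  bool at(size_t i) const { return _bits[i]; }
};

// Mutable subclass: adds the writing operations.
class ToyBitMap : public ToyBitMapRO {
public:
  void set(size_t i) { _bits[i] = true; }
  void clear_range(size_t from, size_t to) {
    for (size_t i = from; i < to; i++) {
      _bits[i] = false;
    }
  }
};

class ToyMark {
  ToyBitMapRO* _prev_bitmap;  // handed out as read-only
public:
  explicit ToyMark(ToyBitMap* bm) : _prev_bitmap(bm) {}
  void clear_range_prev(size_t from, size_t to) {
    // Deliberately override the read-only view, as the code above does;
    // safe because the object really is a mutable bitmap.
    ((ToyBitMap*)_prev_bitmap)->clear_range(from, to);
  }
};

int main() {
  ToyBitMap bm;
  bm.set(3);
  ToyMark cm(&bm);
  cm.clear_range_prev(0, 8);
  return bm.at(3) ? 1 : 0;
}
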
2646 
2647 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2648   _nextMarkBitMap->clearRange(mr);


2982       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
2983       _max_worker_id(max_worker_id),
2984       _active_workers(n_workers),
2985       _hrclaimer(_active_workers) {
2986   }
2987 
2988   void work(uint worker_id) {
2989     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
2990 
2991     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
2992   }
2993 };
2994 
2995 
2996 void ConcurrentMark::aggregate_count_data() {
2997   uint n_workers = _g1h->workers()->active_workers();
2998 
2999   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
3000                                            _max_worker_id, n_workers);
3001 
3002   _g1h->set_par_threads(n_workers);
3003   _g1h->workers()->run_task(&g1_par_agg_task);
3004   _g1h->set_par_threads(0);
3005 }
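
aggregate_count_data() folds the per-worker counting data into the global structures. A toy sketch of the general shape; the sizes and names are invented, and the real task walks heap regions via a closure rather than OR-ing whole bitmaps:

#include <bitset>
#include <vector>

int main() {
  const unsigned max_worker_id = 4;

  // Per-worker card bitmaps, filled during marking (values invented here).
  std::vector<std::bitset<64> > worker_card_bm(max_worker_id);
  worker_card_bm[0].set(3);   // pretend worker 0 marked card 3
  worker_card_bm[2].set(17);  // pretend worker 2 marked card 17

  // Aggregation: fold every per-worker bitmap into the global card bitmap.
  std::bitset<64> global_card_bm;
  for (unsigned i = 0; i < max_worker_id; i++) {
    global_card_bm |= worker_card_bm[i];
  }

  return (global_card_bm.test(3) && global_card_bm.test(17)) ? 0 : 1;
}
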
3006 
3007 // Clear the per-worker arrays used to store the per-region counting data
3008 void ConcurrentMark::clear_all_count_data() {
3009   // Clear the global card bitmap - it will be filled during
3010   // liveness count aggregation (during remark) and the
3011   // final counting task.
3012   _card_bm.clear();
3013 
3014   // Clear the global region bitmap - it will be filled as part
3015   // of the final counting task.
3016   _region_bm.clear();
3017 
3018   uint max_regions = _g1h->max_regions();
3019   assert(_max_worker_id > 0, "uninitialized");
3020 
3021   for (uint i = 0; i < _max_worker_id; i += 1) {
3022     BitMap* task_card_bm = count_card_bitmap_for(i);
3023     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3024 




New version (after the change):

1923   g1h->verify_region_sets_optional();
1924 
1925   if (VerifyDuringGC) {
1926     HandleMark hm;  // handle scope
1927     g1h->prepare_for_verify();
1928     Universe::verify(VerifyOption_G1UsePrevMarking,
1929                      " VerifyDuringGC:(before)");
1930   }
1931   g1h->check_bitmaps("Cleanup Start");
1932 
1933   G1CollectorPolicy* g1p = g1h->g1_policy();
1934   g1p->record_concurrent_mark_cleanup_start();
1935 
1936   double start = os::elapsedTime();
1937 
1938   HeapRegionRemSet::reset_for_cleanup_tasks();
1939 
1940   // Do counting once more with the world stopped for good measure.
1941   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1942 


1943   g1h->workers()->run_task(&g1_par_count_task);


1944 
1945   if (VerifyDuringGC) {
1946     // Verify that the counting data accumulated during marking matches
1947     // that calculated by walking the marking bitmap.
1948 
1949     // Bitmaps to hold expected values
1950     BitMap expected_region_bm(_region_bm.size(), true);
1951     BitMap expected_card_bm(_card_bm.size(), true);
1952 
1953     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1954                                                  &_region_bm,
1955                                                  &_card_bm,
1956                                                  &expected_region_bm,
1957                                                  &expected_card_bm);
1958 

1959     g1h->workers()->run_task(&g1_par_verify_task);


1960 
1961     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1962   }
1963 
1964   size_t start_used_bytes = g1h->used();
1965   g1h->set_marking_complete();
1966 
1967   double count_end = os::elapsedTime();
1968   double this_final_counting_time = (count_end - start);
1969   _total_counting_time += this_final_counting_time;
1970 
1971   if (G1PrintRegionLivenessInfo) {
1972     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1973     _g1h->heap_region_iterate(&cl);
1974   }
1975 
1976   // Install newly created mark bitmap as "prev".
1977   swapMarkBitMaps();
1978 
1979   g1h->reset_gc_time_stamp();
1980 
1981   uint n_workers = _g1h->workers()->active_workers();
1982 
1983   // Note end of marking in all heap regions.
1984   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);

1985   g1h->workers()->run_task(&g1_par_note_end_task);

1986   g1h->check_gc_time_stamps();
1987 
1988   if (!cleanup_list_is_empty()) {
1989     // The cleanup list is not empty, so we'll have to process it
1990     // concurrently. Notify anyone else that might be wanting free
1991     // regions that there will be more free regions coming soon.
1992     g1h->set_free_regions_coming();
1993   }
1994 
1995   // RemSet scrubbing must be done before the record_concurrent_mark_cleanup_end()
1996   // call below, since it affects the metric by which we sort the heap regions.
1997   if (G1ScrubRemSets) {
1998     double rs_scrub_start = os::elapsedTime();
1999     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);

2000     g1h->workers()->run_task(&g1_par_scrub_rs_task);

2001 
2002     double rs_scrub_end = os::elapsedTime();
2003     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
2004     _total_rs_scrub_time += this_rs_scrub_time;
2005   }
2006 
2007   // This will also free any regions totally full of garbage objects,
2008   // and sort the regions.
2009   g1h->g1_policy()->record_concurrent_mark_cleanup_end((int)n_workers);
2010 
2011   // Statistics.
2012   double end = os::elapsedTime();
2013   _cleanup_times.add((end - start) * 1000.0);
2014 
2015   if (G1Log::fine()) {
2016     g1h->g1_policy()->print_heap_transition(start_used_bytes);
2017   }
2018 
2019   // Clean up will have freed any regions completely full of garbage.
2020   // Update the soft reference policy with the new heap occupancy.


2281     CMTask* task = _cm->task(worker_id);
2282     G1CMIsAliveClosure g1_is_alive(_g1h);
2283     G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */);
2284     G1CMDrainMarkingStackClosure g1_par_drain(_cm, task, false /* is_serial */);
2285 
2286     _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
2287   }
2288 };
2289 
2290 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
2291   assert(_workers != NULL, "Need parallel worker threads.");
2292   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2293 
2294   G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
2295 
2296   // We need to reset the concurrency level before each
2297   // proxy task execution, so that the termination protocol
2298   // and overflow handling in CMTask::do_marking_step() know
2299   // how many workers to wait for.
2300   _cm->set_concurrency(_active_workers);

2301   _workers->run_task(&proc_task_proxy);

2302 }
2303 
2304 class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
2305   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
2306   EnqueueTask& _enq_task;
2307 
2308 public:
2309   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
2310     AbstractGangTask("Enqueue reference objects in parallel"),
2311     _enq_task(enq_task) { }
2312 
2313   virtual void work(uint worker_id) {
2314     _enq_task.work(worker_id);
2315   }
2316 };
2317 
2318 void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
2319   assert(_workers != NULL, "Need parallel worker threads.");
2320   assert(_g1h->ref_processor_cm()->processing_is_mt(), "processing is not MT");
2321 
2322   G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
2323 
2324   // Not strictly necessary but...
2325   //
2326   // We need to reset the concurrency level before each
2327   // proxy task execution, so that the termination protocol
2328   // and overflow handling in CMTask::do_marking_step() know
2329   // how many workers to wait for.
2330   _cm->set_concurrency(_active_workers);

2331   _workers->run_task(&enq_task_proxy);

2332 }
2333 
2334 void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
2335   G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
2336 }
2337 
2338 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
2339   if (has_overflown()) {
2340     // Skip processing the discovered references if we have
2341     // overflown the global marking stack. Reference objects
2342     // only get discovered once so it is OK to not
2343     // de-populate the discovered reference lists. We could have,
2344     // but the only benefit would be that, when marking restarts,
2345     // fewer reference objects are discovered.
2346     return;
2347   }
2348 
2349   ResourceMark rm;
2350   HandleMark   hm;
2351 


2593   // This is remark, so we'll use all active threads
2594   uint active_workers = g1h->workers()->active_workers();
2595   if (active_workers == 0) {
2596     assert(active_workers > 0, "Should have been set earlier");
2597     active_workers = (uint) ParallelGCThreads;
2598     g1h->workers()->set_active_workers(active_workers);
2599   }
2600   set_concurrency_and_phase(active_workers, false /* concurrent */);
2601   // Leave _parallel_marking_threads at its
2602   // value originally calculated in the ConcurrentMark
2603   // constructor and pass the number of active
2604   // workers through the gang in the task.
2605 
2606   {
2607     StrongRootsScope srs(active_workers);
2608 
2609     CMRemarkTask remarkTask(this, active_workers);
2610     // We will start all available threads, even if we decide that the
2611     // active_workers will be fewer. The extra ones will just bail out
2612     // immediately.

2613     g1h->workers()->run_task(&remarkTask);

2614   }
2615 
2616   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2617   guarantee(has_overflown() ||
2618             satb_mq_set.completed_buffers_num() == 0,
2619             err_msg("Invariant: has_overflown = %s, num buffers = %d",
2620                     BOOL_TO_STR(has_overflown()),
2621                     satb_mq_set.completed_buffers_num()));
2622 
2623   print_stats();
2624 }
2625 
2626 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2627   // Note we are overriding the read-only view of the prev map here, via
2628   // the cast.
2629   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2630 }
2631 
2632 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2633   _nextMarkBitMap->clearRange(mr);


2967       _g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
2968       _max_worker_id(max_worker_id),
2969       _active_workers(n_workers),
2970       _hrclaimer(_active_workers) {
2971   }
2972 
2973   void work(uint worker_id) {
2974     AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);
2975 
2976     _g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
2977   }
2978 };
2979 
2980 
2981 void ConcurrentMark::aggregate_count_data() {
2982   uint n_workers = _g1h->workers()->active_workers();
2983 
2984   G1AggregateCountDataTask g1_par_agg_task(_g1h, this, &_card_bm,
2985                                            _max_worker_id, n_workers);
2986 

2987   _g1h->workers()->run_task(&g1_par_agg_task);

2988 }
2989 
2990 // Clear the per-worker arrays used to store the per-region counting data
2991 void ConcurrentMark::clear_all_count_data() {
2992   // Clear the global card bitmap - it will be filled during
2993   // liveness count aggregation (during remark) and the
2994   // final counting task.
2995   _card_bm.clear();
2996 
2997   // Clear the global region bitmap - it will be filled as part
2998   // of the final counting task.
2999   _region_bm.clear();
3000 
3001   uint max_regions = _g1h->max_regions();
3002   assert(_max_worker_id > 0, "uninitialized");
3003 
3004   for (uint i = 0; i < _max_worker_id; i += 1) {
3005     BitMap* task_card_bm = count_card_bitmap_for(i);
3006     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
3007 

