src/share/vm/gc/g1/g1CollectorPolicy.cpp

Old version:

 840 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 841                                                    mark_init_elapsed_time_ms) {
 842   collector_state()->set_during_marking(true);
 843   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 844   collector_state()->set_during_initial_mark_pause(false);
 845   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 846 }
 847 
 848 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 849   _mark_remark_start_sec = os::elapsedTime();
 850   collector_state()->set_during_marking(false);
 851 }
 852 
 853 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 854   double end_time_sec = os::elapsedTime();
 855   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 856   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 857   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 858   _prev_collection_pause_end_ms += elapsed_time_ms;
 859 
 860   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, _g1->gc_tracer_cm()->gc_id());
 861 }
 862 
 863 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 864   _mark_cleanup_start_sec = os::elapsedTime();
 865 }
 866 
 867 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 868   collector_state()->set_last_young_gc(true);
 869   collector_state()->set_in_marking_window(false);
 870 }
 871 
 872 void G1CollectorPolicy::record_concurrent_pause() {
 873   if (_stop_world_start > 0.0) {
 874     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 875     _trace_young_gen_time_data.record_yield_time(yield_ms);
 876   }
 877 }
 878 
 879 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 880   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {

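The remark pause time computed above is accumulated into _concurrent_mark_remark_times_ms (the cleanup pause goes into a similar sequence further down), which the policy later consults when predicting how long those pause components will take. The following is a rough, non-authoritative sketch of what such a bounded timing sequence can look like; the class name, sample limit, and methods are invented for illustration and are not HotSpot's sequence classes.

#include <cstddef>

// Illustration only: a bounded sequence of pause-time samples with a running
// average, similar in spirit to the *_times_ms sequences fed by add() above.
class BoundedTimeSeq {
  static const std::size_t MaxSamples = 10;  // keep only the most recent samples
  double _samples[MaxSamples];
  std::size_t _next;   // slot that the next add() overwrites
  std::size_t _count;  // number of valid samples, at most MaxSamples

 public:
  BoundedTimeSeq() : _next(0), _count(0) {}

  void add(double ms) {
    _samples[_next] = ms;
    _next = (_next + 1) % MaxSamples;
    if (_count < MaxSamples) {
      _count++;
    }
  }

  double avg() const {
    if (_count == 0) {
      return 0.0;
    }
    double sum = 0.0;
    for (std::size_t i = 0; i < _count; i++) {
      sum += _samples[i];
    }
    return sum / _count;
  }
};

A caller would invoke add(elapsed_time_ms) at the same point where the code above calls _concurrent_mark_remark_times_ms->add(elapsed_time_ms), and read avg() later when it needs an estimate for the next remark pause.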

 935   bool update_stats = !_g1->evacuation_failed();
 936 
 937 #ifndef PRODUCT
 938   if (G1YoungSurvRateVerbose) {
 939     gclog_or_tty->cr();
 940     _short_lived_surv_rate_group->print();
 941     // do that for any other surv rate groups too
 942   }
 943 #endif // PRODUCT
 944 
 945   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 946   if (last_pause_included_initial_mark) {
 947     record_concurrent_mark_init_end(0.0);
 948   } else if (need_to_start_conc_mark("end of GC")) {
 949     // Note: this might have already been set, if during the last
 950     // pause we decided to start a cycle but at the beginning of
 951     // this pause we decided to postpone it. That's OK.
 952     collector_state()->set_initiate_conc_mark_if_possible(true);
 953   }
 954 
 955   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 956                           end_time_sec, _g1->gc_tracer_stw()->gc_id());
 957 
 958   if (update_stats) {
 959     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
 960     // this is where we update the allocation rate of the application
 961     double app_time_ms =
 962       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 963     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 964       // This usually happens due to the timer not having the required
 965       // granularity. Some Linuxes are the usual culprits.
 966       // We'll just set it to something (arbitrarily) small.
 967       app_time_ms = 1.0;
 968     }
 969     // We maintain the invariant that all objects allocated by mutator
 970     // threads will be allocated out of eden regions. So, we can use
 971     // the number of eden regions allocated since the previous GC to
 972     // calculate the application's allocation rate. The only exception
 973     // to that is humongous objects, which are allocated separately. But
 974     // given that humongous object allocations do not really affect
 975     // either the pause's duration or when the next pause will take
 976     // place, we can safely ignore them here.


1567 void
1568 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1569   _collectionSetChooser->clear();
1570 
1571   WorkGang* workers = _g1->workers();
1572   uint n_workers = workers->active_workers();
1573 
1574   uint n_regions = _g1->num_regions();
1575   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1576   _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1577   ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1578   workers->run_task(&par_known_garbage_task);
1579 
1580   _collectionSetChooser->sort_regions();
1581 
1582   double end_sec = os::elapsedTime();
1583   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1584   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1585   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1586   _prev_collection_pause_end_ms += elapsed_time_ms;
1587   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, _g1->gc_tracer_cm()->gc_id());
1588 }
1589 
1590 // Add the heap region at the head of the non-incremental collection set
1591 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1592   assert(_inc_cset_build_state == Active, "Precondition");
1593   assert(hr->is_old(), "the region should be old");
1594 
1595   assert(!hr->in_collection_set(), "should not already be in the CSet");
1596   _g1->register_old_region_with_cset(hr);
1597   hr->set_next_in_collection_set(_collection_set);
1598   _collection_set = hr;
1599   _collection_set_bytes_used_before += hr->used();
1600   size_t rs_length = hr->rem_set()->occupied();
1601   _recorded_rs_lengths += rs_length;
1602   _old_cset_region_length += 1;
1603 }
1604 
1605 // Initialize the per-collection-set information
1606 void G1CollectorPolicy::start_incremental_cset_building() {
1607   assert(_inc_cset_build_state == Inactive, "Precondition");

New version:

 840 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 841                                                    mark_init_elapsed_time_ms) {
 842   collector_state()->set_during_marking(true);
 843   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 844   collector_state()->set_during_initial_mark_pause(false);
 845   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 846 }
 847 
 848 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 849   _mark_remark_start_sec = os::elapsedTime();
 850   collector_state()->set_during_marking(false);
 851 }
 852 
 853 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 854   double end_time_sec = os::elapsedTime();
 855   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 856   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 857   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 858   _prev_collection_pause_end_ms += elapsed_time_ms;
 859 
 860   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec);
 861 }
 862 
 863 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 864   _mark_cleanup_start_sec = os::elapsedTime();
 865 }
 866 
 867 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 868   collector_state()->set_last_young_gc(true);
 869   collector_state()->set_in_marking_window(false);
 870 }
 871 
 872 void G1CollectorPolicy::record_concurrent_pause() {
 873   if (_stop_world_start > 0.0) {
 874     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 875     _trace_young_gen_time_data.record_yield_time(yield_ms);
 876   }
 877 }
 878 
 879 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 880   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {

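The remark code above (and the cleanup code further down) also hands the pause window to _mmu_tracker->add_pause(start, end); the visible difference between the two versions of this file is that the GC id argument obtained from the GC tracers is no longer passed. The tracker's purpose is to relate stop-the-world pauses to the MMU (minimum mutator utilization) goal that G1 derives from MaxGCPauseMillis and GCPauseIntervalMillis. The sketch below is schematic only, with invented names, of how such a tracker can store pauses and compute mutator utilization over a trailing time slice; it is not HotSpot's MMU tracker implementation.

#include <algorithm>
#include <cstddef>
#include <deque>
#include <utility>

// Schematic only: record stop-the-world pauses and compute the fraction of a
// trailing time slice during which the mutator (application) was running.
class SimplePauseTracker {
  double _slice_sec;  // sliding window length in seconds
  std::deque<std::pair<double, double> > _pauses;  // (start_sec, end_sec)

 public:
  explicit SimplePauseTracker(double slice_sec) : _slice_sec(slice_sec) {}

  void add_pause(double start_sec, double end_sec) {
    _pauses.push_back(std::make_pair(start_sec, end_sec));
  }

  // Mutator utilization over the slice ending at now_sec, in [0.0, 1.0].
  double mutator_utilization(double now_sec) const {
    double window_start = now_sec - _slice_sec;
    double paused_sec = 0.0;
    for (std::size_t i = 0; i < _pauses.size(); i++) {
      double s = std::max(_pauses[i].first, window_start);
      double e = std::min(_pauses[i].second, now_sec);
      if (e > s) {
        paused_sec += e - s;
      }
    }
    return (_slice_sec - paused_sec) / _slice_sec;
  }
};

For example, with a 200 ms slice and a single 20 ms pause falling inside it, mutator_utilization() returns 0.9.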

 935   bool update_stats = !_g1->evacuation_failed();
 936 
 937 #ifndef PRODUCT
 938   if (G1YoungSurvRateVerbose) {
 939     gclog_or_tty->cr();
 940     _short_lived_surv_rate_group->print();
 941     // do that for any other surv rate groups too
 942   }
 943 #endif // PRODUCT
 944 
 945   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 946   if (last_pause_included_initial_mark) {
 947     record_concurrent_mark_init_end(0.0);
 948   } else if (need_to_start_conc_mark("end of GC")) {
 949     // Note: this might have already been set, if during the last
 950     // pause we decided to start a cycle but at the beginning of
 951     // this pause we decided to postpone it. That's OK.
 952     collector_state()->set_initiate_conc_mark_if_possible(true);
 953   }
 954 
 955   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
 956 
 957   if (update_stats) {
 958     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
 959     // this is where we update the allocation rate of the application
 960     double app_time_ms =
 961       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 962     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 963       // This usually happens due to the timer not having the required
 964       // granularity. Some Linuxes are the usual culprits.
 965       // We'll just set it to something (arbitrarily) small.
 966       app_time_ms = 1.0;
 967     }
 968     // We maintain the invariant that all objects allocated by mutator
 969     // threads will be allocated out of eden regions. So, we can use
 970     // the number of eden regions allocated since the previous GC to
 971     // calculate the application's allocation rate. The only exception
 972     // to that is humongous objects, which are allocated separately. But
 973     // given that humongous object allocations do not really affect
 974     // either the pause's duration or when the next pause will take
 975     // place, we can safely ignore them here.

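The comment closing the hunk above explains the allocation-rate estimate: the interval between the end of the previous pause and the start of the current one counts as application (mutator) time, and the number of eden regions allocated in that interval gives the rate, with humongous allocations deliberately ignored. A minimal worked sketch under those assumptions follows; the function name, parameters, and the granularity constant are invented for illustration and are not members of G1CollectorPolicy.

// Illustration of the rate calculation described in the comment above.
// All names are hypothetical; only the arithmetic mirrors the code above.
double estimated_alloc_rate_regions_per_ms(double cur_pause_start_sec,
                                           double prev_pause_end_ms,
                                           unsigned eden_regions_allocated) {
  // Application time is the wall-clock interval between the previous pause
  // ending and the current pause starting.
  double app_time_ms = cur_pause_start_sec * 1000.0 - prev_pause_end_ms;

  // Mirror the MIN_TIMER_GRANULARITY guard above: if the timer cannot
  // resolve the interval, fall back to an arbitrarily small positive value.
  const double min_timer_granularity_ms = 1.0;  // illustrative threshold only
  if (app_time_ms < min_timer_granularity_ms) {
    app_time_ms = 1.0;
  }

  return (double) eden_regions_allocated / app_time_ms;
}

For example, 50 eden regions allocated over 250 ms of application time gives 0.2 regions per millisecond, which can then be scaled by the region size to obtain bytes per millisecond.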

1566 void
1567 G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
1568   _collectionSetChooser->clear();
1569 
1570   WorkGang* workers = _g1->workers();
1571   uint n_workers = workers->active_workers();
1572 
1573   uint n_regions = _g1->num_regions();
1574   uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
1575   _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
1576   ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
1577   workers->run_task(&par_known_garbage_task);
1578 
1579   _collectionSetChooser->sort_regions();
1580 
1581   double end_sec = os::elapsedTime();
1582   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1583   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1584   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1585   _prev_collection_pause_end_ms += elapsed_time_ms;
1586   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec);
1587 }
1588 
1589 // Add the heap region at the head of the non-incremental collection set
1590 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1591   assert(_inc_cset_build_state == Active, "Precondition");
1592   assert(hr->is_old(), "the region should be old");
1593 
1594   assert(!hr->in_collection_set(), "should not already be in the CSet");
1595   _g1->register_old_region_with_cset(hr);
1596   hr->set_next_in_collection_set(_collection_set);
1597   _collection_set = hr;
1598   _collection_set_bytes_used_before += hr->used();
1599   size_t rs_length = hr->rem_set()->occupied();
1600   _recorded_rs_lengths += rs_length;
1601   _old_cset_region_length += 1;
1602 }
1603 
1604 // Initialize the per-collection-set information
1605 void G1CollectorPolicy::start_incremental_cset_building() {
1606   assert(_inc_cset_build_state == Inactive, "Precondition");

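record_concurrent_mark_cleanup_end() above parallelizes the per-region garbage accounting: the region space is split into chunks of chunk_size regions, the chunked work runs on the WorkGang's n_workers threads via ParKnownGarbageTask, and the chooser then sorts the resulting candidate regions. The sketch below shows the generic claim-a-chunk pattern with standard C++ threads and an atomic counter; it illustrates the idea only and is not the HotSpot WorkGang or ParKnownGarbageTask code.

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Each worker repeatedly claims the next unprocessed chunk via an atomic
// counter and processes it, so the load stays balanced even if chunks take
// uneven amounts of time. The per-region body is a stand-in for the real
// garbage accounting.
static void process_regions_in_parallel(std::vector<long>& regions,
                                        unsigned n_workers,
                                        unsigned chunk_size) {
  if (chunk_size == 0) {
    chunk_size = 1;
  }
  const std::size_t n_chunks = (regions.size() + chunk_size - 1) / chunk_size;
  std::atomic<std::size_t> next_chunk(0);

  auto worker = [&]() {
    for (;;) {
      std::size_t chunk = next_chunk.fetch_add(1);
      if (chunk >= n_chunks) {
        break;
      }
      std::size_t begin = chunk * chunk_size;
      std::size_t end = std::min(begin + chunk_size, regions.size());
      for (std::size_t i = begin; i < end; i++) {
        regions[i] += 1;  // stand-in for estimating reclaimable space
      }
    }
  };

  std::vector<std::thread> gang;
  for (unsigned w = 0; w < n_workers; w++) {
    gang.emplace_back(worker);
  }
  for (std::size_t t = 0; t < gang.size(); t++) {
    gang[t].join();
  }
}

After the parallel pass, add_old_region_to_cset() above takes individual chosen old regions and pushes them onto the head of the intrusive _collection_set list, accumulating _collection_set_bytes_used_before and the recorded remembered-set lengths used elsewhere in the policy.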
