void G1CollectorPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  collector_state()->set_during_marking(true);
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_during_initial_mark_pause(false);
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  collector_state()->set_during_marking(false);
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, _g1->gc_tracer_cm()->gc_id());
}
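
// --- Illustrative sketch (not part of the original file) ---------------------
// Each remark pause is reported to the MMU (minimum mutator utilization)
// tracker above. A minimal, self-contained model of that bookkeeping follows:
// utilization of a time slice is the fraction of the slice not spent in
// recorded pauses. SimpleMMUModel and its members are hypothetical names;
// this is not the MMUTracker implementation.
struct SimpleMMUModel {
  enum { MaxPauses = 64 };
  double _slice_sec;            // length of the time slice being tracked
  double _starts[MaxPauses];    // pause start times, in seconds
  double _ends[MaxPauses];      // pause end times, in seconds
  int    _count;

  SimpleMMUModel(double slice_sec) : _slice_sec(slice_sec), _count(0) { }

  void add_pause(double start_sec, double end_sec) {
    if (_count < MaxPauses) {
      _starts[_count] = start_sec;
      _ends[_count]   = end_sec;
      _count++;
    }
  }

  // Mutator utilization over the slice ending at now_sec: 1.0 means the
  // mutator ran for the whole slice, 0.0 means GC owned all of it.
  double utilization_at(double now_sec) const {
    double window_start = now_sec - _slice_sec;
    double gc_sec = 0.0;
    for (int i = 0; i < _count; i++) {
      // Clip each pause to the window before charging it.
      double s = _starts[i] > window_start ? _starts[i] : window_start;
      double e = _ends[i] < now_sec ? _ends[i] : now_sec;
      if (e > s) {
        gc_sec += e - s;
      }
    }
    return 1.0 - gc_sec / _slice_sec;
  }
};
// -----------------------------------------------------------------------------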

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  collector_state()->set_last_young_gc(true);
  collector_state()->set_in_marking_window(false);
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_young_gen_time_data.record_yield_time(yield_ms);
  }
}
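
// --- Illustrative sketch (not part of the original file) ---------------------
// record_concurrent_pause() pairs with code that stores os::elapsedTime()
// into _stop_world_start when the concurrent mark thread yields to a
// safepoint. A hypothetical scope guard expressing that protocol (YieldScope
// is a made-up name; only os::elapsedTime() is a real HotSpot call):
class YieldScope {
  double  _start_sec;   // timestamp taken when the concurrent thread yielded
  double* _total_ms;    // accumulator for total yield time, in milliseconds
 public:
  YieldScope(double* total_ms)
    : _start_sec(os::elapsedTime()), _total_ms(total_ms) { }
  ~YieldScope() {
    // Charge the elapsed wall time of this scope to the accumulator.
    *_total_ms += (os::elapsedTime() - _start_sec) * 1000.0;
  }
};
// -----------------------------------------------------------------------------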

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    // A concurrent marking cycle is already in progress; don't start another.
    return false;
  }
  // ... (remainder of need_to_start_conc_mark elided) ...
}

// The lines below are a fragment from the middle of record_collection_pause_end();
// its opening (which sets up the locals end_time_sec, pause_time_ms and
// last_pause_included_initial_mark) is elided.

  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->cr();
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms / 1000.0,
                          end_time_sec, _g1->gc_tracer_stw()->gc_id());

  if (update_stats) {
    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
    // This is where we update the allocation rate of the application.
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the number of eden regions allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects, which are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
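    // --- Illustrative aside (not part of the original file) -------------------
    // With the eden-only invariant above, the allocation rate reduces to eden
    // regions allocated since the previous pause divided by mutator time, e.g.:
    //
    //   double alloc_rate_regions_per_ms =
    //     (double) eden_regions_allocated / app_time_ms;   // hypothetical name
    //
    // app_time_ms was clamped to at least 1.0 above, so the division is safe;
    // a zero rate simply means no eden region was allocated.
    // --------------------------------------------------------------------------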

  // ... (remainder of record_collection_pause_end elided) ...

void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {
  _collectionSetChooser->clear();

  WorkGang* workers = _g1->workers();
  uint n_workers = workers->active_workers();

  uint n_regions = _g1->num_regions();
  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
  _collectionSetChooser->prepare_for_par_region_addition(n_workers, n_regions, chunk_size);
  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
  workers->run_task(&par_known_garbage_task);

  _collectionSetChooser->sort_regions();

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, _g1->gc_tracer_cm()->gc_id());
}
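
// --- Illustrative sketch (not part of the original file) ---------------------
// calculate_parallel_work_chunk_size() is defined elsewhere in this file. A
// common shape for such a helper is to over-partition the region range so
// that workers finishing early can claim further chunks; the constant below
// is illustrative, not necessarily what HotSpot uses.
static uint sketch_chunk_size(uint n_workers, uint n_regions) {
  const uint overpartition_factor = 4;               // chunks per worker (illustrative)
  uint n_chunks = n_workers * overpartition_factor;
  uint chunk_size = n_regions / (n_chunks > 0 ? n_chunks : 1);
  return chunk_size > 0 ? chunk_size : 1;            // hand out at least one region
}
// -----------------------------------------------------------------------------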

// Add the heap region at the head of the non-incremental collection set.
void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(hr->is_old(), "the region should be old");

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  _g1->register_old_region_with_cset(hr);
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_bytes_used_before += hr->used();
  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
  _old_cset_region_length += 1;
}
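
// --- Illustrative sketch (not part of the original file) ---------------------
// _recorded_rs_lengths accumulates the remembered-set cardinality of the
// chosen regions; the policy elsewhere turns such totals into a predicted
// remembered-set scan cost. A hypothetical linear model of that conversion
// (sketch_predicted_rs_scan_ms and cost_per_card_ms are made-up names):
static double sketch_predicted_rs_scan_ms(size_t recorded_rs_lengths,
                                          double cost_per_card_ms) {
  // Scan cost is assumed proportional to the number of remembered-set cards.
  return (double) recorded_rs_lengths * cost_per_card_ms;
}
// -----------------------------------------------------------------------------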

// Initialize the per-collection-set information.
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");
  // ... (remainder of start_incremental_cset_building elided) ...
}