72 #include "gc/shared/gcBehaviours.hpp"
73 #include "gc/shared/gcHeapSummary.hpp"
74 #include "gc/shared/gcId.hpp"
75 #include "gc/shared/gcLocker.hpp"
76 #include "gc/shared/gcTimer.hpp"
77 #include "gc/shared/gcTraceTime.inline.hpp"
78 #include "gc/shared/generationSpec.hpp"
79 #include "gc/shared/isGCActiveMark.hpp"
80 #include "gc/shared/locationPrinter.inline.hpp"
81 #include "gc/shared/oopStorageParState.hpp"
82 #include "gc/shared/preservedMarks.inline.hpp"
83 #include "gc/shared/suspendibleThreadSet.hpp"
84 #include "gc/shared/referenceProcessor.inline.hpp"
85 #include "gc/shared/taskTerminator.hpp"
86 #include "gc/shared/taskqueue.inline.hpp"
87 #include "gc/shared/weakProcessor.inline.hpp"
88 #include "gc/shared/workerPolicy.hpp"
89 #include "logging/log.hpp"
90 #include "memory/allocation.hpp"
91 #include "memory/iterator.hpp"
92 #include "memory/resourceArea.hpp"
93 #include "memory/universe.hpp"
94 #include "oops/access.inline.hpp"
95 #include "oops/compressedOops.inline.hpp"
96 #include "oops/oop.inline.hpp"
97 #include "runtime/atomic.hpp"
98 #include "runtime/handles.inline.hpp"
99 #include "runtime/init.hpp"
100 #include "runtime/orderAccess.hpp"
101 #include "runtime/threadSMR.hpp"
102 #include "runtime/vmThread.hpp"
103 #include "utilities/align.hpp"
104 #include "utilities/autoRestore.hpp"
105 #include "utilities/bitMap.inline.hpp"
106 #include "utilities/globalDefinitions.hpp"
107 #include "utilities/stack.inline.hpp"
108
109 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
110
111 // INVARIANTS/NOTES
144 if (!will_become_free(hr)) {
145 *card_ptr = G1CardTable::dirty_card_val();
146 _num_dirtied++;
147 }
148 }
149
150 size_t num_dirtied() const { return _num_dirtied; }
151 };
152
153
// Invalidate the "from" card cache entries covering the given region range
// so that stale cached cards are not used after the region mapping changed.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
157
// Notification that the regions [start_idx, start_idx + num_regions) have
// been committed; drops the corresponding from-card-cache entries.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
163
164 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
165 Ticks start = Ticks::now();
166 workers()->run_task(task, workers()->active_workers());
167 return Ticks::now() - start;
168 }
169
// Factory method: create a new HeapRegion with the given index, covering
// the memory range mr, backed by this heap's block offset table.
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}
174
175 // Private methods.
176
177 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
178 HeapRegionType type,
179 bool do_expand,
180 uint node_index) {
181 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
182 "the only time we use this to allocate a humongous region is "
183 "when we are allocating a single humongous region");
184
185 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
186
3683 _bytes_used_during_gc = 0;
3684
3685 _expand_heap_after_alloc_failure = true;
3686 _evacuation_failed = false;
3687
3688 // Disable the hot card cache.
3689 _hot_card_cache->reset_hot_cache_claimed_index();
3690 _hot_card_cache->set_use_cache(false);
3691
3692 // Initialize the GC alloc regions.
3693 _allocator->init_gc_alloc_regions(evacuation_info);
3694
3695 {
3696 Ticks start = Ticks::now();
3697 rem_set()->prepare_for_scan_heap_roots();
3698 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3699 }
3700
3701 {
3702 G1PrepareEvacuationTask g1_prep_task(this);
3703 Tickspan task_time = run_task(&g1_prep_task);
3704
3705 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3706 g1_prep_task.humongous_total(),
3707 g1_prep_task.humongous_candidates());
3708 }
3709
3710 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3711 _preserved_marks_set.assert_empty();
3712
3713 #if COMPILER2_OR_JVMCI
3714 DerivedPointerTable::clear();
3715 #endif
3716
3717 // Concurrent start needs claim bits to keep track of the marked-through CLDs.
3718 if (collector_state()->in_concurrent_start_gc()) {
3719 concurrent_mark()->pre_concurrent_start();
3720
3721 double start_clear_claimed_marks = os::elapsedTime();
3722
3723 ClassLoaderDataGraph::clear_claimed_marks();
3833 _root_processor(root_processor)
3834 { }
3835 };
3836
3837 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3838 G1GCPhaseTimes* p = phase_times();
3839
3840 {
3841 Ticks start = Ticks::now();
3842 rem_set()->merge_heap_roots(true /* initial_evacuation */);
3843 p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3844 }
3845
3846 Tickspan task_time;
3847 const uint num_workers = workers()->active_workers();
3848
3849 Ticks start_processing = Ticks::now();
3850 {
3851 G1RootProcessor root_processor(this, num_workers);
3852 G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
3853 task_time = run_task(&g1_par_task);
3854 // Closing the inner scope will execute the destructor for the G1RootProcessor object.
3855 // To extract its code root fixup time we measure total time of this scope and
3856 // subtract from the time the WorkGang task took.
3857 }
3858 Tickspan total_processing = Ticks::now() - start_processing;
3859
3860 p->record_initial_evac_time(task_time.seconds() * 1000.0);
3861 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3862 }
3863
// Gang task that evacuates the optional part of the collection set,
// attributing its work to the "Opt*" phase-time slots.
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  // Scan remembered-set heap roots and collection set regions into the
  // per-thread scanner state, under the optional-evacuation phases.
  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  // Drain the task queues, booking copy and termination time to the
  // optional-evacuation phase slots.
  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};
3882
3883 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3884 class G1MarkScope : public MarkScope { };
3885
3886 Tickspan task_time;
3887
3888 Ticks start_processing = Ticks::now();
3889 {
3890 G1MarkScope code_mark_scope;
3891 G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3892 task_time = run_task(&task);
3893 // See comment in evacuate_collection_set() for the reason of the scope.
3894 }
3895 Tickspan total_processing = Ticks::now() - start_processing;
3896
3897 G1GCPhaseTimes* p = phase_times();
3898 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3899 }
3900
3901 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3902 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3903
3904 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3905
3906 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3907 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3908
3909 if (time_left_ms < 0 ||
3910 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
3911 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
3912 _collection_set.optional_region_length(), time_left_ms);
4898
// Rebuild the strong code root sets by applying RebuildStrongCodeRootClosure
// to every blob in the code cache.
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
4903
// Serviceability support: delegated to the G1 monitoring support object.
void G1CollectedHeap::initialize_serviceability() {
  _g1mm->initialize_serviceability();
}
4907
// Current heap memory usage, as reported by the monitoring support object.
MemoryUsage G1CollectedHeap::memory_usage() {
  return _g1mm->memory_usage();
}
4911
// The GC memory managers exposed for serviceability/monitoring.
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _g1mm->memory_managers();
}
4915
// The memory pools exposed for serviceability/monitoring.
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _g1mm->memory_pools();
}
|
72 #include "gc/shared/gcBehaviours.hpp"
73 #include "gc/shared/gcHeapSummary.hpp"
74 #include "gc/shared/gcId.hpp"
75 #include "gc/shared/gcLocker.hpp"
76 #include "gc/shared/gcTimer.hpp"
77 #include "gc/shared/gcTraceTime.inline.hpp"
78 #include "gc/shared/generationSpec.hpp"
79 #include "gc/shared/isGCActiveMark.hpp"
80 #include "gc/shared/locationPrinter.inline.hpp"
81 #include "gc/shared/oopStorageParState.hpp"
82 #include "gc/shared/preservedMarks.inline.hpp"
83 #include "gc/shared/suspendibleThreadSet.hpp"
84 #include "gc/shared/referenceProcessor.inline.hpp"
85 #include "gc/shared/taskTerminator.hpp"
86 #include "gc/shared/taskqueue.inline.hpp"
87 #include "gc/shared/weakProcessor.inline.hpp"
88 #include "gc/shared/workerPolicy.hpp"
89 #include "logging/log.hpp"
90 #include "memory/allocation.hpp"
91 #include "memory/iterator.hpp"
92 #include "memory/heapInspection.hpp"
93 #include "memory/resourceArea.hpp"
94 #include "memory/universe.hpp"
95 #include "oops/access.inline.hpp"
96 #include "oops/compressedOops.inline.hpp"
97 #include "oops/oop.inline.hpp"
98 #include "runtime/atomic.hpp"
99 #include "runtime/handles.inline.hpp"
100 #include "runtime/init.hpp"
101 #include "runtime/orderAccess.hpp"
102 #include "runtime/threadSMR.hpp"
103 #include "runtime/vmThread.hpp"
104 #include "utilities/align.hpp"
105 #include "utilities/autoRestore.hpp"
106 #include "utilities/bitMap.inline.hpp"
107 #include "utilities/globalDefinitions.hpp"
108 #include "utilities/stack.inline.hpp"
109
110 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
111
112 // INVARIANTS/NOTES
145 if (!will_become_free(hr)) {
146 *card_ptr = G1CardTable::dirty_card_val();
147 _num_dirtied++;
148 }
149 }
150
151 size_t num_dirtied() const { return _num_dirtied; }
152 };
153
154
// Invalidate the "from" card cache entries covering the given region range
// so that stale cached cards are not used after the region mapping changed.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
158
// Notification that the regions [start_idx, start_idx + num_regions) have
// been committed; drops the corresponding from-card-cache entries.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
164
// Execute the given task on all currently active workers of the work gang.
// Use run_task_timed() when the caller needs the elapsed time.
void G1CollectedHeap::run_task(AbstractGangTask* task) {
  workers()->run_task(task, workers()->active_workers());
}
168
169 Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
170 Ticks start = Ticks::now();
171 run_task(task);
172 return Ticks::now() - start;
173 }
174
// Factory method: create a new HeapRegion with the given index, covering
// the memory range mr, backed by this heap's block offset table.
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}
179
180 // Private methods.
181
182 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
183 HeapRegionType type,
184 bool do_expand,
185 uint node_index) {
186 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
187 "the only time we use this to allocate a humongous region is "
188 "when we are allocating a single humongous region");
189
190 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
191
3688 _bytes_used_during_gc = 0;
3689
3690 _expand_heap_after_alloc_failure = true;
3691 _evacuation_failed = false;
3692
3693 // Disable the hot card cache.
3694 _hot_card_cache->reset_hot_cache_claimed_index();
3695 _hot_card_cache->set_use_cache(false);
3696
3697 // Initialize the GC alloc regions.
3698 _allocator->init_gc_alloc_regions(evacuation_info);
3699
3700 {
3701 Ticks start = Ticks::now();
3702 rem_set()->prepare_for_scan_heap_roots();
3703 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3704 }
3705
3706 {
3707 G1PrepareEvacuationTask g1_prep_task(this);
3708 Tickspan task_time = run_task_timed(&g1_prep_task);
3709
3710 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3711 g1_prep_task.humongous_total(),
3712 g1_prep_task.humongous_candidates());
3713 }
3714
3715 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3716 _preserved_marks_set.assert_empty();
3717
3718 #if COMPILER2_OR_JVMCI
3719 DerivedPointerTable::clear();
3720 #endif
3721
3722 // Concurrent start needs claim bits to keep track of the marked-through CLDs.
3723 if (collector_state()->in_concurrent_start_gc()) {
3724 concurrent_mark()->pre_concurrent_start();
3725
3726 double start_clear_claimed_marks = os::elapsedTime();
3727
3728 ClassLoaderDataGraph::clear_claimed_marks();
3838 _root_processor(root_processor)
3839 { }
3840 };
3841
3842 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3843 G1GCPhaseTimes* p = phase_times();
3844
3845 {
3846 Ticks start = Ticks::now();
3847 rem_set()->merge_heap_roots(true /* initial_evacuation */);
3848 p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3849 }
3850
3851 Tickspan task_time;
3852 const uint num_workers = workers()->active_workers();
3853
3854 Ticks start_processing = Ticks::now();
3855 {
3856 G1RootProcessor root_processor(this, num_workers);
3857 G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
3858 task_time = run_task_timed(&g1_par_task);
3859 // Closing the inner scope will execute the destructor for the G1RootProcessor object.
3860 // To extract its code root fixup time we measure total time of this scope and
3861 // subtract from the time the WorkGang task took.
3862 }
3863 Tickspan total_processing = Ticks::now() - start_processing;
3864
3865 p->record_initial_evac_time(task_time.seconds() * 1000.0);
3866 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3867 }
3868
// Gang task that evacuates the optional part of the collection set,
// attributing its work to the "Opt*" phase-time slots.
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  // Scan remembered-set heap roots and collection set regions into the
  // per-thread scanner state, under the optional-evacuation phases.
  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  // Drain the task queues, booking copy and termination time to the
  // optional-evacuation phase slots.
  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};
3887
3888 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3889 class G1MarkScope : public MarkScope { };
3890
3891 Tickspan task_time;
3892
3893 Ticks start_processing = Ticks::now();
3894 {
3895 G1MarkScope code_mark_scope;
3896 G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3897 task_time = run_task_timed(&task);
3898 // See comment in evacuate_collection_set() for the reason of the scope.
3899 }
3900 Tickspan total_processing = Ticks::now() - start_processing;
3901
3902 G1GCPhaseTimes* p = phase_times();
3903 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3904 }
3905
3906 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3907 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3908
3909 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3910
3911 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3912 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3913
3914 if (time_left_ms < 0 ||
3915 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
3916 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
3917 _collection_set.optional_region_length(), time_left_ms);
4903
// Rebuild the strong code root sets by applying RebuildStrongCodeRootClosure
// to every blob in the code cache.
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
4908
// Serviceability support: delegated to the G1 monitoring support object.
void G1CollectedHeap::initialize_serviceability() {
  _g1mm->initialize_serviceability();
}
4912
// Current heap memory usage, as reported by the monitoring support object.
MemoryUsage G1CollectedHeap::memory_usage() {
  return _g1mm->memory_usage();
}
4916
// The GC memory managers exposed for serviceability/monitoring.
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _g1mm->memory_managers();
}
4920
// The memory pools exposed for serviceability/monitoring.
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _g1mm->memory_pools();
}
4924
// Parallel object iteration support: worker threads claim heap regions
// through a shared HeapRegionClaimer so each region (and thus each object)
// is visited by exactly one worker.
class G1ParallelObjectIterator : public ParallelObjectIterator {
private:
  G1CollectedHeap* _heap;
  HeapRegionClaimer _claimer;

public:
  // A thread_num of 0 means: size the claimer for the gang's current
  // number of active workers.
  G1ParallelObjectIterator(uint thread_num) :
      _heap(G1CollectedHeap::heap()),
      _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}

  // Apply cl to all objects in the regions claimed by worker_id.
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
  }
};
4939
// Create a parallel object iterator for the given number of threads.
// Ownership of the returned iterator passes to the caller.
ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
  return new G1ParallelObjectIterator(thread_num);
}
4943
// Apply cl to all objects in the heap regions that worker_id claims via
// the given claimer; each worker starts at its own region offset.
void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
}
|