72 #include "gc/shared/gcBehaviours.hpp"
73 #include "gc/shared/gcHeapSummary.hpp"
74 #include "gc/shared/gcId.hpp"
75 #include "gc/shared/gcLocker.hpp"
76 #include "gc/shared/gcTimer.hpp"
77 #include "gc/shared/gcTraceTime.inline.hpp"
78 #include "gc/shared/generationSpec.hpp"
79 #include "gc/shared/isGCActiveMark.hpp"
80 #include "gc/shared/locationPrinter.inline.hpp"
81 #include "gc/shared/oopStorageParState.hpp"
82 #include "gc/shared/preservedMarks.inline.hpp"
83 #include "gc/shared/suspendibleThreadSet.hpp"
84 #include "gc/shared/referenceProcessor.inline.hpp"
85 #include "gc/shared/taskTerminator.hpp"
86 #include "gc/shared/taskqueue.inline.hpp"
87 #include "gc/shared/weakProcessor.inline.hpp"
88 #include "gc/shared/workerPolicy.hpp"
89 #include "logging/log.hpp"
90 #include "memory/allocation.hpp"
91 #include "memory/iterator.hpp"
92 #include "memory/resourceArea.hpp"
93 #include "memory/universe.hpp"
94 #include "oops/access.inline.hpp"
95 #include "oops/compressedOops.inline.hpp"
96 #include "oops/oop.inline.hpp"
97 #include "runtime/atomic.hpp"
98 #include "runtime/flags/flagSetting.hpp"
99 #include "runtime/handles.inline.hpp"
100 #include "runtime/init.hpp"
101 #include "runtime/orderAccess.hpp"
102 #include "runtime/threadSMR.hpp"
103 #include "runtime/vmThread.hpp"
104 #include "utilities/align.hpp"
105 #include "utilities/bitMap.inline.hpp"
106 #include "utilities/globalDefinitions.hpp"
107 #include "utilities/stack.inline.hpp"
108
109 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
110
111 // INVARIANTS/NOTES
144 if (!will_become_free(hr)) {
145 *card_ptr = G1CardTable::dirty_card_val();
146 _num_dirtied++;
147 }
148 }
149
150 size_t num_dirtied() const { return _num_dirtied; }
151 };
152
153
// Invalidate the from-card cache entries for the given range of heap regions,
// delegating to the remembered-set machinery.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
157
// Callback invoked when a range of regions is committed; resets the
// corresponding from-card cache entries.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
163
164 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
165 Ticks start = Ticks::now();
166 workers()->run_task(task, workers()->active_workers());
167 return Ticks::now() - start;
168 }
169
170 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
171 MemRegion mr) {
172 return new HeapRegion(hrs_index, bot(), mr);
173 }
174
175 // Private methods.
176
177 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
178 HeapRegionType type,
179 bool do_expand,
180 uint node_index) {
181 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
182 "the only time we use this to allocate a humongous region is "
183 "when we are allocating a single humongous region");
184
185 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
186
3745 _bytes_used_during_gc = 0;
3746
3747 _expand_heap_after_alloc_failure = true;
3748 _evacuation_failed = false;
3749
3750 // Disable the hot card cache.
3751 _hot_card_cache->reset_hot_cache_claimed_index();
3752 _hot_card_cache->set_use_cache(false);
3753
3754 // Initialize the GC alloc regions.
3755 _allocator->init_gc_alloc_regions(evacuation_info);
3756
3757 {
3758 Ticks start = Ticks::now();
3759 rem_set()->prepare_for_scan_heap_roots();
3760 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3761 }
3762
3763 {
3764 G1PrepareEvacuationTask g1_prep_task(this);
3765 Tickspan task_time = run_task(&g1_prep_task);
3766
3767 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3768 g1_prep_task.humongous_total(),
3769 g1_prep_task.humongous_candidates());
3770 }
3771
3772 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3773 _preserved_marks_set.assert_empty();
3774
3775 #if COMPILER2_OR_JVMCI
3776 DerivedPointerTable::clear();
3777 #endif
3778
3779 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3780 if (collector_state()->in_initial_mark_gc()) {
3781 concurrent_mark()->pre_initial_mark();
3782
3783 double start_clear_claimed_marks = os::elapsedTime();
3784
3785 ClassLoaderDataGraph::clear_claimed_marks();
3895 _root_processor(root_processor)
3896 { }
3897 };
3898
// Evacuates the initial (mandatory) collection set: first merges remembered
// set entries into the card table, then runs the parallel evacuation task,
// recording phase times for each step.
void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  G1GCPhaseTimes* p = phase_times();

  {
    // Time the merging of heap roots separately from evacuation proper.
    Ticks start = Ticks::now();
    rem_set()->merge_heap_roots(true /* initial_evacuation */);
    p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
  }

  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(this, num_workers);
    G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
    task_time = run_task(&g1_par_task);
    // Closing the inner scope will execute the destructor for the G1RootProcessor object.
    // To extract its code root fixup time we measure total time of this scope and
    // subtract from the time the WorkGang task took.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  // Code root fixup time is the scope time minus the task time (see above).
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}
3925
// Gang task that evacuates optional collection set regions, reporting its
// work under the "Opt*" phase-time categories.
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  // Scans heap roots and collection set regions, attributing time to the
  // optional-evacuation phases.
  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  // Drains the task queues, attributing copy/termination time to the
  // optional-evacuation phases.
  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};
3944
// Evacuates the next increment of optional collection set regions and
// accumulates the code root fixup time into the phase times.
void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
    task_time = run_task(&task);
    // See comment in evacuate_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  // Fixup time is scope time minus task time, added across increments.
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}
3962
3963 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3964 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3965
3966 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3967
3968 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3969 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3970
3971 if (time_left_ms < 0 ||
3972 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
3973 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
3974 _collection_set.optional_region_length(), time_left_ms);
4960
4961 void G1CollectedHeap::rebuild_strong_code_roots() {
4962 RebuildStrongCodeRootClosure blob_cl(this);
4963 CodeCache::blobs_do(&blob_cl);
4964 }
4965
// Sets up memory pools/managers; delegates to the G1 monitoring support object.
void G1CollectedHeap::initialize_serviceability() {
  _g1mm->initialize_serviceability();
}
4969
// Returns current heap memory usage as reported by G1 monitoring support.
MemoryUsage G1CollectedHeap::memory_usage() {
  return _g1mm->memory_usage();
}
4973
// Returns the GC memory managers exposed for serviceability/JMX.
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _g1mm->memory_managers();
}
4977
// Returns the memory pools exposed for serviceability/JMX.
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _g1mm->memory_pools();
}
|
72 #include "gc/shared/gcBehaviours.hpp"
73 #include "gc/shared/gcHeapSummary.hpp"
74 #include "gc/shared/gcId.hpp"
75 #include "gc/shared/gcLocker.hpp"
76 #include "gc/shared/gcTimer.hpp"
77 #include "gc/shared/gcTraceTime.inline.hpp"
78 #include "gc/shared/generationSpec.hpp"
79 #include "gc/shared/isGCActiveMark.hpp"
80 #include "gc/shared/locationPrinter.inline.hpp"
81 #include "gc/shared/oopStorageParState.hpp"
82 #include "gc/shared/preservedMarks.inline.hpp"
83 #include "gc/shared/suspendibleThreadSet.hpp"
84 #include "gc/shared/referenceProcessor.inline.hpp"
85 #include "gc/shared/taskTerminator.hpp"
86 #include "gc/shared/taskqueue.inline.hpp"
87 #include "gc/shared/weakProcessor.inline.hpp"
88 #include "gc/shared/workerPolicy.hpp"
89 #include "logging/log.hpp"
90 #include "memory/allocation.hpp"
91 #include "memory/iterator.hpp"
92 #include "memory/heapInspection.hpp"
93 #include "memory/resourceArea.hpp"
94 #include "memory/universe.hpp"
95 #include "oops/access.inline.hpp"
96 #include "oops/compressedOops.inline.hpp"
97 #include "oops/oop.inline.hpp"
98 #include "runtime/atomic.hpp"
99 #include "runtime/flags/flagSetting.hpp"
100 #include "runtime/handles.inline.hpp"
101 #include "runtime/init.hpp"
102 #include "runtime/orderAccess.hpp"
103 #include "runtime/threadSMR.hpp"
104 #include "runtime/vmThread.hpp"
105 #include "utilities/align.hpp"
106 #include "utilities/bitMap.inline.hpp"
107 #include "utilities/globalDefinitions.hpp"
108 #include "utilities/stack.inline.hpp"
109
110 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
111
112 // INVARIANTS/NOTES
145 if (!will_become_free(hr)) {
146 *card_ptr = G1CardTable::dirty_card_val();
147 _num_dirtied++;
148 }
149 }
150
151 size_t num_dirtied() const { return _num_dirtied; }
152 };
153
154
// Invalidate the from-card cache entries for the given range of heap regions,
// delegating to the remembered-set machinery.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
158
// Callback invoked when a range of regions is committed; resets the
// corresponding from-card cache entries.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
164
// Runs the given task with all currently active workers; blocks until done.
void G1CollectedHeap::run_task(AbstractGangTask* task) {
  workers()->run_task(task, workers()->active_workers());
}
168
169 Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
170 Ticks start = Ticks::now();
171 run_task(task);
172 return Ticks::now() - start;
173 }
174
175 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
176 MemRegion mr) {
177 return new HeapRegion(hrs_index, bot(), mr);
178 }
179
180 // Private methods.
181
182 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
183 HeapRegionType type,
184 bool do_expand,
185 uint node_index) {
186 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
187 "the only time we use this to allocate a humongous region is "
188 "when we are allocating a single humongous region");
189
190 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
191
3750 _bytes_used_during_gc = 0;
3751
3752 _expand_heap_after_alloc_failure = true;
3753 _evacuation_failed = false;
3754
3755 // Disable the hot card cache.
3756 _hot_card_cache->reset_hot_cache_claimed_index();
3757 _hot_card_cache->set_use_cache(false);
3758
3759 // Initialize the GC alloc regions.
3760 _allocator->init_gc_alloc_regions(evacuation_info);
3761
3762 {
3763 Ticks start = Ticks::now();
3764 rem_set()->prepare_for_scan_heap_roots();
3765 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3766 }
3767
3768 {
3769 G1PrepareEvacuationTask g1_prep_task(this);
3770 Tickspan task_time = run_task_timed(&g1_prep_task);
3771
3772 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3773 g1_prep_task.humongous_total(),
3774 g1_prep_task.humongous_candidates());
3775 }
3776
3777 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3778 _preserved_marks_set.assert_empty();
3779
3780 #if COMPILER2_OR_JVMCI
3781 DerivedPointerTable::clear();
3782 #endif
3783
3784 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3785 if (collector_state()->in_initial_mark_gc()) {
3786 concurrent_mark()->pre_initial_mark();
3787
3788 double start_clear_claimed_marks = os::elapsedTime();
3789
3790 ClassLoaderDataGraph::clear_claimed_marks();
3900 _root_processor(root_processor)
3901 { }
3902 };
3903
// Evacuates the initial (mandatory) collection set: first merges remembered
// set entries into the card table, then runs the parallel evacuation task,
// recording phase times for each step.
void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  G1GCPhaseTimes* p = phase_times();

  {
    // Time the merging of heap roots separately from evacuation proper.
    Ticks start = Ticks::now();
    rem_set()->merge_heap_roots(true /* initial_evacuation */);
    p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
  }

  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(this, num_workers);
    G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
    task_time = run_task_timed(&g1_par_task);
    // Closing the inner scope will execute the destructor for the G1RootProcessor object.
    // To extract its code root fixup time we measure total time of this scope and
    // subtract from the time the WorkGang task took.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  // Code root fixup time is the scope time minus the task time (see above).
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}
3930
// Gang task that evacuates optional collection set regions, reporting its
// work under the "Opt*" phase-time categories.
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  // Scans heap roots and collection set regions, attributing time to the
  // optional-evacuation phases.
  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  // Drains the task queues, attributing copy/termination time to the
  // optional-evacuation phases.
  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};
3949
// Evacuates the next increment of optional collection set regions and
// accumulates the code root fixup time into the phase times.
void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
    task_time = run_task_timed(&task);
    // See comment in evacuate_collection_set() for the reason of the scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  // Fixup time is scope time minus task time, added across increments.
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}
3967
3968 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3969 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3970
3971 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3972
3973 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3974 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3975
3976 if (time_left_ms < 0 ||
3977 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
3978 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
3979 _collection_set.optional_region_length(), time_left_ms);
4965
4966 void G1CollectedHeap::rebuild_strong_code_roots() {
4967 RebuildStrongCodeRootClosure blob_cl(this);
4968 CodeCache::blobs_do(&blob_cl);
4969 }
4970
// Sets up memory pools/managers; delegates to the G1 monitoring support object.
void G1CollectedHeap::initialize_serviceability() {
  _g1mm->initialize_serviceability();
}
4974
// Returns current heap memory usage as reported by G1 monitoring support.
MemoryUsage G1CollectedHeap::memory_usage() {
  return _g1mm->memory_usage();
}
4978
// Returns the GC memory managers exposed for serviceability/JMX.
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _g1mm->memory_managers();
}
4982
// Returns the memory pools exposed for serviceability/JMX.
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _g1mm->memory_pools();
}
4986
// Parallel iterator over all objects in the heap; workers claim heap regions
// through a shared HeapRegionClaimer so each object is visited exactly once.
class G1ParallelObjectIterator : public ParallelObjectIterator {
private:
  G1CollectedHeap* _heap;
  HeapRegionClaimer _claimer;

public:
  // thread_num == 0 means size the claimer for all currently active workers.
  G1ParallelObjectIterator(uint thread_num) :
    _heap(G1CollectedHeap::heap()),
    _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}

  // Applies cl to the objects in the regions claimed by worker_id.
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
  }
};
5001
// Creates a parallel object iterator sized for thread_num workers
// (0 means use all active workers). Caller owns the returned object.
ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
  return new G1ParallelObjectIterator(thread_num);
}
5005
5006 void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
5007 IterateObjectClosureRegionClosure blk(cl);
5008 heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
5009 }
|