70 #include "gc/shared/gcBehaviours.hpp"
71 #include "gc/shared/gcHeapSummary.hpp"
72 #include "gc/shared/gcId.hpp"
73 #include "gc/shared/gcLocker.hpp"
74 #include "gc/shared/gcTimer.hpp"
75 #include "gc/shared/gcTraceTime.inline.hpp"
76 #include "gc/shared/generationSpec.hpp"
77 #include "gc/shared/isGCActiveMark.hpp"
78 #include "gc/shared/locationPrinter.inline.hpp"
79 #include "gc/shared/oopStorageParState.hpp"
80 #include "gc/shared/preservedMarks.inline.hpp"
81 #include "gc/shared/suspendibleThreadSet.hpp"
82 #include "gc/shared/referenceProcessor.inline.hpp"
83 #include "gc/shared/taskTerminator.hpp"
84 #include "gc/shared/taskqueue.inline.hpp"
85 #include "gc/shared/weakProcessor.inline.hpp"
86 #include "gc/shared/workerPolicy.hpp"
87 #include "logging/log.hpp"
88 #include "memory/allocation.hpp"
89 #include "memory/iterator.hpp"
90 #include "memory/resourceArea.hpp"
91 #include "memory/universe.hpp"
92 #include "oops/access.inline.hpp"
93 #include "oops/compressedOops.inline.hpp"
94 #include "oops/oop.inline.hpp"
95 #include "runtime/atomic.hpp"
96 #include "runtime/flags/flagSetting.hpp"
97 #include "runtime/handles.inline.hpp"
98 #include "runtime/init.hpp"
99 #include "runtime/orderAccess.hpp"
100 #include "runtime/threadSMR.hpp"
101 #include "runtime/vmThread.hpp"
102 #include "utilities/align.hpp"
103 #include "utilities/bitMap.inline.hpp"
104 #include "utilities/globalDefinitions.hpp"
105 #include "utilities/stack.inline.hpp"
106
// Presumably the word-size threshold above which an allocation is treated as
// humongous; starts at 0 and is set during heap initialization — not visible
// in this chunk, confirm where it is computed.
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
108
109 // INVARIANTS/NOTES
142 if (!will_become_free(hr)) {
143 *card_ptr = G1CardTable::dirty_card_val();
144 _num_dirtied++;
145 }
146 }
147
148 size_t num_dirtied() const { return _num_dirtied; }
149 };
150
151
// Invalidate the from-card-cache entries for the given range of regions.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
155
// Called when a range of regions is committed.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter; always invalidate the entries.
  reset_from_card_cache(start_idx, num_regions);
}
161
162 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
163 Ticks start = Ticks::now();
164 workers()->run_task(task, workers()->active_workers());
165 return Ticks::now() - start;
166 }
167
// Factory method: create a new HeapRegion covering mr with heap region
// sequence index hrs_index, sharing this heap's block offset table.
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}
172
173 // Private methods.
174
175 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
176 HeapRegionType type,
177 bool do_expand,
178 uint node_index) {
179 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
180 "the only time we use this to allocate a humongous region is "
181 "when we are allocating a single humongous region");
182
183 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
184
3749 _bytes_used_during_gc = 0;
3750
3751 _expand_heap_after_alloc_failure = true;
3752 _evacuation_failed = false;
3753
3754 // Disable the hot card cache.
3755 _hot_card_cache->reset_hot_cache_claimed_index();
3756 _hot_card_cache->set_use_cache(false);
3757
3758 // Initialize the GC alloc regions.
3759 _allocator->init_gc_alloc_regions(evacuation_info);
3760
3761 {
3762 Ticks start = Ticks::now();
3763 rem_set()->prepare_for_scan_heap_roots();
3764 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3765 }
3766
3767 {
3768 G1PrepareEvacuationTask g1_prep_task(this);
3769 Tickspan task_time = run_task(&g1_prep_task);
3770
3771 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3772 g1_prep_task.humongous_total(),
3773 g1_prep_task.humongous_candidates());
3774 }
3775
3776 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3777 _preserved_marks_set.assert_empty();
3778
3779 #if COMPILER2_OR_JVMCI
3780 DerivedPointerTable::clear();
3781 #endif
3782
3783 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3784 if (collector_state()->in_initial_mark_gc()) {
3785 concurrent_mark()->pre_initial_mark();
3786
3787 double start_clear_claimed_marks = os::elapsedTime();
3788
3789 ClassLoaderDataGraph::clear_claimed_marks();
3896 _root_processor(root_processor)
3897 { }
3898 };
3899
3900 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3901 G1GCPhaseTimes* p = phase_times();
3902
3903 {
3904 Ticks start = Ticks::now();
3905 rem_set()->merge_heap_roots(true /* initial_evacuation */);
3906 p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3907 }
3908
3909 Tickspan task_time;
3910 const uint num_workers = workers()->active_workers();
3911
3912 Ticks start_processing = Ticks::now();
3913 {
3914 G1RootProcessor root_processor(this, num_workers);
3915 G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
3916 task_time = run_task(&g1_par_task);
3917 // Closing the inner scope will execute the destructor for the G1RootProcessor object.
3918 // To extract its code root fixup time we measure total time of this scope and
3919 // subtract from the time the WorkGang task took.
3920 }
3921 Tickspan total_processing = Ticks::now() - start_processing;
3922
3923 p->record_initial_evac_time(task_time.seconds() * 1000.0);
3924 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3925 }
3926
3927 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
3928
3929 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3930 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
3931 _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
3932 }
3933
3934 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3935 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
3936 }
3937
3938 public:
3939 G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
3940 RefToScanQueueSet* queues,
3941 uint num_workers) :
3942 G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
3943 }
3944 };
3945
3946 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3947 class G1MarkScope : public MarkScope { };
3948
3949 Tickspan task_time;
3950
3951 Ticks start_processing = Ticks::now();
3952 {
3953 G1MarkScope code_mark_scope;
3954 G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3955 task_time = run_task(&task);
3956 // See comment in evacuate_collection_set() for the reason of the scope.
3957 }
3958 Tickspan total_processing = Ticks::now() - start_processing;
3959
3960 G1GCPhaseTimes* p = phase_times();
3961 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3962 }
3963
3964 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3965 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3966
3967 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3968
3969 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3970 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3971
3972 if (time_left_ms < 0 ||
3973 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
3974 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
3975 _collection_set.optional_region_length(), time_left_ms);
4960
// Walk all code blobs in the code cache, applying the rebuild closure to
// re-register strong code roots with the heap.
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
4965
// Delegate serviceability (memory pool/manager) setup to the G1 monitoring support object.
void G1CollectedHeap::initialize_serviceability() {
  _g1mm->initialize_serviceability();
}
4969
// Delegate heap memory usage reporting to the G1 monitoring support object.
MemoryUsage G1CollectedHeap::memory_usage() {
  return _g1mm->memory_usage();
}
4973
// Delegate memory manager enumeration to the G1 monitoring support object.
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _g1mm->memory_managers();
}
4977
// Delegate memory pool enumeration to the G1 monitoring support object.
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _g1mm->memory_pools();
}
|
70 #include "gc/shared/gcBehaviours.hpp"
71 #include "gc/shared/gcHeapSummary.hpp"
72 #include "gc/shared/gcId.hpp"
73 #include "gc/shared/gcLocker.hpp"
74 #include "gc/shared/gcTimer.hpp"
75 #include "gc/shared/gcTraceTime.inline.hpp"
76 #include "gc/shared/generationSpec.hpp"
77 #include "gc/shared/isGCActiveMark.hpp"
78 #include "gc/shared/locationPrinter.inline.hpp"
79 #include "gc/shared/oopStorageParState.hpp"
80 #include "gc/shared/preservedMarks.inline.hpp"
81 #include "gc/shared/suspendibleThreadSet.hpp"
82 #include "gc/shared/referenceProcessor.inline.hpp"
83 #include "gc/shared/taskTerminator.hpp"
84 #include "gc/shared/taskqueue.inline.hpp"
85 #include "gc/shared/weakProcessor.inline.hpp"
86 #include "gc/shared/workerPolicy.hpp"
87 #include "logging/log.hpp"
88 #include "memory/allocation.hpp"
89 #include "memory/iterator.hpp"
90 #include "memory/heapInspection.hpp"
91 #include "memory/resourceArea.hpp"
92 #include "memory/universe.hpp"
93 #include "oops/access.inline.hpp"
94 #include "oops/compressedOops.inline.hpp"
95 #include "oops/oop.inline.hpp"
96 #include "runtime/atomic.hpp"
97 #include "runtime/flags/flagSetting.hpp"
98 #include "runtime/handles.inline.hpp"
99 #include "runtime/init.hpp"
100 #include "runtime/orderAccess.hpp"
101 #include "runtime/threadSMR.hpp"
102 #include "runtime/vmThread.hpp"
103 #include "utilities/align.hpp"
104 #include "utilities/bitMap.inline.hpp"
105 #include "utilities/globalDefinitions.hpp"
106 #include "utilities/stack.inline.hpp"
107
// Presumably the word-size threshold above which an allocation is treated as
// humongous; starts at 0 and is set during heap initialization — not visible
// in this chunk, confirm where it is computed.
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
109
110 // INVARIANTS/NOTES
143 if (!will_become_free(hr)) {
144 *card_ptr = G1CardTable::dirty_card_val();
145 _num_dirtied++;
146 }
147 }
148
149 size_t num_dirtied() const { return _num_dirtied; }
150 };
151
152
// Invalidate the from-card-cache entries for the given range of regions.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
156
// Called when a range of regions is committed.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter; always invalidate the entries.
  reset_from_card_cache(start_idx, num_regions);
}
162
// Run the given gang task using all currently active workers.
void G1CollectedHeap::run_task(AbstractGangTask* task) {
  workers()->run_task(task, workers()->active_workers());
}
166
167 Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
168 Ticks start = Ticks::now();
169 run_task(task);
170 return Ticks::now() - start;
171 }
172
// Factory method: create a new HeapRegion covering mr with heap region
// sequence index hrs_index, sharing this heap's block offset table.
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}
177
178 // Private methods.
179
180 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
181 HeapRegionType type,
182 bool do_expand,
183 uint node_index) {
184 assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
185 "the only time we use this to allocate a humongous region is "
186 "when we are allocating a single humongous region");
187
188 HeapRegion* res = _hrm->allocate_free_region(type, node_index);
189
3754 _bytes_used_during_gc = 0;
3755
3756 _expand_heap_after_alloc_failure = true;
3757 _evacuation_failed = false;
3758
3759 // Disable the hot card cache.
3760 _hot_card_cache->reset_hot_cache_claimed_index();
3761 _hot_card_cache->set_use_cache(false);
3762
3763 // Initialize the GC alloc regions.
3764 _allocator->init_gc_alloc_regions(evacuation_info);
3765
3766 {
3767 Ticks start = Ticks::now();
3768 rem_set()->prepare_for_scan_heap_roots();
3769 phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
3770 }
3771
3772 {
3773 G1PrepareEvacuationTask g1_prep_task(this);
3774 Tickspan task_time = run_task_timed(&g1_prep_task);
3775
3776 phase_times()->record_register_regions(task_time.seconds() * 1000.0,
3777 g1_prep_task.humongous_total(),
3778 g1_prep_task.humongous_candidates());
3779 }
3780
3781 assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
3782 _preserved_marks_set.assert_empty();
3783
3784 #if COMPILER2_OR_JVMCI
3785 DerivedPointerTable::clear();
3786 #endif
3787
3788 // InitialMark needs claim bits to keep track of the marked-through CLDs.
3789 if (collector_state()->in_initial_mark_gc()) {
3790 concurrent_mark()->pre_initial_mark();
3791
3792 double start_clear_claimed_marks = os::elapsedTime();
3793
3794 ClassLoaderDataGraph::clear_claimed_marks();
3901 _root_processor(root_processor)
3902 { }
3903 };
3904
3905 void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3906 G1GCPhaseTimes* p = phase_times();
3907
3908 {
3909 Ticks start = Ticks::now();
3910 rem_set()->merge_heap_roots(true /* initial_evacuation */);
3911 p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
3912 }
3913
3914 Tickspan task_time;
3915 const uint num_workers = workers()->active_workers();
3916
3917 Ticks start_processing = Ticks::now();
3918 {
3919 G1RootProcessor root_processor(this, num_workers);
3920 G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
3921 task_time = run_task_timed(&g1_par_task);
3922 // Closing the inner scope will execute the destructor for the G1RootProcessor object.
3923 // To extract its code root fixup time we measure total time of this scope and
3924 // subtract from the time the WorkGang task took.
3925 }
3926 Tickspan total_processing = Ticks::now() - start_processing;
3927
3928 p->record_initial_evac_time(task_time.seconds() * 1000.0);
3929 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3930 }
3931
3932 class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {
3933
3934 void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
3935 _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
3936 _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
3937 }
3938
3939 void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
3940 G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
3941 }
3942
3943 public:
3944 G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
3945 RefToScanQueueSet* queues,
3946 uint num_workers) :
3947 G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
3948 }
3949 };
3950
3951 void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
3952 class G1MarkScope : public MarkScope { };
3953
3954 Tickspan task_time;
3955
3956 Ticks start_processing = Ticks::now();
3957 {
3958 G1MarkScope code_mark_scope;
3959 G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
3960 task_time = run_task_timed(&task);
3961 // See comment in evacuate_collection_set() for the reason of the scope.
3962 }
3963 Tickspan total_processing = Ticks::now() - start_processing;
3964
3965 G1GCPhaseTimes* p = phase_times();
3966 p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
3967 }
3968
3969 void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
3970 const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;
3971
3972 while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {
3973
3974 double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
3975 double time_left_ms = MaxGCPauseMillis - time_used_ms;
3976
3977 if (time_left_ms < 0 ||
3978 !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
3979 log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
3980 _collection_set.optional_region_length(), time_left_ms);
4965
// Walk all code blobs in the code cache, applying the rebuild closure to
// re-register strong code roots with the heap.
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
4970
// Delegate serviceability (memory pool/manager) setup to the G1 monitoring support object.
void G1CollectedHeap::initialize_serviceability() {
  _g1mm->initialize_serviceability();
}
4974
// Delegate heap memory usage reporting to the G1 monitoring support object.
MemoryUsage G1CollectedHeap::memory_usage() {
  return _g1mm->memory_usage();
}
4978
// Delegate memory manager enumeration to the G1 monitoring support object.
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _g1mm->memory_managers();
}
4982
// Delegate memory pool enumeration to the G1 monitoring support object.
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _g1mm->memory_pools();
}
4986
4987 class G1ParallelObjectIterator : public ParallelObjectIterator {
4988 private:
4989 G1CollectedHeap* _heap;
4990 HeapRegionClaimer _claimer;
4991
4992 public:
4993 G1ParallelObjectIterator(uint thread_num) :
4994 _heap(G1CollectedHeap::heap()),
4995 _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}
4996
4997 virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
4998 _heap->object_iterate_parallel(cl, worker_id, &_claimer);
4999 }
5000 };
5001
// Create a parallel object iterator sized for thread_num worker threads.
// Caller is responsible for the returned object's lifetime — TODO confirm
// the deallocation convention at the call sites.
ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
  return new G1ParallelObjectIterator(thread_num);
}
5005
5006 void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
5007 IterateObjectClosureRegionClosure blk(cl);
5008 heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
5009 }
|