#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/heapInspection.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// INVARIANTS/NOTES
// ...
    if (!will_become_free(hr)) {
      *card_ptr = G1CardTable::dirty_card_val();
      _num_dirtied++;
    }
  }

  size_t num_dirtied() const { return _num_dirtied; }
};


void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}

void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed, so we
  // cannot take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}

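// Runs the given task using the work gang's currently active workers.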
void G1CollectedHeap::run_task(AbstractGangTask* task) {
  workers()->run_task(task, workers()->active_workers());
}

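// As run_task(), but returns the wall-clock time the task took, so callers
// can record it in the GC phase times.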
Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
  Ticks start = Ticks::now();
  run_task(task);
  return Ticks::now() - start;
}

HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}

// Private methods.

HeapRegion* G1CollectedHeap::new_region(size_t word_size,
                                        HeapRegionType type,
                                        bool do_expand,
                                        uint node_index) {
  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res = _hrm->allocate_free_region(type, node_index);
  // ...

// Iterates an ObjectClosure over all objects within a HeapRegion.

class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  ObjectClosure* _cl;
public:
  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool do_heap_region(HeapRegion* r) {
    // Continuation regions of a humongous object contain no separate objects;
    // the object is visited once via its "starts humongous" region.
    if (!r->is_continues_humongous()) {
      r->object_iterate(_cl);
    }
    return false;
  }
};

void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}

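// Parallel counterpart of object_iterate(): a shared HeapRegionClaimer hands
// out disjoint sets of regions to the worker threads, so every object is
// visited exactly once across all workers.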
class G1ParallelObjectIterator : public ParallelObjectIterator {
private:
  G1CollectedHeap*  _heap;
  HeapRegionClaimer _claimer;

public:
  G1ParallelObjectIterator(uint thread_num) :
      _heap(G1CollectedHeap::heap()),
      // A thread_num of 0 means "size the claimer for the gang's current active workers".
      _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
  }
};

ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
  return new G1ParallelObjectIterator(thread_num);
}

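// Per-worker entry point of the parallel object walk; the claimer guarantees
// that no heap region is iterated by more than one worker.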
void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
}

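// Enqueues the object on the SATB queue so that concurrent marking will
// consider it live.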
void G1CollectedHeap::keep_alive(oop obj) {
  G1BarrierSet::enqueue(obj);
}

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrm->iterate(cl);
}

void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                 HeapRegionClaimer* hrclaimer,
                                                                 uint worker_id) const {
  _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
}

void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                                         HeapRegionClaimer* hrclaimer) const {
  _hrm->par_iterate(cl, hrclaimer, 0);
}

void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
  // ...
  _bytes_used_during_gc = 0;

  _expand_heap_after_alloc_failure = true;
  _evacuation_failed = false;

  // Disable the hot card cache.
  _hot_card_cache->reset_hot_cache_claimed_index();
  _hot_card_cache->set_use_cache(false);

  // Initialize the GC alloc regions.
  _allocator->init_gc_alloc_regions(evacuation_info);

  {
    Ticks start = Ticks::now();
    rem_set()->prepare_for_scan_heap_roots();
    phase_times()->record_prepare_heap_roots_time_ms((Ticks::now() - start).seconds() * 1000.0);
  }

  {
    G1PrepareEvacuationTask g1_prep_task(this);
    Tickspan task_time = run_task_timed(&g1_prep_task);

    phase_times()->record_register_regions(task_time.seconds() * 1000.0,
                                           g1_prep_task.humongous_total(),
                                           g1_prep_task.humongous_candidates());
  }

  assert(_verifier->check_region_attr_table(), "Inconsistency in the region attributes table.");
  _preserved_marks_set.assert_empty();

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  // Concurrent start needs claim bits to keep track of the marked-through CLDs.
  if (collector_state()->in_concurrent_start_gc()) {
    concurrent_mark()->pre_concurrent_start();

    double start_clear_claimed_marks = os::elapsedTime();

    ClassLoaderDataGraph::clear_claimed_marks();
    // ...
    _root_processor(root_processor)
  { }
};

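// Evacuate the initial (non-optional) collection set: merge the heap root
// remembered sets first, then run the parallel evacuation task. Time spent in
// the scope beyond the task itself is attributed to code root fixup.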
void G1CollectedHeap::evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  G1GCPhaseTimes* p = phase_times();

  {
    Ticks start = Ticks::now();
    rem_set()->merge_heap_roots(true /* initial_evacuation */);
    p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
  }

  Tickspan task_time;
  const uint num_workers = workers()->active_workers();

  Ticks start_processing = Ticks::now();
  {
    G1RootProcessor root_processor(this, num_workers);
    G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
    task_time = run_task_timed(&g1_par_task);
    // Closing the inner scope will execute the destructor for the G1RootProcessor object.
    // To extract its code root fixup time we measure the total time of this scope
    // and subtract the time the WorkGang task took.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  p->record_initial_evac_time(task_time.seconds() * 1000.0);
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}

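// Evacuation task for the optional part of the collection set; it reuses the
// base evacuation machinery but records its work under the Opt* phase ids.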
class G1EvacuateOptionalRegionsTask : public G1EvacuateRegionsBaseTask {

  void scan_roots(G1ParScanThreadState* pss, uint worker_id) {
    _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptObjCopy);
    _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::OptCodeRoots, G1GCPhaseTimes::OptObjCopy);
  }

  void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) {
    G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::OptObjCopy, G1GCPhaseTimes::OptTermination);
  }

public:
  G1EvacuateOptionalRegionsTask(G1ParScanThreadStateSet* per_thread_states,
                                G1ScannerTasksQueueSet* queues,
                                uint num_workers) :
    G1EvacuateRegionsBaseTask("G1 Evacuate Optional Regions", per_thread_states, queues, num_workers) {
  }
};

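// Runs one round of optional-region evacuation; as above, scope time not spent
// in the task itself is recorded as code root fixup time.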
void G1CollectedHeap::evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states) {
  class G1MarkScope : public MarkScope { };

  Tickspan task_time;

  Ticks start_processing = Ticks::now();
  {
    G1MarkScope code_mark_scope;
    G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
    task_time = run_task_timed(&task);
    // See comment in evacuate_collection_set() for the reason for this scope.
  }
  Tickspan total_processing = Ticks::now() - start_processing;

  G1GCPhaseTimes* p = phase_times();
  p->record_or_add_code_root_fixup_time((total_processing - task_time).seconds() * 1000.0);
}

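// Evacuate optional regions in rounds while no evacuation failure has occurred
// and the remaining pause time budget allows another increment.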
void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
  const double gc_start_time_ms = phase_times()->cur_collection_start_sec() * 1000.0;

  while (!evacuation_failed() && _collection_set.optional_region_length() > 0) {

    double time_used_ms = os::elapsedTime() * 1000.0 - gc_start_time_ms;
    double time_left_ms = MaxGCPauseMillis - time_used_ms;

    if (time_left_ms < 0 ||
        !_collection_set.finalize_optional_for_evacuation(time_left_ms * policy()->optional_evacuation_fraction())) {
      log_trace(gc, ergo, cset)("Skipping evacuation of %u optional regions, no more regions can be evacuated in %.3fms",
                                _collection_set.optional_region_length(), time_left_ms);
      // ...