29 #include "classfile/systemDictionary.hpp"
30 #include "code/codeCache.hpp"
31 #include "gc/parallel/parallelScavengeHeap.hpp"
32 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
33 #include "gc/parallel/psMarkSweep.hpp"
34 #include "gc/parallel/psMarkSweepDecorator.hpp"
35 #include "gc/parallel/psOldGen.hpp"
36 #include "gc/parallel/psScavenge.hpp"
37 #include "gc/parallel/psYoungGen.hpp"
38 #include "gc/serial/markSweep.hpp"
39 #include "gc/shared/gcCause.hpp"
40 #include "gc/shared/gcHeapSummary.hpp"
41 #include "gc/shared/gcId.hpp"
42 #include "gc/shared/gcLocker.inline.hpp"
43 #include "gc/shared/gcTimer.hpp"
44 #include "gc/shared/gcTrace.hpp"
45 #include "gc/shared/gcTraceTime.inline.hpp"
46 #include "gc/shared/isGCActiveMark.hpp"
47 #include "gc/shared/referencePolicy.hpp"
48 #include "gc/shared/referenceProcessor.hpp"
49 #include "gc/shared/spaceDecorator.hpp"
50 #include "gc/shared/weakProcessor.hpp"
51 #include "logging/log.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "runtime/biasedLocking.hpp"
54 #include "runtime/safepoint.hpp"
55 #include "runtime/vmThread.hpp"
56 #include "services/management.hpp"
57 #include "services/memoryService.hpp"
58 #include "utilities/align.hpp"
59 #include "utilities/events.hpp"
60 #include "utilities/stack.inline.hpp"
61
// Static state shared by all PSMarkSweep full-collection invocations.
elapsedTimer PSMarkSweep::_accumulated_time;    // cumulative time spent in full collections
jlong PSMarkSweep::_time_of_last_gc = 0;        // time of the last full GC; 0 until one has run (units set where it is recorded -- not visible in this chunk)
CollectorCounters* PSMarkSweep::_counters = NULL;  // perf counters; initialized elsewhere, NULL until then
65
66 void PSMarkSweep::initialize() {
67 MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
68 set_ref_processor(new ReferenceProcessor(mr)); // a vanilla ref proc
81 // Note that the all_soft_refs_clear flag in the collector policy
82 // may be true because this method can be called without intervening
83 // activity. For example when the heap space is tight and full measure
84 // are being taken to free space.
85
86 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
87 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
88 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
89 assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
90
91 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
92 GCCause::Cause gc_cause = heap->gc_cause();
93 PSAdaptiveSizePolicy* policy = heap->size_policy();
94 IsGCActiveMark mark;
95
96 if (ScavengeBeforeFullGC) {
97 PSScavenge::invoke_no_policy();
98 }
99
100 const bool clear_all_soft_refs =
101 heap->collector_policy()->should_clear_all_soft_refs();
102
103 uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
104 UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
105 PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
106 }
107
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  // Bail out without collecting when the GCLocker is active; the false
  // return tells the caller that no GC was performed.
  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Begin GC bookkeeping: fresh GC id, then timer and tracer start events.
  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true); // retire TLABs
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        // Recompute preferred generation sizes; the results are read back
        // below via the calculated_*() accessors.
        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        // Updates the policy's GC-overhead-limit bookkeeping (may set
        // flags consulted elsewhere -- not visible in this chunk).
        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        // Apply the sizes the policy just computed.
        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    // Publish post-GC capacities and collection counts to the perf-data counters.
    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    // Resize TLABs for all threads.
    heap->resize_all_tlabs();
|
29 #include "classfile/systemDictionary.hpp"
30 #include "code/codeCache.hpp"
31 #include "gc/parallel/parallelScavengeHeap.hpp"
32 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
33 #include "gc/parallel/psMarkSweep.hpp"
34 #include "gc/parallel/psMarkSweepDecorator.hpp"
35 #include "gc/parallel/psOldGen.hpp"
36 #include "gc/parallel/psScavenge.hpp"
37 #include "gc/parallel/psYoungGen.hpp"
38 #include "gc/serial/markSweep.hpp"
39 #include "gc/shared/gcCause.hpp"
40 #include "gc/shared/gcHeapSummary.hpp"
41 #include "gc/shared/gcId.hpp"
42 #include "gc/shared/gcLocker.inline.hpp"
43 #include "gc/shared/gcTimer.hpp"
44 #include "gc/shared/gcTrace.hpp"
45 #include "gc/shared/gcTraceTime.inline.hpp"
46 #include "gc/shared/isGCActiveMark.hpp"
47 #include "gc/shared/referencePolicy.hpp"
48 #include "gc/shared/referenceProcessor.hpp"
49 #include "gc/shared/softRefPolicy.hpp"
50 #include "gc/shared/spaceDecorator.hpp"
51 #include "gc/shared/weakProcessor.hpp"
52 #include "logging/log.hpp"
53 #include "oops/oop.inline.hpp"
54 #include "runtime/biasedLocking.hpp"
55 #include "runtime/safepoint.hpp"
56 #include "runtime/vmThread.hpp"
57 #include "services/management.hpp"
58 #include "services/memoryService.hpp"
59 #include "utilities/align.hpp"
60 #include "utilities/events.hpp"
61 #include "utilities/stack.inline.hpp"
62
// Static state shared by all PSMarkSweep full-collection invocations.
elapsedTimer PSMarkSweep::_accumulated_time;    // cumulative time spent in full collections
jlong PSMarkSweep::_time_of_last_gc = 0;        // time of the last full GC; 0 until one has run (units set where it is recorded -- not visible in this chunk)
CollectorCounters* PSMarkSweep::_counters = NULL;  // perf counters; initialized elsewhere, NULL until then
66
67 void PSMarkSweep::initialize() {
68 MemRegion mr = ParallelScavengeHeap::heap()->reserved_region();
69 set_ref_processor(new ReferenceProcessor(mr)); // a vanilla ref proc
82 // Note that the all_soft_refs_clear flag in the collector policy
83 // may be true because this method can be called without intervening
84 // activity. For example when the heap space is tight and full measure
85 // are being taken to free space.
86
87 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
88 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
89 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
90 assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
91
92 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
93 GCCause::Cause gc_cause = heap->gc_cause();
94 PSAdaptiveSizePolicy* policy = heap->size_policy();
95 IsGCActiveMark mark;
96
97 if (ScavengeBeforeFullGC) {
98 PSScavenge::invoke_no_policy();
99 }
100
101 const bool clear_all_soft_refs =
102 heap->soft_ref_policy()->should_clear_all_soft_refs();
103
104 uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
105 UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
106 PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
107 }
108
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  // Bail out without collecting when the GCLocker is active; the false
  // return tells the caller that no GC was performed.
  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  // Begin GC bookkeeping: fresh GC id, then timer and tracer start events.
  GCIdMark gc_id_mark;
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // SoftRefPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true); // retire TLABs
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        // Recompute preferred generation sizes; the results are read back
        // below via the calculated_*() accessors.
        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        // Updates the policy's GC-overhead-limit bookkeeping (may set
        // flags consulted elsewhere -- not visible in this chunk).
        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->soft_ref_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        // Apply the sizes the policy just computed.
        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    // Publish post-GC capacities and collection counts to the perf-data counters.
    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    // Resize TLABs for all threads.
    heap->resize_all_tlabs();
|