1 /*
2 * Copyright (c) 2004, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/adaptiveSizePolicy.hpp"
27 #include "gc/shared/collectorPolicy.hpp"
28 #include "gc/shared/gcCause.hpp"
29 #include "gc/shared/gcUtil.inline.hpp"
30 #include "gc/shared/softRefPolicy.hpp"
31 #include "gc/shared/workgroup.hpp"
32 #include "logging/log.hpp"
33 #include "runtime/timer.hpp"
34 #include "utilities/ostream.hpp"
35
36 elapsedTimer AdaptiveSizePolicy::_minor_timer;
37 elapsedTimer AdaptiveSizePolicy::_major_timer;
38 bool AdaptiveSizePolicy::_debug_perturbation = false;
39
40 // The throughput goal is implemented as
41 // _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
42 // gc_cost_ratio is the ratio
43 // application cost / gc cost
44 // For example a gc_cost_ratio of 4 translates into a
45 // throughput goal of .80
46
47 AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
48 size_t init_promo_size,
49 size_t init_survivor_size,
50 double gc_pause_goal_sec,
51 uint gc_cost_ratio) :
52 _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
53 _eden_size(init_eden_size),
54 _promo_size(init_promo_size),
55 _survivor_size(init_survivor_size),
56 _gc_overhead_limit_exceeded(false),
57 _print_gc_overhead_limit_would_be_exceeded(false),
58 _gc_overhead_limit_count(0),
75
76 _avg_survived = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
77 SurvivorPadding);
78 _avg_pretenured = new AdaptivePaddedNoZeroDevAverage(
79 AdaptiveSizePolicyWeight,
80 SurvivorPadding);
81
82 _minor_pause_old_estimator =
83 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
84 _minor_pause_young_estimator =
85 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
86 _minor_collection_estimator =
87 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
88 _major_collection_estimator =
89 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
90
91 // Start the timers
92 _minor_timer.start();
93
94 _young_gen_policy_is_ready = false;
95 }
96
97 // If the number of GC threads was set on the command line,
98 // use it.
99 // Else
100 // Calculate the number of GC threads based on the number of Java threads.
101 // Calculate the number of GC threads based on the size of the heap.
102 // Use the larger.
103
104 uint AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
105 const uintx min_workers,
106 uintx active_workers,
107 uintx application_workers) {
108 // If the user has specifically set the number of
109 // GC threads, use them.
110
111 // If the user has turned off using a dynamic number of GC threads
112 // or the users has requested a specific number, set the active
113 // number of workers to all the workers.
114
115 uintx new_active_workers = total_workers;
116 uintx prev_active_workers = active_workers;
117 uintx active_workers_by_JT = 0;
118 uintx active_workers_by_heap_size = 0;
119
120 // Always use at least min_workers but use up to
121 // GCThreadsPerJavaThreads * application threads.
122 active_workers_by_JT =
123 MAX2((uintx) GCWorkersPerJavaThread * application_workers,
124 min_workers);
125
126 // Choose a number of GC threads based on the current size
127 // of the heap. This may be complicated because the size of
128 // the heap depends on factors such as the throughput goal.
129 // Still a large heap should be collected by more GC threads.
130 active_workers_by_heap_size =
131 MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
132
133 uintx max_active_workers =
134 MAX2(active_workers_by_JT, active_workers_by_heap_size);
135
136 new_active_workers = MIN2(max_active_workers, (uintx) total_workers);
137
138 // Increase GC workers instantly but decrease them more
139 // slowly.
140 if (new_active_workers < prev_active_workers) {
141 new_active_workers =
142 MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
143 }
144
145 // Check once more that the number of workers is within the limits.
146 assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
147 assert(new_active_workers >= min_workers, "Minimum workers not observed");
148 assert(new_active_workers <= total_workers, "Total workers not observed");
149
150 if (ForceDynamicNumberOfGCThreads) {
151 // Assume this is debugging and jiggle the number of GC threads.
152 if (new_active_workers == prev_active_workers) {
153 if (new_active_workers < total_workers) {
154 new_active_workers++;
155 } else if (new_active_workers > min_workers) {
156 new_active_workers--;
157 }
158 }
159 if (new_active_workers == total_workers) {
160 if (_debug_perturbation) {
161 new_active_workers = min_workers;
162 }
163 _debug_perturbation = !_debug_perturbation;
164 }
165 assert((new_active_workers <= ParallelGCThreads) &&
166 (new_active_workers >= min_workers),
167 "Jiggled active workers too much");
168 }
169
170 log_trace(gc, task)("GCTaskManager::calc_default_active_workers() : "
171 "active_workers(): " UINTX_FORMAT " new_active_workers: " UINTX_FORMAT " "
172 "prev_active_workers: " UINTX_FORMAT "\n"
173 " active_workers_by_JT: " UINTX_FORMAT " active_workers_by_heap_size: " UINTX_FORMAT,
174 active_workers, new_active_workers, prev_active_workers,
175 active_workers_by_JT, active_workers_by_heap_size);
176 assert(new_active_workers > 0, "Always need at least 1");
177 return new_active_workers;
178 }
179
180 uint AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
181 uintx active_workers,
182 uintx application_workers) {
183 // If the user has specifically set the number of
184 // GC threads, use them.
185
186 // If the user has turned off using a dynamic number of GC threads
187 // or the users has requested a specific number, set the active
188 // number of workers to all the workers.
189
190 uint new_active_workers;
191 if (!UseDynamicNumberOfGCThreads ||
192 (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
193 new_active_workers = total_workers;
194 } else {
195 uintx min_workers = (total_workers == 1) ? 1 : 2;
196 new_active_workers = calc_default_active_workers(total_workers,
197 min_workers,
198 active_workers,
199 application_workers);
200 }
201 assert(new_active_workers > 0, "Always need at least 1");
202 return new_active_workers;
203 }
204
205 uint AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
206 uintx active_workers,
207 uintx application_workers) {
208 if (!UseDynamicNumberOfGCThreads ||
209 (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
210 return ConcGCThreads;
211 } else {
212 uint no_of_gc_threads = calc_default_active_workers(total_workers,
213 1, /* Minimum number of workers */
214 active_workers,
215 application_workers);
216 return no_of_gc_threads;
217 }
218 }
219
220 bool AdaptiveSizePolicy::tenuring_threshold_change() const {
221 return decrement_tenuring_threshold_for_gc_cost() ||
222 increment_tenuring_threshold_for_gc_cost() ||
223 decrement_tenuring_threshold_for_survivor_limit();
224 }
225
// Called at the start of a minor collection.  _minor_timer has been running
// since the previous minor collection finished (or since construction), so
// its elapsed time is the length of the mutator interval that just ended.
// The statement order matters: stop, read, reset, restart.
void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save most recent collection time: this is mutator time between
  // minor collections, not pause time.
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  // Restart the timer so it now measures the collection itself.
  // NOTE(review): presumably read again when the collection ends — the
  // end-of-collection path is not visible in this chunk; confirm there.
  _minor_timer.reset();
  _minor_timer.start();
}
234
235 void AdaptiveSizePolicy::update_minor_pause_young_estimator(
236 double minor_pause_in_ms) {
237 double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
|
1 /*
2 * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/adaptiveSizePolicy.hpp"
27 #include "gc/shared/gcCause.hpp"
28 #include "gc/shared/gcUtil.inline.hpp"
29 #include "gc/shared/softRefPolicy.hpp"
30 #include "logging/log.hpp"
31 #include "runtime/timer.hpp"
32
33 elapsedTimer AdaptiveSizePolicy::_minor_timer;
34 elapsedTimer AdaptiveSizePolicy::_major_timer;
35
36 // The throughput goal is implemented as
37 // _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
38 // gc_cost_ratio is the ratio
39 // application cost / gc cost
40 // For example a gc_cost_ratio of 4 translates into a
41 // throughput goal of .80
42
43 AdaptiveSizePolicy::AdaptiveSizePolicy(size_t init_eden_size,
44 size_t init_promo_size,
45 size_t init_survivor_size,
46 double gc_pause_goal_sec,
47 uint gc_cost_ratio) :
48 _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
49 _eden_size(init_eden_size),
50 _promo_size(init_promo_size),
51 _survivor_size(init_survivor_size),
52 _gc_overhead_limit_exceeded(false),
53 _print_gc_overhead_limit_would_be_exceeded(false),
54 _gc_overhead_limit_count(0),
71
72 _avg_survived = new AdaptivePaddedAverage(AdaptiveSizePolicyWeight,
73 SurvivorPadding);
74 _avg_pretenured = new AdaptivePaddedNoZeroDevAverage(
75 AdaptiveSizePolicyWeight,
76 SurvivorPadding);
77
78 _minor_pause_old_estimator =
79 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
80 _minor_pause_young_estimator =
81 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
82 _minor_collection_estimator =
83 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
84 _major_collection_estimator =
85 new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
86
87 // Start the timers
88 _minor_timer.start();
89
90 _young_gen_policy_is_ready = false;
91 }
92
93 bool AdaptiveSizePolicy::tenuring_threshold_change() const {
94 return decrement_tenuring_threshold_for_gc_cost() ||
95 increment_tenuring_threshold_for_gc_cost() ||
96 decrement_tenuring_threshold_for_survivor_limit();
97 }
98
// Called at the start of a minor collection.  _minor_timer has been running
// since the previous minor collection finished (or since construction), so
// its elapsed time is the length of the mutator interval that just ended.
// The statement order matters: stop, read, reset, restart.
void AdaptiveSizePolicy::minor_collection_begin() {
  // Update the interval time
  _minor_timer.stop();
  // Save most recent collection time: this is mutator time between
  // minor collections, not pause time.
  _latest_minor_mutator_interval_seconds = _minor_timer.seconds();
  // Restart the timer so it now measures the collection itself.
  // NOTE(review): presumably read again when the collection ends — the
  // end-of-collection path is not visible in this chunk; confirm there.
  _minor_timer.reset();
  _minor_timer.start();
}
107
108 void AdaptiveSizePolicy::update_minor_pause_young_estimator(
109 double minor_pause_in_ms) {
110 double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
|