11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "gc/shared/gcPolicyCounters.hpp"
26 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
27 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
31 #include "gc/shenandoah/shenandoahPartialGC.hpp"
32 #include "runtime/os.hpp"
33 #include "utilities/quickSort.hpp"
34
// Ergonomically turn a flag off: only acts when the user did not set the flag
// explicitly on the command line (FLAG_IS_DEFAULT), and logs the decision.
#define SHENANDOAH_ERGO_DISABLE_FLAG(name)                        \
  do {                                                            \
    if (FLAG_IS_DEFAULT(name) && (name)) {                        \
      log_info(gc)("Heuristics ergonomically sets -XX:-" #name);  \
      FLAG_SET_DEFAULT(name, false);                              \
    }                                                             \
  } while (0)
42
// Ergonomically turn a flag on: only acts when the user did not set the flag
// explicitly on the command line (FLAG_IS_DEFAULT), and logs the decision.
#define SHENANDOAH_ERGO_ENABLE_FLAG(name)                         \
  do {                                                            \
    if (FLAG_IS_DEFAULT(name) && !(name)) {                       \
      log_info(gc)("Heuristics ergonomically sets -XX:+" #name);  \
      FLAG_SET_DEFAULT(name, true);                               \
    }                                                             \
  } while (0)
50
51 #define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value) \
162 _last_cycle_end = os::elapsedTime();
163 }
164
  // Hook for heuristics that track per-phase timings; base implementation ignores them.
  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    // Do nothing
  }
168
  // Print heuristics-specific thresholds at startup; base implementation prints nothing.
  virtual void print_thresholds() {
  }
171
  // Decide whether a regular concurrent GC cycle should start now; every concrete heuristics must answer this.
  virtual bool should_start_normal_gc() const = 0;
173
  // Whether a concurrent update-references phase should follow; driven by the _update_refs_early decision.
  virtual bool should_start_update_refs() {
    return _update_refs_early;
  }
177
  // Whether this heuristics runs with a separate update-references phase at all.
  virtual bool update_refs() const {
    return _update_refs_early;
  }
181
  // Partial GC trigger; base heuristics never starts partial cycles.
  virtual bool should_start_partial_gc() {
    return false;
  }
185
  // Capability query: base heuristics does not support partial GC.
  virtual bool can_do_partial_gc() {
    return false;
  }
189
  // Traversal GC trigger; base heuristics never starts traversal cycles.
  virtual bool should_start_traversal_gc() {
    return false;
  }
193
  // Capability query: base heuristics does not support traversal GC.
  virtual bool can_do_traversal_gc() {
    return false;
  }
197
  // A cancelled concurrent cycle may continue as a Degenerated (STW) cycle, but only while
  // the run of consecutive degenerated cycles stays within ShenandoahFullGCThreshold;
  // past that, callers upgrade to Full GC instead.
  virtual bool should_degenerate_cycle() {
    return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }
201
  // A concurrent cycle completed: reset the degenerated streak, extend the success streak.
  virtual void record_success_concurrent() {
    _degenerated_cycles_in_a_row = 0;
    _successful_cycles_in_a_row++;
  }
206
  // A degenerated cycle completed: extend the degenerated streak, reset the success streak.
  virtual void record_success_degenerated() {
    _degenerated_cycles_in_a_row++;
    _successful_cycles_in_a_row = 0;
  }
211
391
// Forward GC-start bookkeeping to the active major heuristics.
void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}
395
// Forward GC-end bookkeeping to the active major heuristics.
void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}
399
400 class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
401 public:
  // Passive mode: no concurrent cycles, no pacing, and all known barriers disabled
  // (unless the user explicitly enabled them). GC happens only on allocation failure.
  ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
    // Do not allow concurrent cycles.
    FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);

    // Passive runs with max speed, reacts on allocation failure.
    FLAG_SET_DEFAULT(ShenandoahPacing, false);

    // Disable known barriers by default.
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahConditionalSATBBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValWriteBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(UseShenandoahMatrix);
  }
423
424 virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
425 RegionData* data, size_t size,
426 size_t free) {
427 for (size_t idx = 0; idx < size; idx++) {
428 ShenandoahHeapRegion* r = data[idx]._region;
429 if (r->garbage() > 0) {
430 cset->add_region(r);
431 }
432 }
433 }
434
435 virtual bool should_start_normal_gc() const {
436 // Never do concurrent GCs.
868 if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
869 _update_refs_early = true;
870 }
871 }
872 return _update_refs_early;
873 }
874
  // Heuristics name, as matched against -XX:ShenandoahGCHeuristics.
  virtual const char* name() {
    return "adaptive";
  }
878
  // Adaptive is a product-grade heuristics; no UnlockDiagnosticVMOptions needed.
  virtual bool is_diagnostic() {
    return false;
  }
882
  // Adaptive is a product-grade heuristics; no UnlockExperimentalVMOptions needed.
  virtual bool is_experimental() {
    return false;
  }
886 };
887
888 class ShenandoahPartialHeuristics : public ShenandoahAdaptiveHeuristics {
889 protected:
890 size_t* _from_idxs;
891
892 public:
893 ShenandoahPartialHeuristics() : ShenandoahAdaptiveHeuristics() {
894 FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
895
896 // Set up special barriers for concurrent partial GC.
897 FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, true);
898 FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
899 FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier, true);
900 FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
901
902 SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
903 // TODO: Disable this optimization for now, as it also requires the matrix barriers.
904 #ifdef COMPILER2
905 FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
906 #endif
907 }
908
909 void initialize() {
910 _from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahHeap::heap()->num_regions(), mtGC);
911 }
912
913 virtual ~ShenandoahPartialHeuristics() {
914 FREE_C_HEAP_ARRAY(size_t, _from_idxs);
915 }
916
917 bool should_start_update_refs() {
918 return true;
919 }
920
921 bool update_refs() const {
922 return true;
923 }
924
925 bool can_do_partial_gc() {
926 return true;
927 }
928
929 bool should_start_normal_gc() const {
930 return false;
931 }
932
933 virtual bool is_diagnostic() {
934 return false;
935 }
936
937 virtual bool is_experimental() {
938 return true;
939 }
940
941 virtual bool should_start_partial_gc() = 0;
942 virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) = 0;
943
944 };
945
// Partial heuristics that picks regions with few incoming references ("weakly
// connected" regions), as counted by the connection matrix, so the root set for
// the partial cycle stays small.
class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics {
public:
  virtual const char* name() {
    return "connectedness";
  }

  // Trigger a partial cycle once allocations since the last GC exceed
  // ShenandoahConnectednessPercentage of heap capacity.
  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity = heap->capacity();
    size_t used = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    size_t threshold = heap->capacity() * ShenandoahConnectednessPercentage / 100;
    size_t allocated = used - prev_used;
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    // Keep the log quiet unless we actually trigger.
    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result;
  }

  void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    size_t num_regions = heap->num_regions();

    RegionConnections* connects = get_region_connects_cache(num_regions);
    size_t connect_cnt = 0;

    // Gather regular regions whose inbound-connection count is under the
    // threshold; clear every region's root flag on the way (re-set below).
    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = heap->get_region(to_idx);
      region->set_root(false);
      if (!region->is_regular()) continue;

      uint count = matrix->count_connected_to(to_idx, num_regions);
      if (count < ShenandoahPartialInboundThreshold) {
        connects[connect_cnt]._region = region;
        connects[connect_cnt]._connections = count;
        connect_cnt++;
      }
    }

    // Least-connected regions first, so the cheapest-to-collect come out on top.
    QuickSort::sort<RegionConnections>(connects, (int)connect_cnt, compare_by_connects, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all them in this cycle. This "adjusts" the threshold automatically.
    size_t used = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN3(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions, connect_cnt);

    for (size_t c = 0; c < target; c++) {
      assert (c == 0 || connects[c]._connections >= connects[c-1]._connections, "monotonicity");

      ShenandoahHeapRegion* region = connects[c]._region;
      size_t to_idx = region->region_number();
      assert(region->is_regular(), "filtered before");
      assert(! heap->region_in_collection_set(to_idx), "must not be in cset yet");

      size_t from_idx_count = 0;
      // Only take the region if the matrix can enumerate all regions pointing
      // into it (under the inbound threshold); those become root regions.
      if (matrix->enumerate_connected_to(to_idx, num_regions,
                                         _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        maybe_add_heap_region(region, collection_set);
        for (size_t i = 0; i < from_idx_count; i++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[i]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }

    collection_set->update_region_status();
  }
};
1042
1043 class ShenandoahGenerationalPartialHeuristics : public ShenandoahPartialHeuristics {
1044 public:
1045
1046 ShenandoahGenerationalPartialHeuristics() : ShenandoahPartialHeuristics() {
1047 SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
1048 }
1049
1050 virtual const char* name() {
1051 return "generational";
1052 }
1053
1054 virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
1055 ShenandoahHeap* heap = ShenandoahHeap::heap();
1056 ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
1057 uint64_t alloc_seq_at_last_gc_end = heap->alloc_seq_at_last_gc_end();
1058 uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();
1059
1060 size_t num_regions = heap->num_regions();
1061
1062 RegionData* candidates = get_region_data_cache(num_regions);
1063
1064 for (size_t i = 0; i < num_regions; i++) {
1065 candidates[i]._region = heap->get_region(i);
1066 candidates[i]._seqnum_last_alloc = heap->get_region(i)->seqnum_last_alloc();
1067 }
1068
1069 QuickSort::sort<RegionData>(candidates, (int)num_regions, compare_by_alloc_seq_descending, false);
1070
1071 // Heuristics triggered partial when allocated was larger than a threshold.
1072 // New allocations might have happened while we were preparing for GC,
1073 // capture all them in this cycle. This "adjusts" the threshold automatically.
1074 size_t used = heap->used();
1075 size_t prev_used = heap->used_at_last_gc();
1076 guarantee(used >= prev_used, "Invariant");
1077 size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions);
1078
1079 for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
1080 ShenandoahHeapRegion* region = heap->get_region(to_idx);
1081 region->set_root(false);
1082 }
1083
1084 uint count = 0;
1085
1086 for (uint i = 0; (i < num_regions) && (count < target); i++) {
1087 ShenandoahHeapRegion* contender = candidates[i]._region;
1088 if (contender->seqnum_last_alloc() <= alloc_seq_at_last_gc_end) {
1089 break;
1090 }
1091
1092 size_t index = contender->region_number();
1093 size_t from_idx_count = 0;
1094 if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
1095 ShenandoahPartialInboundThreshold)) {
1096 if (maybe_add_heap_region(contender, collection_set)) {
1097 count++;
1098 }
1099
1100 for (uint f = 0; f < from_idx_count; f++) {
1101 ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
1102 if (!r->is_root()) {
1103 r->set_root(true);
1104 }
1105 }
1106 }
1107 }
1108 collection_set->update_region_status();
1109
1110 log_info(gc,ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
1111 num_regions, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count());
1112 }
1113
1114 bool should_start_partial_gc() {
1115 ShenandoahHeap* heap = ShenandoahHeap::heap();
1116
1117 if (heap->has_forwarded_objects()) {
1118 // Cannot start partial if heap is not completely updated.
1119 return false;
1120 }
1121
1122 size_t capacity = heap->capacity();
1123 size_t used = heap->used();
1124 size_t prev_used = heap->used_at_last_gc();
1125
1126 if (used < prev_used) {
1127 // Major collection must have happened, "used" data is unreliable, wait for update.
1128 return false;
1129 }
1130
1131 size_t threshold = heap->capacity() * ShenandoahGenerationalYoungGenPercentage / 100;
1132 size_t allocated = used - prev_used;
1133
1134 // Start the next young gc after we've allocated percentage_young of the heap.
1135 bool result = allocated > threshold;
1136
1137 FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
1138 "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
1139 result ? "Partial cycle triggered" : "Partial cycle skipped",
1140 capacity/M, used/M, prev_used/M, allocated/M, threshold/M);
1141
1142 if (result) {
1143 log_info(gc,ergo)("%s", msg.buffer());
1144 } else {
1145 log_trace(gc,ergo)("%s", msg.buffer());
1146 }
1147 return result;
1148 }
1149 };
1150
1151 class ShenandoahLRUPartialHeuristics : public ShenandoahPartialHeuristics {
1152 public:
1153 ShenandoahLRUPartialHeuristics() : ShenandoahPartialHeuristics() {
1154 SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
1155 }
1156
1157 virtual const char* name() {
1158 return "LRU";
1159 }
1160
1161 virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
1162 ShenandoahHeap* heap = ShenandoahHeap::heap();
1163 ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
1164 uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();
1165
1166 size_t num_regions = heap->num_regions();
1167
1168 RegionData* candidates = get_region_data_cache(num_regions);
1169 int candidate_idx = 0;
1170 for (size_t i = 0; i < num_regions; i++) {
1171 ShenandoahHeapRegion* r = heap->get_region(i);
1172 if (r->is_regular() && (r->seqnum_last_alloc() > 0)) {
1173 candidates[candidate_idx]._region = heap->get_region(i);
1174 candidates[candidate_idx]._seqnum_last_alloc = heap->get_region(i)->seqnum_last_alloc();
1175 candidate_idx++;
1176 }
1177 }
1178
1179 size_t sorted_count = candidate_idx;
1180 QuickSort::sort<RegionData>(candidates, (int)sorted_count, compare_by_alloc_seq_ascending, false);
1181
1182 // Heuristics triggered partial when allocated was larger than a threshold.
1183 // New allocations might have happened while we were preparing for GC,
1184 // capture all them in this cycle. This "adjusts" the threshold automatically.
1185 size_t used = heap->used();
1186 size_t prev_used = heap->used_at_last_gc();
1187 guarantee(used >= prev_used, "Invariant");
1188 size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_count);
1189
1190 for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
1191 ShenandoahHeapRegion* region = heap->get_region(to_idx);
1192 region->set_root(false);
1193 }
1194 uint count = 0;
1195
1196 for (uint i = 0; (i < sorted_count) && (count < target); i++) {
1197 ShenandoahHeapRegion* contender = candidates[i]._region;
1198 if (contender->seqnum_last_alloc() >= alloc_seq_at_last_gc_start) {
1199 break;
1200 }
1201
1202 size_t index = contender->region_number();
1203 size_t from_idx_count = 0;
1204 if (matrix->enumerate_connected_to(index, num_regions,_from_idxs, from_idx_count,
1205 ShenandoahPartialInboundThreshold)) {
1206 if (maybe_add_heap_region(contender, collection_set)) {
1207 count++;
1208 }
1209 for (uint f = 0; f < from_idx_count; f++) {
1210 ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
1211 if (!r->is_root()) {
1212 r->set_root(true);
1213 }
1214 }
1215 }
1216 }
1217 collection_set->update_region_status();
1218
1219 log_info(gc,ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
1220 num_regions, target, ShenandoahLRUOldGenPercentage, collection_set->count());
1221 }
1222
1223 bool should_start_partial_gc() {
1224 ShenandoahHeap* heap = ShenandoahHeap::heap();
1225
1226 if (heap->has_forwarded_objects()) {
1227 // Cannot start partial if heap is not completely updated.
1228 return false;
1229 }
1230
1231 size_t capacity = heap->capacity();
1232 size_t used = heap->used();
1233 size_t prev_used = heap->used_at_last_gc();
1234
1235 if (used < prev_used) {
1236 // Major collection must have happened, "used" data is unreliable, wait for update.
1237 return false;
1238 }
1239
1240 // For now don't start until we are 40% full
1241 size_t allocated = used - prev_used;
1242 size_t threshold = heap->capacity() * ShenandoahLRUOldGenPercentage / 100;
1243 size_t minimum = heap->capacity() * 0.4;
1244
1245 bool result = ((used > minimum) && (allocated > threshold));
1246
1247 FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
1248 "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M, Minimum: " SIZE_FORMAT "M",
1249 result ? "Partial cycle triggered" : "Partial cycle skipped",
1250 capacity/M, used/M, prev_used/M, allocated/M, threshold/M, minimum/M);
1251
1252 if (result) {
1253 log_info(gc,ergo)("%s", msg.buffer());
1254 } else {
1255 log_trace(gc,ergo)("%s", msg.buffer());
1256 }
1257 return result;
1258 }
1259
1260 };
1261
// Heuristics for the (experimental) traversal GC mode, which marks, evacuates
// and updates references in a single traversal. Requires its own barrier setup.
class ShenandoahTraversalHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahTraversalHeuristics() : ShenandoahHeuristics() {
    // Traversal does not use the connection matrix, and needs the
    // enqueue-flavored store-val barrier instead of SATB/read/write variants.
    FLAG_SET_DEFAULT(UseShenandoahMatrix, false);
    FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahBarriersForConst, true);
    FLAG_SET_DEFAULT(ShenandoahWriteBarrierRB, false);
    FLAG_SET_DEFAULT(ShenandoahAllocImplicitLive, false);
    FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false);
  }

  // Traversal replaces the normal concurrent cycle entirely.
  virtual bool should_start_normal_gc() const {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool can_do_traversal_gc() {
    return true;
  }

  virtual const char* name() {
    return "traversal";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    // Take every regular region whose garbage fraction exceeds
    // ShenandoahGarbageThreshold; also reset mark-start watermarks and live data
    // for all regions, since traversal re-marks everything.
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      assert(!r->is_root(), "must not be root region");
      assert(!collection_set->is_in(r), "must not yet be in cset");
      if (r->is_regular() && r->used() > 0) {
        size_t garbage_percent = r->garbage() * 100 / ShenandoahHeapRegion::region_size_bytes();
        if (garbage_percent > ShenandoahGarbageThreshold) {
          collection_set->add_region(r);
        }
      }
      heap->set_next_top_at_mark_start(r->bottom(), r->top());
      heap->set_complete_top_at_mark_start(r->bottom(), r->top()); // For debugging purposes
      r->clear_live_data();
    }
    collection_set->update_region_status();
  }

  virtual bool should_start_traversal_gc() {

    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) return false;

    // Periodic trigger: guarantee a GC at least every ShenandoahGuaranteedGCInterval ms.
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    if (periodic_gc) {
      log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                        last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    // Otherwise trigger when free space drops below ShenandoahFreeThreshold percent.
    size_t capacity = heap->capacity();
    size_t used = heap->used();
    return 100 - (used * 100 / capacity) < ShenandoahFreeThreshold;
  }

  // Traversal picks its collection set directly in choose_collection_set();
  // the regiondata-based selection path is never used for this heuristics.
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t free) {
    ShouldNotReachHere();
  }
};
1342
1343
// Set up the policy: zero all statistics counters, size heap regions, and
// instantiate the major (and optionally minor/partial) heuristics selected via
// -XX:ShenandoahGCHeuristics, validating unlock flags and barrier combinations.
ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _success_concurrent_gcs(0),
  _success_partial_gcs(0),
  _success_degenerated_gcs(0),
  _success_full_gcs(0),
  _explicit_concurrent(0),
  _explicit_full(0),
  _alloc_failure_degenerated(0),
  _alloc_failure_full(0),
  _alloc_failure_degenerated_upgrade_to_full(0)
{
  Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);

  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();

  if (ShenandoahGCHeuristics != NULL) {
    _minor_heuristics = NULL;
    // "connected"/"generational"/"LRU" install a partial (minor) heuristics on
    // top of adaptive; the rest install a single major heuristics.
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahPartialConnectedHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahGenerationalPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahLRUPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    // Diagnostic/experimental heuristics require the corresponding unlock flags.
    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _minor_heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _minor_heuristics->name()));
    }

    // Reject mutually-exclusive barrier combinations (heuristics ctors may have
    // set some of these; users may have forced others).
    if (ShenandoahConditionalSATBBarrier && ShenandoahSATBBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahSATBBarrier and ShenandoahConditionalSATBBarrier");
    }
    if (ShenandoahStoreValWriteBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValReadBarrier");
    }
    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
    }
    if (ShenandoahStoreValWriteBarrier && ShenandoahStoreValEnqueueBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValEnqueueBarrier");
    }
    if (_minor_heuristics != NULL) {
      log_info(gc, init)("Shenandoah heuristics: %s minor with %s major",
                         _minor_heuristics->name(), _heuristics->name());
    } else {
      log_info(gc, init)("Shenandoah heuristics: %s",
                         _heuristics->name());
    }
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
}
1436
// Identity downcast used by the collector-policy framework.
ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}
1440
// Shenandoah uses its own barrier set.
BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::Shenandoah;
}
1444
// Policy-driven allocation is not used by Shenandoah; the heap allocates directly.
HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
1451
// Policy-driven failed-allocation handling is not used by Shenandoah.
HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
1456
// Align both space and heap to the region size, so address-to-region arithmetic
// in ShenandoahHeap::heap_region_containing() stays a simple shift/mask.
void ShenandoahCollectorPolicy::initialize_alignments() {

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}
1463
// Second-stage heuristics setup, run after the heap exists (heuristics may need
// num_regions and other heap-derived values).
void ShenandoahCollectorPolicy::post_heap_initialize() {
  _heuristics->initialize();
  if (_minor_heuristics != NULL) {
    _minor_heuristics->initialize();
  }
}
1470
// An explicit GC request (e.g. System.gc()) was served by a concurrent cycle.
void ShenandoahCollectorPolicy::record_explicit_to_concurrent() {
  _heuristics->record_explicit_gc();
  _explicit_concurrent++;
}
1475
// An explicit GC request was served by a Full GC.
void ShenandoahCollectorPolicy::record_explicit_to_full() {
  _heuristics->record_explicit_gc();
  _explicit_full++;
}
1480
// An allocation failure escalated directly to a Full GC.
void ShenandoahCollectorPolicy::record_alloc_failure_to_full() {
  _heuristics->record_allocation_failure_gc();
  _alloc_failure_full++;
}
1485
// An allocation failure degenerated the concurrent cycle; also histogram at
// which phase (degen point) the cycle was abandoned.
void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) {
  assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity");
  _heuristics->record_allocation_failure_gc();
  _alloc_failure_degenerated++;
  _degen_points[point]++;
}
1492
// A degenerated cycle could not finish and was upgraded to Full GC.
void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() {
  _alloc_failure_degenerated_upgrade_to_full++;
}
1496
// Count a completed concurrent cycle and let the heuristics update its streaks.
void ShenandoahCollectorPolicy::record_success_concurrent() {
  _heuristics->record_success_concurrent();
  _success_concurrent_gcs++;
}
1501
// Count a completed partial cycle (statistics only; no heuristics feedback).
void ShenandoahCollectorPolicy::record_success_partial() {
  _success_partial_gcs++;
}
1505
// Count a completed degenerated cycle and let the heuristics update its streaks.
void ShenandoahCollectorPolicy::record_success_degenerated() {
  _heuristics->record_success_degenerated();
  _success_degenerated_gcs++;
}
1510
// Count a completed Full GC and let the heuristics update its state.
void ShenandoahCollectorPolicy::record_success_full() {
  _heuristics->record_success_full();
  _success_full_gcs++;
}
1515
// Delegate the normal-cycle trigger decision to the major heuristics.
bool ShenandoahCollectorPolicy::should_start_normal_gc() {
  return _heuristics->should_start_normal_gc();
}
1519
// Delegate the degenerate-vs-Full decision to the major heuristics.
bool ShenandoahCollectorPolicy::should_degenerate_cycle() {
  return _heuristics->should_degenerate_cycle();
}
1523
1524 bool ShenandoahCollectorPolicy::update_refs() {
1525 if (_minor_heuristics != NULL && _minor_heuristics->update_refs()) {
1526 return true;
1527 }
1528 return _heuristics->update_refs();
1529 }
1530
1531 bool ShenandoahCollectorPolicy::should_start_update_refs() {
1532 if (_minor_heuristics != NULL && _minor_heuristics->should_start_update_refs()) {
1533 return true;
1534 }
1535 return _heuristics->should_start_update_refs();
1536 }
1537
// Forward peak-occupancy sampling to the major heuristics.
void ShenandoahCollectorPolicy::record_peak_occupancy() {
  _heuristics->record_peak_occupancy();
}
1541
1542 void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set,
1543 bool minor) {
1544 if (minor)
1545 _minor_heuristics->choose_collection_set(collection_set);
1546 else
1547 _heuristics->choose_collection_set(collection_set);
1548 }
1549
// Delegate the reference-processing decision to the major heuristics.
bool ShenandoahCollectorPolicy::should_process_references() {
  return _heuristics->should_process_references();
}
1553
// Delegate the class-unloading decision to the major heuristics.
bool ShenandoahCollectorPolicy::should_unload_classes() {
  return _heuristics->should_unload_classes();
}
1557
// Number of GC cycles started so far (incremented in record_cycle_start()).
size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}
1561
// Forward per-phase timing to the major heuristics.
void ShenandoahCollectorPolicy::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
  _heuristics->record_phase_time(phase, secs);
}
1565
1566 bool ShenandoahCollectorPolicy::should_start_partial_gc() {
1567 if (_minor_heuristics != NULL) {
1568 return _minor_heuristics->should_start_partial_gc();
1569 } else {
1570 return false; // no minor heuristics -> no partial gc
1571 }
1572 }
1573
1574 bool ShenandoahCollectorPolicy::can_do_partial_gc() {
1575 if (_minor_heuristics != NULL) {
1576 return _minor_heuristics->can_do_partial_gc();
1577 } else {
1578 return false; // no minor heuristics -> no partial gc
1579 }
1580 }
1581
// Delegate the traversal-cycle trigger to the major heuristics.
bool ShenandoahCollectorPolicy::should_start_traversal_gc() {
  return _heuristics->should_start_traversal_gc();
}
1585
// Delegate the traversal capability query to the major heuristics.
bool ShenandoahCollectorPolicy::can_do_traversal_gc() {
  return _heuristics->can_do_traversal_gc();
}
1589
// Bump the cycle counter and notify the major heuristics that a cycle starts.
void ShenandoahCollectorPolicy::record_cycle_start() {
  _cycle_counter++;
  _heuristics->record_cycle_start();
}
1594
// Notify the major heuristics that the cycle has ended.
void ShenandoahCollectorPolicy::record_cycle_end() {
  _heuristics->record_cycle_end();
}
1598
// Latch the shutdown flag; checked by GC threads via is_at_shutdown().
void ShenandoahCollectorPolicy::record_shutdown() {
  _in_shutdown.set();
}
1602
// Whether VM shutdown has been recorded.
bool ShenandoahCollectorPolicy::is_at_shutdown() {
  return _in_shutdown.is_set();
}
1606
1607 void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
1608 out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle");
1609 out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,");
1610 out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate");
1611 out->print_cr("to avoid Degenerated and Full GC cycles.");
1612 out->cr();
1613
1614 out->print_cr(SIZE_FORMAT_W(5) " successful partial concurrent GCs", _success_partial_gcs);
1615 out->cr();
1616
1617 out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs", _success_concurrent_gcs);
1618 out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_concurrent);
1619 out->cr();
1620
1621 out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs", _success_degenerated_gcs);
1622 out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated);
1623 for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) {
1624 if (_degen_points[c] > 0) {
1625 const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c);
1626 out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_points[c], desc);
1627 }
1628 }
1629 out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC", _alloc_failure_degenerated_upgrade_to_full);
1630 out->cr();
1631
1632 out->print_cr(SIZE_FORMAT_W(5) " Full GCs", _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full);
1633 out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_full);
1634 out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full);
|
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "gc/shared/gcPolicyCounters.hpp"
26 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
27 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
31 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
32 #include "runtime/os.hpp"
33 #include "utilities/quickSort.hpp"
34
35 #define SHENANDOAH_ERGO_DISABLE_FLAG(name) \
36 do { \
37 if (FLAG_IS_DEFAULT(name) && (name)) { \
38 log_info(gc)("Heuristics ergonomically sets -XX:-" #name); \
39 FLAG_SET_DEFAULT(name, false); \
40 } \
41 } while (0)
42
43 #define SHENANDOAH_ERGO_ENABLE_FLAG(name) \
44 do { \
45 if (FLAG_IS_DEFAULT(name) && !(name)) { \
46 log_info(gc)("Heuristics ergonomically sets -XX:+" #name); \
47 FLAG_SET_DEFAULT(name, true); \
48 } \
49 } while (0)
50
51 #define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value) \
162 _last_cycle_end = os::elapsedTime();
163 }
164
  // Hook for heuristics that react to per-phase timings; no-op by default.
  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    // Do nothing
  }
168
  // Hook for heuristics to log their tuning thresholds at startup; no-op by default.
  virtual void print_thresholds() {
  }
171
  // Pure virtual: each concrete heuristics decides when a normal concurrent cycle starts.
  virtual bool should_start_normal_gc() const = 0;
173
  // Whether the update-refs phase should be started now (early mode only).
  virtual bool should_start_update_refs() {
    return _update_refs_early;
  }
177
  // Whether this heuristics runs update-refs as a separate (early) phase.
  virtual bool update_refs() const {
    return _update_refs_early;
  }
181
  // Traversal cycles are off by default; traversal-capable heuristics override this.
  virtual ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
    return ShenandoahHeap::NONE;
  }
185
  // By default, heuristics do not support traversal-mode collections.
  virtual bool can_do_traversal_gc() {
    return false;
  }
189
  // Allow degenerating the cycle until too many back-to-back degenerations
  // have happened; past the threshold, upgrade to Full GC instead.
  virtual bool should_degenerate_cycle() {
    return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }
193
  // A concurrent cycle completed: reset the degeneration streak.
  virtual void record_success_concurrent() {
    _degenerated_cycles_in_a_row = 0;
    _successful_cycles_in_a_row++;
  }
198
  // A degenerated cycle completed: extend the degeneration streak.
  virtual void record_success_degenerated() {
    _degenerated_cycles_in_a_row++;
    _successful_cycles_in_a_row = 0;
  }
203
383
// Notifies the heuristics that a GC (any kind) is starting.
void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}
387
// Notifies the heuristics that the GC has ended.
void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}
391
392 class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
393 public:
  // Passive mode: no concurrent cycles, no pacing, and all optional barriers
  // ergonomically disabled. GC happens only on explicit request or alloc failure.
  ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
    // Do not allow concurrent cycles.
    FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);

    // Passive runs with max speed, reacts on allocation failure.
    FLAG_SET_DEFAULT(ShenandoahPacing, false);

    // Disable known barriers by default.
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(UseShenandoahMatrix);
  }
413
414 virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
415 RegionData* data, size_t size,
416 size_t free) {
417 for (size_t idx = 0; idx < size; idx++) {
418 ShenandoahHeapRegion* r = data[idx]._region;
419 if (r->garbage() > 0) {
420 cset->add_region(r);
421 }
422 }
423 }
424
425 virtual bool should_start_normal_gc() const {
426 // Never do concurrent GCs.
858 if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
859 _update_refs_early = true;
860 }
861 }
862 return _update_refs_early;
863 }
864
  // Heuristics identifier, matched against -XX:ShenandoahGCHeuristics.
  virtual const char* name() {
    return "adaptive";
  }
868
  // Adaptive is a product-quality mode; no diagnostic unlock required.
  virtual bool is_diagnostic() {
    return false;
  }
872
  // Adaptive is a product-quality mode; no experimental unlock required.
  virtual bool is_experimental() {
    return false;
  }
876 };
877
// Heuristics for traversal mode: the whole collection happens in a single
// traversal pass, so there is no separate update-refs phase and no connection
// matrix. Triggers on a free-space threshold or on the guaranteed GC interval.
class ShenandoahTraversalHeuristics : public ShenandoahHeuristics {
protected:

public:
  ShenandoahTraversalHeuristics() : ShenandoahHeuristics() {
    // Traversal relies on enqueue-style store-val barriers; matrix, SATB and
    // read-based store-val barriers are not used in this mode.
    FLAG_SET_DEFAULT(UseShenandoahMatrix, false);
    FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahBarriersForConst, true);
    FLAG_SET_DEFAULT(ShenandoahWriteBarrierRB, false);
    FLAG_SET_DEFAULT(ShenandoahAllocImplicitLive, false);
    FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs, false);
    FLAG_SET_DEFAULT(ShenandoahRecycleClearsBitmap, true);

    // By default, process references and unload classes on every cycle.
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);

  }

  // Normal (mark/evac/update-refs) cycles never run in traversal mode.
  virtual bool should_start_normal_gc() const {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool can_do_traversal_gc() {
    return true;
  }

  virtual const char* name() {
    return "traversal";
  }

  // Builds the cset for a major traversal: every regular, non-empty region above
  // the garbage threshold is collected; ALL regions join the traversal set.
  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    // No root regions in this mode.
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    root_regions->clear();

    ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set();
    traversal_set->clear();
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      assert(!collection_set->is_in(r), "must not yet be in cset");
      if (r->is_regular() && r->used() > 0) {
        size_t garbage_percent = r->garbage() * 100 / ShenandoahHeapRegion::region_size_bytes();
        if (garbage_percent > ShenandoahGarbageThreshold) {
          collection_set->add_region(r);
        }
      }
      // Liveness is recomputed during the traversal itself.
      r->clear_live_data();
      traversal_set->add_region(r);
    }
    collection_set->update_region_status();
  }

  virtual ShenandoahHeap::GCCycleMode should_start_traversal_gc() {

    ShenandoahHeap* heap = ShenandoahHeap::heap();

    // Cannot start traversal while references to evacuated objects remain unfixed.
    if (heap->has_forwarded_objects()) return ShenandoahHeap::NONE;

    // Periodic trigger: guarantee a GC at least every ShenandoahGuaranteedGCInterval ms.
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    if (periodic_gc) {
      log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                        last_time_ms, ShenandoahGuaranteedGCInterval);
      return ShenandoahHeap::MAJOR;
    }

    // Space trigger: start a major cycle when free space drops below the threshold.
    size_t capacity = heap->capacity();
    size_t used = heap->used();
    return 100 - (used * 100 / capacity) < ShenandoahFreeThreshold ? ShenandoahHeap::MAJOR : ShenandoahHeap::NONE;
  }

protected:
  // Region-data selection is never used by traversal heuristics.
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t free) {
    ShouldNotReachHere();
  }
};
970
971 class ShenandoahPartialHeuristics : public ShenandoahTraversalHeuristics {
972 protected:
973 size_t* _from_idxs;
974
975 bool is_minor_gc() const { return ShenandoahHeap::heap()->is_minor_gc(); }
976
977 // Utility method to remove any cset regions from root set and
978 // add all cset regions to the traversal set.
979 void filter_regions() {
980 ShenandoahHeap* heap = ShenandoahHeap::heap();
981 ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
982 size_t num_regions = heap->num_regions();
983 ShenandoahCollectionSet* collection_set = heap->collection_set();
984 ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
985 ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set();
986 traversal_set->clear();
987
988 for (size_t i = 0; i < num_regions; i++) {
989 ShenandoahHeapRegion* region = heap->get_region(i);
990 if (collection_set->is_in(i)) {
991 if (root_regions->is_in(i)) {
992 root_regions->remove_region(region);
993 }
994 traversal_set->add_region_check_for_duplicates(region);
995 assert(traversal_set->is_in(i), "must be in traversal set now");
996 }
997 }
998 }
999
1000 public:
1001 ShenandoahPartialHeuristics() :
1002 ShenandoahTraversalHeuristics() {
1003
1004 FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
1005
1006 // TODO: Disable this optimization for now, as it also requires the matrix barriers.
1007 #ifdef COMPILER2
1008 FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
1009 #endif
1010 }
1011
1012 void initialize() {
1013 _from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahHeap::heap()->num_regions(), mtGC);
1014 }
1015
1016 virtual ~ShenandoahPartialHeuristics() {
1017 FREE_C_HEAP_ARRAY(size_t, _from_idxs);
1018 }
1019
1020 bool should_start_update_refs() {
1021 return false;
1022 }
1023
1024 bool update_refs() const {
1025 return false;
1026 }
1027
1028 virtual bool should_unload_classes() {
1029 return ShenandoahUnloadClassesFrequency != 0;
1030 }
1031
1032 virtual bool should_process_references() {
1033 return ShenandoahRefProcFrequency != 0;
1034 }
1035
1036 bool should_start_normal_gc() const {
1037 return false;
1038 }
1039
1040 virtual bool is_diagnostic() {
1041 return false;
1042 }
1043
1044 virtual bool is_experimental() {
1045 return true;
1046 }
1047
1048 };
1049
// Partial heuristics that picks regions by "connectedness": regions with few
// inbound references (per the connection matrix) are cheapest to evacuate,
// since only a small root set must be scanned.
class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics {
public:
  virtual const char* name() {
    return "connectedness";
  }

  ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
    // Base class may demand a MAJOR cycle (periodic/free-threshold); honor it first.
    ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc();
    if (cycle_mode != ShenandoahHeap::NONE) {
      return cycle_mode;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return ShenandoahHeap::NONE;
    }

    size_t capacity = heap->capacity();
    size_t used = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return ShenandoahHeap::NONE;
    }

    // Trigger a minor cycle once allocations since the last GC exceed the threshold.
    size_t threshold = heap->capacity() * ShenandoahConnectednessPercentage / 100;
    size_t allocated = used - prev_used;
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE;
  }

  void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    // Major cycles fall back to the whole-heap traversal selection.
    if (!is_minor_gc()) {
      return ShenandoahPartialHeuristics::choose_collection_set(collection_set);
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    root_regions->clear();
    size_t num_regions = heap->num_regions();

    RegionConnections* connects = get_region_connects_cache(num_regions);
    size_t connect_cnt = 0;

    // Gather regular regions with few enough inbound references to be candidates.
    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = heap->get_region(to_idx);
      if (!region->is_regular()) continue;

      uint count = matrix->count_connected_to(to_idx, num_regions);
      if (count < ShenandoahPartialInboundThreshold) {
        connects[connect_cnt]._region = region;
        connects[connect_cnt]._connections = count;
        connect_cnt++;
      }
    }

    // Least-connected candidates first.
    QuickSort::sort<RegionConnections>(connects, (int)connect_cnt, compare_by_connects, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all them in this cycle. This "adjusts" the threshold automatically.
    size_t used = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN3(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions, connect_cnt);

    for (size_t c = 0; c < target; c++) {
      assert (c == 0 || connects[c]._connections >= connects[c-1]._connections, "monotonicity");

      ShenandoahHeapRegion* region = connects[c]._region;
      size_t to_idx = region->region_number();
      assert(region->is_regular(), "filtered before");
      assert(! heap->region_in_collection_set(to_idx), "must not be in cset yet");

      // Add the region only if its full inbound set fits the threshold; its
      // "from" regions become roots that must be scanned during traversal.
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(to_idx, num_regions,
                                         _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        maybe_add_heap_region(region, collection_set);
        for (size_t i = 0; i < from_idx_count; i++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[i]);
          root_regions->add_region_check_for_duplicates(r);
        }
      }
    }
    filter_regions();
    collection_set->update_region_status();
  }
};
1155
1156 class ShenandoahGenerationalPartialHeuristics : public ShenandoahPartialHeuristics {
1157 public:
1158
1159 ShenandoahGenerationalPartialHeuristics() : ShenandoahPartialHeuristics() {
1160 SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
1161 }
1162
1163 virtual const char* name() {
1164 return "generational";
1165 }
1166
1167 virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
1168 if (!is_minor_gc()) {
1169 return ShenandoahPartialHeuristics::choose_collection_set(collection_set);
1170 }
1171
1172 ShenandoahHeap* heap = ShenandoahHeap::heap();
1173 ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1174 ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
1175 uint64_t alloc_seq_at_last_gc_end = heap->alloc_seq_at_last_gc_end();
1176 uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();
1177
1178 size_t num_regions = heap->num_regions();
1179
1180 RegionData* candidates = get_region_data_cache(num_regions);
1181
1182 for (size_t i = 0; i < num_regions; i++) {
1183 candidates[i]._region = heap->get_region(i);
1184 candidates[i]._seqnum_last_alloc = heap->get_region(i)->seqnum_last_alloc();
1185 }
1186
1187 QuickSort::sort<RegionData>(candidates, (int)num_regions, compare_by_alloc_seq_descending, false);
1188
1189 // Heuristics triggered partial when allocated was larger than a threshold.
1190 // New allocations might have happened while we were preparing for GC,
1191 // capture all them in this cycle. This "adjusts" the threshold automatically.
1192 size_t used = heap->used();
1193 size_t prev_used = heap->used_at_last_gc();
1194 guarantee(used >= prev_used, "Invariant");
1195 size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions);
1196
1197 ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
1198 root_regions->clear();
1199
1200 uint count = 0;
1201
1202 for (uint i = 0; (i < num_regions) && (count < target); i++) {
1203 ShenandoahHeapRegion* contender = candidates[i]._region;
1204 if (contender->seqnum_last_alloc() <= alloc_seq_at_last_gc_end) {
1205 break;
1206 }
1207
1208 size_t index = contender->region_number();
1209 size_t from_idx_count = 0;
1210 if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
1211 ShenandoahPartialInboundThreshold)) {
1212 if (maybe_add_heap_region(contender, collection_set)) {
1213 count++;
1214 }
1215
1216 for (uint f = 0; f < from_idx_count; f++) {
1217 ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
1218 root_regions->add_region_check_for_duplicates(r);
1219 }
1220 }
1221 }
1222 filter_regions();
1223 collection_set->update_region_status();
1224
1225 log_info(gc,ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
1226 num_regions, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count());
1227 }
1228
1229 ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
1230 ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc();
1231 if (cycle_mode != ShenandoahHeap::NONE) {
1232 return cycle_mode;
1233 }
1234
1235 ShenandoahHeap* heap = ShenandoahHeap::heap();
1236
1237 if (heap->has_forwarded_objects()) {
1238 // Cannot start partial if heap is not completely updated.
1239 return ShenandoahHeap::NONE;
1240 }
1241
1242 size_t capacity = heap->capacity();
1243 size_t used = heap->used();
1244 size_t prev_used = heap->used_at_last_gc();
1245
1246 if (used < prev_used) {
1247 // Major collection must have happened, "used" data is unreliable, wait for update.
1248 return ShenandoahHeap::NONE;
1249 }
1250
1251 size_t threshold = heap->capacity() * ShenandoahGenerationalYoungGenPercentage / 100;
1252 size_t allocated = used - prev_used;
1253
1254 // Start the next young gc after we've allocated percentage_young of the heap.
1255 bool result = allocated > threshold;
1256
1257 FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
1258 "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
1259 result ? "Partial cycle triggered" : "Partial cycle skipped",
1260 capacity/M, used/M, prev_used/M, allocated/M, threshold/M);
1261
1262 if (result) {
1263 log_info(gc,ergo)("%s", msg.buffer());
1264 } else {
1265 log_trace(gc,ergo)("%s", msg.buffer());
1266 }
1267 return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE;
1268 }
1269 };
1270
1271 class ShenandoahLRUPartialHeuristics : public ShenandoahPartialHeuristics {
1272 public:
1273 ShenandoahLRUPartialHeuristics() : ShenandoahPartialHeuristics() {
1274 SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
1275 }
1276
1277 virtual const char* name() {
1278 return "LRU";
1279 }
1280
1281 virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
1282 if (!is_minor_gc()) {
1283 return ShenandoahPartialHeuristics::choose_collection_set(collection_set);
1284 }
1285
1286 ShenandoahHeap* heap = ShenandoahHeap::heap();
1287 ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1288 ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
1289 uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();
1290
1291 size_t num_regions = heap->num_regions();
1292
1293 RegionData* candidates = get_region_data_cache(num_regions);
1294 int candidate_idx = 0;
1295 for (size_t i = 0; i < num_regions; i++) {
1296 ShenandoahHeapRegion* r = heap->get_region(i);
1297 if (r->is_regular() && (r->seqnum_last_alloc() > 0)) {
1298 candidates[candidate_idx]._region = heap->get_region(i);
1299 candidates[candidate_idx]._seqnum_last_alloc = heap->get_region(i)->seqnum_last_alloc();
1300 candidate_idx++;
1301 }
1302 }
1303
1304 size_t sorted_count = candidate_idx;
1305 QuickSort::sort<RegionData>(candidates, (int)sorted_count, compare_by_alloc_seq_ascending, false);
1306
1307 // Heuristics triggered partial when allocated was larger than a threshold.
1308 // New allocations might have happened while we were preparing for GC,
1309 // capture all them in this cycle. This "adjusts" the threshold automatically.
1310 size_t used = heap->used();
1311 size_t prev_used = heap->used_at_last_gc();
1312 guarantee(used >= prev_used, "Invariant");
1313 size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_count);
1314
1315 ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
1316 root_regions->clear();
1317
1318 uint count = 0;
1319
1320 for (uint i = 0; (i < sorted_count) && (count < target); i++) {
1321 ShenandoahHeapRegion* contender = candidates[i]._region;
1322 if (contender->seqnum_last_alloc() >= alloc_seq_at_last_gc_start) {
1323 break;
1324 }
1325
1326 size_t index = contender->region_number();
1327 size_t from_idx_count = 0;
1328 if (matrix->enumerate_connected_to(index, num_regions,_from_idxs, from_idx_count,
1329 ShenandoahPartialInboundThreshold)) {
1330 if (maybe_add_heap_region(contender, collection_set)) {
1331 count++;
1332 }
1333 for (uint f = 0; f < from_idx_count; f++) {
1334 ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
1335 root_regions->add_region_check_for_duplicates(r);
1336 }
1337 }
1338 }
1339 filter_regions();
1340 collection_set->update_region_status();
1341
1342 log_info(gc,ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
1343 num_regions, target, ShenandoahLRUOldGenPercentage, collection_set->count());
1344 }
1345
1346 ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
1347 ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc();
1348 if (cycle_mode != ShenandoahHeap::NONE) {
1349 return cycle_mode;
1350 }
1351
1352 ShenandoahHeap* heap = ShenandoahHeap::heap();
1353
1354 if (heap->has_forwarded_objects()) {
1355 // Cannot start partial if heap is not completely updated.
1356 return ShenandoahHeap::NONE;
1357 }
1358
1359 size_t capacity = heap->capacity();
1360 size_t used = heap->used();
1361 size_t prev_used = heap->used_at_last_gc();
1362
1363 if (used < prev_used) {
1364 // Major collection must have happened, "used" data is unreliable, wait for update.
1365 return ShenandoahHeap::NONE;
1366 }
1367
1368 // For now don't start until we are 40% full
1369 size_t allocated = used - prev_used;
1370 size_t threshold = heap->capacity() * ShenandoahLRUOldGenPercentage / 100;
1371 size_t minimum = heap->capacity() * 0.4;
1372
1373 bool result = ((used > minimum) && (allocated > threshold));
1374
1375 FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
1376 "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M, Minimum: " SIZE_FORMAT "M",
1377 result ? "Partial cycle triggered" : "Partial cycle skipped",
1378 capacity/M, used/M, prev_used/M, allocated/M, threshold/M, minimum/M);
1379
1380 if (result) {
1381 log_info(gc,ergo)("%s", msg.buffer());
1382 } else {
1383 log_trace(gc,ergo)("%s", msg.buffer());
1384 }
1385 return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE;
1386 }
1387
1388 };
1389
// Constructs the collector policy: zeroes the GC statistics, derives the heap
// region size from the heap bounds, and instantiates the heuristics named by
// -XX:ShenandoahGCHeuristics (exiting the VM on unknown/locked-out choices).
ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _success_concurrent_gcs(0),
  _success_degenerated_gcs(0),
  _success_full_gcs(0),
  _explicit_concurrent(0),
  _explicit_full(0),
  _alloc_failure_degenerated(0),
  _alloc_failure_full(0),
  _alloc_failure_degenerated_upgrade_to_full(0)
{
  // Zero the per-degeneration-point counters.
  Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);

  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();

  // ShenandoahGCHeuristics has a default value, so it is never NULL in practice.
  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
      _heuristics = new ShenandoahPartialConnectedHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
      _heuristics = new ShenandoahGenerationalPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
      _heuristics = new ShenandoahLRUPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    // Diagnostic/experimental heuristics require the matching unlock flag.
    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
          err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                  _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
          err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                  _heuristics->name()));
    }

    // The two store-val barrier flavors are mutually exclusive.
    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
    }
    log_info(gc, init)("Shenandoah heuristics: %s",
                       _heuristics->name());
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
}
1453
// Downcast accessor used by the generic policy framework.
ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}
1457
// Shenandoah always uses its own barrier set.
BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::Shenandoah;
}
1461
// Unused policy hook: Shenandoah allocates through the heap directly.
HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
1468
// Unused policy hook: allocation failures are handled inside ShenandoahHeap.
HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
1473
void ShenandoahCollectorPolicy::initialize_alignments() {

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}
1480
// Called after the heap exists; lets heuristics do sizing-dependent setup
// (e.g. allocating num_regions-sized scratch arrays).
void ShenandoahCollectorPolicy::post_heap_initialize() {
  _heuristics->initialize();
}
1484
// Accounts an explicit (System.gc) request served by a concurrent cycle.
void ShenandoahCollectorPolicy::record_explicit_to_concurrent() {
  _heuristics->record_explicit_gc();
  _explicit_concurrent++;
}
1489
// Accounts an explicit (System.gc) request served by a Full GC.
void ShenandoahCollectorPolicy::record_explicit_to_full() {
  _heuristics->record_explicit_gc();
  _explicit_full++;
}
1494
// Accounts an allocation failure that escalated straight to Full GC.
void ShenandoahCollectorPolicy::record_alloc_failure_to_full() {
  _heuristics->record_allocation_failure_gc();
  _alloc_failure_full++;
}
1499
1500 void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) {
1501 assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity");
1502 _heuristics->record_allocation_failure_gc();
1503 _alloc_failure_degenerated++;
1504 _degen_points[point]++;
1505 }
1506
// Accounts a Degenerated GC that could not finish and was upgraded to Full GC.
void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() {
  _alloc_failure_degenerated_upgrade_to_full++;
}
1510
// Accounts a successfully completed concurrent cycle.
void ShenandoahCollectorPolicy::record_success_concurrent() {
  _heuristics->record_success_concurrent();
  _success_concurrent_gcs++;
}
1515
// Accounts a successfully completed Degenerated GC.
void ShenandoahCollectorPolicy::record_success_degenerated() {
  _heuristics->record_success_degenerated();
  _success_degenerated_gcs++;
}
1520
// Accounts a successfully completed Full GC.
void ShenandoahCollectorPolicy::record_success_full() {
  _heuristics->record_success_full();
  _success_full_gcs++;
}
1525
// Delegates to the active heuristics.
bool ShenandoahCollectorPolicy::should_start_normal_gc() {
  return _heuristics->should_start_normal_gc();
}
1529
// Delegates to the active heuristics.
bool ShenandoahCollectorPolicy::should_degenerate_cycle() {
  return _heuristics->should_degenerate_cycle();
}
1533
// Delegates to the active heuristics.
bool ShenandoahCollectorPolicy::update_refs() {
  return _heuristics->update_refs();
}
1537
// Delegates to the active heuristics.
bool ShenandoahCollectorPolicy::should_start_update_refs() {
  return _heuristics->should_start_update_refs();
}
1541
// Delegates to the active heuristics.
void ShenandoahCollectorPolicy::record_peak_occupancy() {
  _heuristics->record_peak_occupancy();
}
1545
// Delegates cset selection to the heuristics. The 'minor' argument is unused
// here: the heuristics queries the heap's minor/major state itself.
void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set,
                                                      bool minor) {
  _heuristics->choose_collection_set(collection_set);
}
1550
// Delegates to the active heuristics.
bool ShenandoahCollectorPolicy::should_process_references() {
  return _heuristics->should_process_references();
}
1554
// Delegates to the active heuristics.
bool ShenandoahCollectorPolicy::should_unload_classes() {
  return _heuristics->should_unload_classes();
}
1558
// Number of GC cycles started since VM start.
size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}
1562
// Forwards per-phase timing to heuristics that react to it.
void ShenandoahCollectorPolicy::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
  _heuristics->record_phase_time(phase, secs);
}
1566
// Asks the heuristics whether a traversal cycle (NONE/MINOR/MAJOR) should start.
ShenandoahHeap::GCCycleMode ShenandoahCollectorPolicy::should_start_traversal_gc() {
  return _heuristics->should_start_traversal_gc();
}
1570
// True when the active heuristics supports traversal-mode collections.
bool ShenandoahCollectorPolicy::can_do_traversal_gc() {
  return _heuristics->can_do_traversal_gc();
}
1574
// Bumps the global cycle counter and notifies the heuristics that a cycle began.
void ShenandoahCollectorPolicy::record_cycle_start() {
  _cycle_counter++;
  _heuristics->record_cycle_start();
}
1579
// Notifies the heuristics that the current cycle has finished.
void ShenandoahCollectorPolicy::record_cycle_end() {
  _heuristics->record_cycle_end();
}
1583
// Marks the policy as shutting down; checked by GC threads via is_at_shutdown().
void ShenandoahCollectorPolicy::record_shutdown() {
  _in_shutdown.set();
}
1587
// True once record_shutdown() has been called.
bool ShenandoahCollectorPolicy::is_at_shutdown() {
  return _in_shutdown.is_set();
}
1591
1592 void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
1593 out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle");
1594 out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,");
1595 out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate");
1596 out->print_cr("to avoid Degenerated and Full GC cycles.");
1597 out->cr();
1598
1599 out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs", _success_concurrent_gcs);
1600 out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_concurrent);
1601 out->cr();
1602
1603 out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs", _success_degenerated_gcs);
1604 out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated);
1605 for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) {
1606 if (_degen_points[c] > 0) {
1607 const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c);
1608 out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_points[c], desc);
1609 }
1610 }
1611 out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC", _alloc_failure_degenerated_upgrade_to_full);
1612 out->cr();
1613
1614 out->print_cr(SIZE_FORMAT_W(5) " Full GCs", _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full);
1615 out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_full);
1616 out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full);
|