1 /* 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc_implementation/shared/adaptiveSizePolicy.hpp" 27 #include "gc_implementation/shared/gcPolicyCounters.hpp" 28 #include "gc_implementation/shared/vmGCOperations.hpp" 29 #include "memory/cardTableRS.hpp" 30 #include "memory/collectorPolicy.hpp" 31 #include "memory/gcLocker.inline.hpp" 32 #include "memory/genCollectedHeap.hpp" 33 #include "memory/generationSpec.hpp" 34 #include "memory/space.hpp" 35 #include "memory/universe.hpp" 36 #include "runtime/arguments.hpp" 37 #include "runtime/globals_extension.hpp" 38 #include "runtime/handles.inline.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "runtime/vmThread.hpp" 42 #include "utilities/macros.hpp" 43 44 // CollectorPolicy methods 45 46 CollectorPolicy::CollectorPolicy() : 47 _space_alignment(0), 48 _heap_alignment(0), 49 _initial_heap_byte_size(InitialHeapSize), 50 _max_heap_byte_size(MaxHeapSize), 51 _min_heap_byte_size(Arguments::min_heap_size()), 52 _max_heap_size_cmdline(false), 53 _size_policy(NULL), 54 _should_clear_all_soft_refs(false), 55 _all_soft_refs_clear(false) 56 {} 57 58 #ifdef ASSERT 59 void CollectorPolicy::assert_flags() { 60 assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); 61 assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment"); 62 assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment"); 63 } 64 65 void CollectorPolicy::assert_size_info() { 66 assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage"); 67 assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage"); 68 assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes"); 69 assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); 70 
assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); 71 assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment"); 72 assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment"); 73 assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment"); 74 } 75 #endif // ASSERT 76 77 void CollectorPolicy::initialize_flags() { 78 assert(_space_alignment != 0, "Space alignment not set up properly"); 79 assert(_heap_alignment != 0, "Heap alignment not set up properly"); 80 assert(_heap_alignment >= _space_alignment, 81 err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT, 82 _heap_alignment, _space_alignment)); 83 assert(_heap_alignment % _space_alignment == 0, 84 err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, 85 _heap_alignment, _space_alignment)); 86 87 if (FLAG_IS_CMDLINE(MaxHeapSize)) { 88 if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { 89 vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size"); 90 } 91 if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) { 92 vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); 93 } 94 _max_heap_size_cmdline = true; 95 } 96 97 // Check heap parameter properties 98 if (InitialHeapSize < M) { 99 vm_exit_during_initialization("Too small initial heap"); 100 } 101 if (_min_heap_byte_size < M) { 102 vm_exit_during_initialization("Too small minimum heap"); 103 } 104 105 // User inputs from -Xmx and -Xms must be aligned 106 _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment); 107 uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment); 108 uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment); 109 110 // Write back to flags if the values changed 111 
if (aligned_initial_heap_size != InitialHeapSize) { 112 FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size); 113 } 114 if (aligned_max_heap_size != MaxHeapSize) { 115 FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size); 116 } 117 118 if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 && 119 InitialHeapSize < _min_heap_byte_size) { 120 vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); 121 } 122 if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { 123 FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize); 124 } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) { 125 FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize); 126 if (InitialHeapSize < _min_heap_byte_size) { 127 _min_heap_byte_size = InitialHeapSize; 128 } 129 } 130 131 _initial_heap_byte_size = InitialHeapSize; 132 _max_heap_byte_size = MaxHeapSize; 133 134 FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment)); 135 136 DEBUG_ONLY(CollectorPolicy::assert_flags();) 137 } 138 139 void CollectorPolicy::initialize_size_info() { 140 if (PrintGCDetails && Verbose) { 141 gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " 142 SIZE_FORMAT " Maximum heap " SIZE_FORMAT, 143 _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); 144 } 145 146 DEBUG_ONLY(CollectorPolicy::assert_size_info();) 147 } 148 149 bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { 150 bool result = _should_clear_all_soft_refs; 151 set_should_clear_all_soft_refs(false); 152 return result; 153 } 154 155 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, 156 int max_covered_regions) { 157 return new CardTableRS(whole_heap, max_covered_regions); 158 } 159 160 void CollectorPolicy::cleared_all_soft_refs() { 161 // If near gc overhear limit, continue to clear SoftRefs. 
SoftRefs may 162 // have been cleared in the last collection but if the gc overhear 163 // limit continues to be near, SoftRefs should still be cleared. 164 if (size_policy() != NULL) { 165 _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near(); 166 } 167 _all_soft_refs_clear = true; 168 } 169 170 size_t CollectorPolicy::compute_heap_alignment() { 171 // The card marking array and the offset arrays for old generations are 172 // committed in os pages as well. Make sure they are entirely full (to 173 // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1 174 // byte entry and the os page size is 4096, the maximum heap size should 175 // be 512*4096 = 2MB aligned. 176 177 size_t alignment = GenRemSet::max_alignment_constraint(); 178 179 if (UseLargePages) { 180 // In presence of large pages we have to make sure that our 181 // alignment is large page aware. 182 alignment = lcm(os::large_page_size(), alignment); 183 } 184 185 return alignment; 186 } 187 188 // GenCollectorPolicy methods 189 190 GenCollectorPolicy::GenCollectorPolicy() : 191 _min_young_size(0), 192 _initial_young_size(0), 193 _max_young_size(0), 194 _min_old_size(0), 195 _initial_old_size(0), 196 _max_old_size(0), 197 _gen_alignment(0), 198 _young_gen_spec(NULL), 199 _old_gen_spec(NULL) 200 {} 201 202 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { 203 return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment); 204 } 205 206 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, 207 size_t maximum_size) { 208 size_t max_minus = maximum_size - _gen_alignment; 209 return desired_size < max_minus ? 
desired_size : max_minus; 210 } 211 212 213 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, 214 size_t init_promo_size, 215 size_t init_survivor_size) { 216 const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; 217 _size_policy = new AdaptiveSizePolicy(init_eden_size, 218 init_promo_size, 219 init_survivor_size, 220 max_gc_pause_sec, 221 GCTimeRatio); 222 } 223 224 size_t GenCollectorPolicy::young_gen_size_lower_bound() { 225 // The young generation must be aligned and have room for eden + two survivors 226 return align_size_up(3 * _space_alignment, _gen_alignment); 227 } 228 229 #ifdef ASSERT 230 void GenCollectorPolicy::assert_flags() { 231 CollectorPolicy::assert_flags(); 232 assert(NewSize >= _min_young_size, "Ergonomics decided on a too small young gen size"); 233 assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 234 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); 235 assert(NewSize % _gen_alignment == 0, "NewSize alignment"); 236 assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment"); 237 assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes"); 238 assert(OldSize % _gen_alignment == 0, "OldSize alignment"); 239 } 240 241 void GenCollectorPolicy::assert_size_info() { 242 CollectorPolicy::assert_size_info(); 243 // GenCollectorPolicy::initialize_size_info may update the MaxNewSize 244 assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); 245 assert(NewSize == _initial_young_size, "Discrepancy between NewSize flag and local storage"); 246 assert(MaxNewSize == _max_young_size, "Discrepancy between MaxNewSize flag and local storage"); 247 assert(OldSize == _initial_old_size, "Discrepancy between OldSize flag and local storage"); 248 
assert(_min_young_size <= _initial_young_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); 249 assert(_initial_young_size <= _max_young_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); 250 assert(_min_young_size % _gen_alignment == 0, "_min_young_size alignment"); 251 assert(_initial_young_size % _gen_alignment == 0, "_initial_young_size alignment"); 252 assert(_max_young_size % _gen_alignment == 0, "_max_young_size alignment"); 253 assert(_min_young_size <= bound_minus_alignment(_min_young_size, _min_heap_byte_size), 254 "Ergonomics made minimum young generation larger than minimum heap"); 255 assert(_initial_young_size <= bound_minus_alignment(_initial_young_size, _initial_heap_byte_size), 256 "Ergonomics made initial young generation larger than initial heap"); 257 assert(_max_young_size <= bound_minus_alignment(_max_young_size, _max_heap_byte_size), 258 "Ergonomics made maximum young generation lager than maximum heap"); 259 assert(_min_old_size <= _initial_old_size, "Ergonomics decided on incompatible minimum and initial old gen sizes"); 260 assert(_initial_old_size <= _max_old_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); 261 assert(_max_old_size % _gen_alignment == 0, "_max_old_size alignment"); 262 assert(_initial_old_size % _gen_alignment == 0, "_initial_old_size alignment"); 263 assert(_max_heap_byte_size <= (_max_young_size + _max_old_size), "Total maximum heap sizes must be sum of generation maximum sizes"); 264 assert(_min_young_size + _min_old_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size"); 265 assert(_initial_young_size + _initial_old_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size"); 266 assert(_max_young_size + _max_old_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size"); 267 } 268 #endif // ASSERT 269 270 void 
GenCollectorPolicy::initialize_flags() { 271 CollectorPolicy::initialize_flags(); 272 273 assert(_gen_alignment != 0, "Generation alignment not set up properly"); 274 assert(_heap_alignment >= _gen_alignment, 275 err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT, 276 _heap_alignment, _gen_alignment)); 277 assert(_gen_alignment % _space_alignment == 0, 278 err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, 279 _gen_alignment, _space_alignment)); 280 assert(_heap_alignment % _gen_alignment == 0, 281 err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT, 282 _heap_alignment, _gen_alignment)); 283 284 // All generational heaps have a youngest gen; handle those flags here 285 286 // Make sure the heap is large enough for two generations 287 uintx smallest_new_size = young_gen_size_lower_bound(); 288 uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment), 289 _heap_alignment); 290 if (MaxHeapSize < smallest_heap_size) { 291 FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size); 292 _max_heap_byte_size = MaxHeapSize; 293 } 294 // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size 295 if (_min_heap_byte_size < smallest_heap_size) { 296 _min_heap_byte_size = smallest_heap_size; 297 if (InitialHeapSize < _min_heap_byte_size) { 298 FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size); 299 _initial_heap_byte_size = smallest_heap_size; 300 } 301 } 302 303 // Make sure NewSize allows an old generation to fit even if set on the command line 304 if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) { 305 warning("NewSize was set larger than initial heap size, will use initial heap size."); 306 NewSize = bound_minus_alignment(NewSize, _initial_heap_byte_size); 307 } 308 309 // Now take the actual NewSize into account. 
We will silently increase NewSize 310 // if the user specified a smaller or unaligned value. 311 uintx bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize); 312 bounded_new_size = MAX2(smallest_new_size, (uintx)align_size_down(bounded_new_size, _gen_alignment)); 313 if (bounded_new_size != NewSize) { 314 // Do not use FLAG_SET_ERGO to update NewSize here, since this will override 315 // if NewSize was set on the command line or not. This information is needed 316 // later when setting the initial and minimum young generation size. 317 NewSize = bounded_new_size; 318 } 319 _min_young_size = smallest_new_size; 320 _initial_young_size = NewSize; 321 322 if (!FLAG_IS_DEFAULT(MaxNewSize)) { 323 if (MaxNewSize >= MaxHeapSize) { 324 // Make sure there is room for an old generation 325 uintx smaller_max_new_size = MaxHeapSize - _gen_alignment; 326 if (FLAG_IS_CMDLINE(MaxNewSize)) { 327 warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire " 328 "heap (" SIZE_FORMAT "k). A new max generation size of " SIZE_FORMAT "k will be used.", 329 MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K); 330 } 331 FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size); 332 if (NewSize > MaxNewSize) { 333 FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); 334 _initial_young_size = NewSize; 335 } 336 } else if (MaxNewSize < _initial_young_size) { 337 FLAG_SET_ERGO(uintx, MaxNewSize, _initial_young_size); 338 } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) { 339 FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment)); 340 } 341 _max_young_size = MaxNewSize; 342 } 343 344 if (NewSize > MaxNewSize) { 345 // At this point this should only happen if the user specifies a large NewSize and/or 346 // a small (but not too small) MaxNewSize. 347 if (FLAG_IS_CMDLINE(MaxNewSize)) { 348 warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" 349 "A new max generation size of " SIZE_FORMAT "k will be used.", 350 NewSize/K, MaxNewSize/K, NewSize/K); 351 } 352 FLAG_SET_ERGO(uintx, MaxNewSize, NewSize); 353 _max_young_size = MaxNewSize; 354 } 355 356 if (SurvivorRatio < 1 || NewRatio < 1) { 357 vm_exit_during_initialization("Invalid young gen ratio specified"); 358 } 359 360 if (!is_size_aligned(OldSize, _gen_alignment)) { 361 // Setting OldSize directly to preserve information about the possible 362 // setting of OldSize on the command line. 363 OldSize = align_size_down(OldSize, _gen_alignment); 364 } 365 366 if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) { 367 // NewRatio will be used later to set the young generation size so we use 368 // it to calculate how big the heap should be based on the requested OldSize 369 // and NewRatio. 370 assert(NewRatio > 0, "NewRatio should have been set up earlier"); 371 size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); 372 373 calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment); 374 FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize); 375 _max_heap_byte_size = MaxHeapSize; 376 FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize); 377 _initial_heap_byte_size = InitialHeapSize; 378 } 379 380 // Adjust NewSize and OldSize or MaxHeapSize to match each other 381 if (NewSize + OldSize > MaxHeapSize) { 382 if (_max_heap_size_cmdline) { 383 // Somebody has set a maximum heap size with the intention that we should not 384 // exceed it. Adjust New/OldSize as necessary. 
385 uintx calculated_size = NewSize + OldSize; 386 double shrink_factor = (double) MaxHeapSize / calculated_size; 387 uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment); 388 FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size)); 389 _initial_young_size = NewSize; 390 391 // OldSize is already aligned because above we aligned MaxHeapSize to 392 // _heap_alignment, and we just made sure that NewSize is aligned to 393 // _gen_alignment. In initialize_flags() we verified that _heap_alignment 394 // is a multiple of _gen_alignment. 395 FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize); 396 } else { 397 FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment)); 398 _max_heap_byte_size = MaxHeapSize; 399 } 400 } 401 402 // Update NewSize, if possible, to avoid sizing the young gen too small when only 403 // OldSize is set on the command line. 404 if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) { 405 if (OldSize < _initial_heap_byte_size) { 406 size_t new_size = _initial_heap_byte_size - OldSize; 407 // Need to compare against the flag value for max since _max_young_size 408 // might not have been set yet. 409 if (new_size >= _min_young_size && new_size <= MaxNewSize) { 410 FLAG_SET_ERGO(uintx, NewSize, new_size); 411 _initial_young_size = NewSize; 412 } 413 } 414 } 415 416 always_do_update_barrier = UseConcMarkSweepGC; 417 418 DEBUG_ONLY(GenCollectorPolicy::assert_flags();) 419 } 420 421 // Values set on the command line win over any ergonomically 422 // set command line parameters. 423 // Ergonomic choice of parameters are done before this 424 // method is called. Values for command line parameters such as NewSize 425 // and MaxNewSize feed those ergonomic choices into this method. 426 // This method makes the final generation sizings consistent with 427 // themselves and with overall heap sizings. 
428 // In the absence of explicitly set command line flags, policies 429 // such as the use of NewRatio are used to size the generation. 430 431 // Minimum sizes of the generations may be different than 432 // the initial sizes. An inconsistency is permitted here 433 // in the total size that can be specified explicitly by 434 // command line specification of OldSize and NewSize and 435 // also a command line specification of -Xms. Issue a warning 436 // but allow the values to pass. 437 void GenCollectorPolicy::initialize_size_info() { 438 CollectorPolicy::initialize_size_info(); 439 440 _initial_young_size = NewSize; 441 _max_young_size = MaxNewSize; 442 _initial_old_size = OldSize; 443 444 // Determine maximum size of the young generation. 445 446 if (FLAG_IS_DEFAULT(MaxNewSize)) { 447 _max_young_size = scale_by_NewRatio_aligned(_max_heap_byte_size); 448 // Bound the maximum size by NewSize below (since it historically 449 // would have been NewSize and because the NewRatio calculation could 450 // yield a size that is too small) and bound it by MaxNewSize above. 451 // Ergonomics plays here by previously calculating the desired 452 // NewSize and MaxNewSize. 453 _max_young_size = MIN2(MAX2(_max_young_size, _initial_young_size), MaxNewSize); 454 } 455 456 // Given the maximum young size, determine the initial and 457 // minimum young sizes. 458 459 if (_max_heap_byte_size == _initial_heap_byte_size) { 460 // The maximum and initial heap sizes are the same so the generation's 461 // initial size must be the same as it maximum size. Use NewSize as the 462 // size if set on command line. 463 _max_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : _max_young_size; 464 _initial_young_size = _max_young_size; 465 466 // Also update the minimum size if min == initial == max. 
467 if (_max_heap_byte_size == _min_heap_byte_size) { 468 _min_young_size = _max_young_size; 469 } 470 } else { 471 if (FLAG_IS_CMDLINE(NewSize)) { 472 // If NewSize is set on the command line, we should use it as 473 // the initial size, but make sure it is within the heap bounds. 474 _initial_young_size = 475 MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size)); 476 _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size); 477 } else { 478 // For the case where NewSize is not set on the command line, use 479 // NewRatio to size the initial generation size. Use the current 480 // NewSize as the floor, because if NewRatio is overly large, the resulting 481 // size can be too small. 482 _initial_young_size = 483 MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize)); 484 } 485 } 486 487 if (PrintGCDetails && Verbose) { 488 gclog_or_tty->print_cr("1: Minimum young " SIZE_FORMAT " Initial young " 489 SIZE_FORMAT " Maximum young " SIZE_FORMAT, 490 _min_young_size, _initial_young_size, _max_young_size); 491 } 492 493 // At this point the minimum, initial and maximum sizes 494 // of the overall heap and of the young generation have been determined. 495 // The maximum old size can be determined from the maximum young 496 // and maximum heap size since no explicit flags exist 497 // for setting the old generation maximum. 498 _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment); 499 500 // If no explicit command line flag has been set for the 501 // old generation size, use what is left. 502 if (!FLAG_IS_CMDLINE(OldSize)) { 503 // The user has not specified any value but the ergonomics 504 // may have chosen a value (which may or may not be consistent 505 // with the overall heap size). In either case make 506 // the minimum, maximum and initial sizes consistent 507 // with the young sizes and the overall heap sizes. 
508 _min_old_size = _gen_alignment; 509 _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size)); 510 // _max_old_size has already been made consistent above. 511 } else { 512 // OldSize has been explicitly set on the command line. Use it 513 // for the initial size but make sure the minimum allow a young 514 // generation to fit as well. 515 // If the user has explicitly set an OldSize that is inconsistent 516 // with other command line flags, issue a warning. 517 // The generation minimums and the overall heap minimum should 518 // be within one generation alignment. 519 if (_initial_old_size > _max_old_size) { 520 warning("Inconsistency between maximum heap size and maximum " 521 "generation sizes: using maximum heap = " SIZE_FORMAT 522 " -XX:OldSize flag is being ignored", 523 _max_heap_byte_size); 524 _initial_old_size = _max_old_size; 525 } 526 527 _min_old_size = MIN2(_initial_old_size, _min_heap_byte_size - _min_young_size); 528 } 529 530 // The initial generation sizes should match the initial heap size, 531 // if not issue a warning and resize the generations. This behavior 532 // differs from JDK8 where the generation sizes have higher priority 533 // than the initial heap size. 
534 if ((_initial_old_size + _initial_young_size) != _initial_heap_byte_size) { 535 warning("Inconsistency between generation sizes and heap size, resizing " 536 "the generations to fit the heap."); 537 538 size_t desired_young_size = _initial_heap_byte_size - _initial_old_size; 539 if (_initial_heap_byte_size < _initial_old_size) { 540 // Old want all memory, use minimum for young and rest for old 541 _initial_young_size = _min_young_size; 542 _initial_old_size = _initial_heap_byte_size - _min_young_size; 543 } else if (desired_young_size > _max_young_size) { 544 // Need to increase both young and old generation 545 _initial_young_size = _max_young_size; 546 _initial_old_size = _initial_heap_byte_size - _max_young_size; 547 } else if (desired_young_size < _min_young_size) { 548 // Need to decrease both young and old generation 549 _initial_young_size = _min_young_size; 550 _initial_old_size = _initial_heap_byte_size - _min_young_size; 551 } else { 552 // The young generation boundaries allow us to only update the 553 // young generation. 554 _initial_young_size = desired_young_size; 555 } 556 557 if (PrintGCDetails && Verbose) { 558 gclog_or_tty->print_cr("2: Minimum young " SIZE_FORMAT " Initial young " 559 SIZE_FORMAT " Maximum young " SIZE_FORMAT, 560 _min_young_size, _initial_young_size, _max_young_size); 561 } 562 } 563 564 // Write back to flags if necessary. 
565 if (NewSize != _initial_young_size) { 566 FLAG_SET_ERGO(uintx, NewSize, _initial_young_size); 567 } 568 569 if (MaxNewSize != _max_young_size) { 570 FLAG_SET_ERGO(uintx, MaxNewSize, _max_young_size); 571 } 572 573 if (OldSize != _initial_old_size) { 574 FLAG_SET_ERGO(uintx, OldSize, _initial_old_size); 575 } 576 577 if (PrintGCDetails && Verbose) { 578 gclog_or_tty->print_cr("Minimum old " SIZE_FORMAT " Initial old " 579 SIZE_FORMAT " Maximum old " SIZE_FORMAT, 580 _min_old_size, _initial_old_size, _max_old_size); 581 } 582 583 DEBUG_ONLY(GenCollectorPolicy::assert_size_info();) 584 } 585 586 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, 587 bool is_tlab, 588 bool* gc_overhead_limit_was_exceeded) { 589 GenCollectedHeap *gch = GenCollectedHeap::heap(); 590 591 debug_only(gch->check_for_valid_allocation_state()); 592 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed"); 593 594 // In general gc_overhead_limit_was_exceeded should be false so 595 // set it so here and reset it to true only if the gc time 596 // limit is being exceeded as checked below. 597 *gc_overhead_limit_was_exceeded = false; 598 599 HeapWord* result = NULL; 600 601 // Loop until the allocation is satisfied, or unsatisfied after GC. 602 for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { 603 HandleMark hm; // Discard any handles allocated in each iteration. 604 605 // First allocation attempt is lock-free. 606 Generation *young = gch->young_gen(); 607 assert(young->supports_inline_contig_alloc(), 608 "Otherwise, must do alloc within heap lock"); 609 if (young->should_allocate(size, is_tlab)) { 610 result = young->par_allocate(size, is_tlab); 611 if (result != NULL) { 612 assert(gch->is_in_reserved(result), "result not in heap"); 613 return result; 614 } 615 } 616 unsigned int gc_count_before; // Read inside the Heap_lock locked region. 
617 { 618 MutexLocker ml(Heap_lock); 619 if (PrintGC && Verbose) { 620 gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:" 621 " attempting locked slow path allocation"); 622 } 623 // Note that only large objects get a shot at being 624 // allocated in later generations. 625 bool first_only = ! should_try_older_generation_allocation(size); 626 627 result = gch->attempt_allocation(size, is_tlab, first_only); 628 if (result != NULL) { 629 assert(gch->is_in_reserved(result), "result not in heap"); 630 return result; 631 } 632 633 if (GC_locker::is_active_and_needs_gc()) { 634 if (is_tlab) { 635 return NULL; // Caller will retry allocating individual object. 636 } 637 if (!gch->is_maximal_no_gc()) { 638 // Try and expand heap to satisfy request. 639 result = expand_heap_and_allocate(size, is_tlab); 640 // Result could be null if we are out of space. 641 if (result != NULL) { 642 return result; 643 } 644 } 645 646 if (gclocker_stalled_count > GCLockerRetryAllocationCount) { 647 return NULL; // We didn't get to do a GC and we didn't get any memory. 648 } 649 650 // If this thread is not in a jni critical section, we stall 651 // the requestor until the critical section has cleared and 652 // GC allowed. When the critical section clears, a GC is 653 // initiated by the last thread exiting the critical section; so 654 // we retry the allocation sequence from the beginning of the loop, 655 // rather than causing more, now probably unnecessary, GC attempts. 656 JavaThread* jthr = JavaThread::current(); 657 if (!jthr->in_critical()) { 658 MutexUnlocker mul(Heap_lock); 659 // Wait for JNI critical section to be exited 660 GC_locker::stall_until_clear(); 661 gclocker_stalled_count += 1; 662 continue; 663 } else { 664 if (CheckJNICalls) { 665 fatal("Possible deadlock due to allocating while" 666 " in jni critical section"); 667 } 668 return NULL; 669 } 670 } 671 672 // Read the gc count while the heap lock is held. 
      // Read the GC count while still holding the Heap_lock, so the count
      // and the decision to collect are consistent with each other.
      gc_count_before = Universe::heap()->total_collections();
    }

    // Request a collection (and a retried allocation) at a safepoint,
    // executed by the VM thread.
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        // A JNI critical section kept the GC from running; loop to
        // retry (and/or stall) until the locker is released.
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown. Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          // We are reporting OOM, so discard the block the GC handed us by
          // overwriting it with a filler object.
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
              " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

// Try to satisfy an allocation by expanding a generation, without running
// a garbage collection.  The old generation is asked first, then the young
// generation; returns NULL if neither generation can supply the space.
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  Generation *old = gch->old_gen();
  if (old->should_allocate(size, is_tlab)) {
    result = old->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    Generation *young = gch->young_gen();
    if (young->should_allocate(size, is_tlab)) {
      result = young->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

// Last-ditch allocation path, reached after a normal allocation attempt has
// failed.  Escalates through: heap expansion (when the GC locker blocks any
// collection), an incremental or full collection, a re-tried allocation,
// another expansion, and finally a maximally-compacting full collection that
// also clears soft references.  Returns NULL only when all of that fails.
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false /* full */,
                       false /* clear_all_soft_refs */,
                       size /* size */,
                       is_tlab /* is_tlab */,
                       Generation::Old /* max_gen */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true /* full */,
                       false /* clear_all_soft_refs */,
                       size /* size */,
                       is_tlab /* is_tlab */,
                       Generation::Old /* max_gen */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Temporarily force full compaction for the duration of this collection.
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true /* full */,
                       true /* clear_all_soft_refs */,
                       size /* size */,
                       is_tlab /* is_tlab */,
                       Generation::Old /* max_gen */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

// Retry loop for a failed metaspace allocation: allocate, and if that fails,
// either expand the metaspace (when the GC locker is active) or schedule a
// VM operation that collects and retries.  Loops until an allocation
// succeeds or a completed GC still cannot satisfy the request.
MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        // This thread holds the GC locker itself: a GC would deadlock.
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    { // Need lock to get self consistent gc_count's
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation that collects and then retries the
    // metadata allocation at a safepoint.
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.
    // Check before checking success because the
    // prologue could have succeeded and the GC still have been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      // GC ran; return its (possibly NULL) allocation result.
      return op.result();
    }
    loop_count++;
    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t young_capacity = gch->young_gen()->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}


//
// MarkSweepPolicy methods
//

// Alignments for a mark-sweep heap: spaces and generations align to the
// generation grain, the heap to the computed heap alignment.
void MarkSweepPolicy::initialize_alignments() {
  _space_alignment = _gen_alignment = (uintx)Generation::GenGrain;
  _heap_alignment = compute_heap_alignment();
}

// Build the young and old generation specs.  The young generation is
// ParNew when -XX:+UseParNewGC is set, DefNew otherwise; the old
// generation is always mark-sweep-compact.
void MarkSweepPolicy::initialize_generations() {
  if (UseParNewGC) {
    _young_gen_spec = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size, _gen_alignment);
  } else {
    _young_gen_spec = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size, _gen_alignment);
  }
  _old_gen_spec = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size, _gen_alignment);

  if (_young_gen_spec == NULL || _old_gen_spec == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations.
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
// Testing that the NewSize flag is handled correctly is hard because it
// depends on so many other configurable variables. This test only tries to
// verify that there are some basic rules for NewSize honored by the policies.
class TestGenCollectorPolicy {
 public:
  // Exercise NewSize handling for both command-line and ergonomic origins.
  static void test_new_size() {
    size_t flag_value;

    save_flags();

    // If NewSize is set on the command line, it should be used
    // for both min and initial young size if less than min heap.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
    verify_young_min(flag_value);

    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
    verify_young_initial(flag_value);

    // If NewSize is set on command line, but is larger than the min
    // heap size, it should only be used for initial young size.
    flag_value = 80 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
    verify_young_initial(flag_value);

    // If NewSize has been ergonomically set, the collector policy
    // should use it for min but calculate the initial young size
    // using NewRatio.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_ERGO(uintx, NewSize, flag_value);
    verify_young_min(flag_value);

    set_basic_flag_values();
    FLAG_SET_ERGO(uintx, NewSize, flag_value);
    verify_scaled_young_initial(InitialHeapSize);

    restore_flags();
  }

  // Exercise OldSize handling for command-line settings, including the case
  // where a large MaxNewSize forces OldSize to be reset ergonomically.
  static void test_old_size() {
    size_t flag_value;

    save_flags();

    // If OldSize is set on the command line, it should be used
    // for both min and initial old size if less than min heap.
    flag_value = 20 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    verify_old_min(flag_value);

    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    verify_old_initial(flag_value);

    // If MaxNewSize is large, the maximum OldSize will be less than
    // what's requested on the command line and it should be reset
    // ergonomically.
    flag_value = 30 * M;
    set_basic_flag_values();
    FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
    FLAG_SET_CMDLINE(uintx, MaxNewSize, 170*M);
    // Calculate what we expect the flag to be.
    flag_value = MaxHeapSize - MaxNewSize;
    verify_old_initial(flag_value);

    // NOTE(review): unlike test_new_size(), this test calls save_flags()
    // but never restore_flags(), leaving the modified flag values in
    // place for whatever runs next -- confirm whether this is intentional.
  }

  // Checks that the policy's minimum young size does not exceed 'expected'.
  static void verify_young_min(size_t expected) {
    MarkSweepPolicy msp;
    msp.initialize_all();

    assert(msp.min_young_size() <= expected, err_msg("%zu > %zu", msp.min_young_size(), expected));
  }

  // Checks that the policy's initial young size equals 'expected' exactly.
  static void verify_young_initial(size_t expected) {
    MarkSweepPolicy msp;
    msp.initialize_all();

    assert(msp.initial_young_size() == expected, err_msg("%zu != %zu", msp.initial_young_size(), expected));
  }

  // Checks that the initial young size is the NewRatio-scaled fraction of
  // 'initial_heap_size', and that NewSize was updated ergonomically to match.
  static void verify_scaled_young_initial(size_t initial_heap_size) {
    MarkSweepPolicy msp;
    msp.initialize_all();

    size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
    assert(msp.initial_young_size() == expected, err_msg("%zu != %zu", msp.initial_young_size(), expected));
    assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,
        err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
  }

  // Checks that the policy's minimum old size does not exceed 'expected'.
  static void verify_old_min(size_t expected) {
    MarkSweepPolicy msp;
    msp.initialize_all();

    assert(msp.min_old_size() <= expected, err_msg("%zu > %zu", msp.min_old_size(), expected));
  }

  // Checks that the policy's initial old size equals 'expected' exactly.
  static void verify_old_initial(size_t expected) {
    MarkSweepPolicy msp;
    msp.initialize_all();

    assert(msp.initial_old_size() == expected, err_msg("%zu != %zu", msp.initial_old_size(), expected));
  }


 private:
  // Saved copies of the globals the tests mutate; filled by save_flags()
  // and written back by restore_flags().
  static size_t original_InitialHeapSize;
  static size_t original_MaxHeapSize;
  static size_t original_MaxNewSize;
  static size_t original_MinHeapDeltaBytes;
  static size_t original_NewSize;
  static size_t original_OldSize;

  // Establish a known baseline flag configuration for each sub-test.
  // NOTE(review): the min heap size set here is not captured by
  // save_flags()/restore_flags(), so it is not restored afterwards --
  // confirm whether that matters to subsequent tests.
  static void set_basic_flag_values() {
    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
    FLAG_SET_ERGO(uintx, InitialHeapSize, 100 * M);
    FLAG_SET_ERGO(uintx, OldSize, 4 * M);
    FLAG_SET_ERGO(uintx, NewSize, 1 * M);
    FLAG_SET_ERGO(uintx, MaxNewSize, 80 * M);
    Arguments::set_min_heap_size(40 * M);
  }

  static void save_flags() {
    original_InitialHeapSize   = InitialHeapSize;
    original_MaxHeapSize       = MaxHeapSize;
    original_MaxNewSize        = MaxNewSize;
    original_MinHeapDeltaBytes = MinHeapDeltaBytes;
    original_NewSize           = NewSize;
    original_OldSize           = OldSize;
  }

  static void restore_flags() {
    InitialHeapSize   = original_InitialHeapSize;
    MaxHeapSize       = original_MaxHeapSize;
    MaxNewSize        = original_MaxNewSize;
    MinHeapDeltaBytes = original_MinHeapDeltaBytes;
    NewSize           = original_NewSize;
    OldSize           = original_OldSize;
  }
};

// Out-of-class definitions for the saved-flag statics.
size_t TestGenCollectorPolicy::original_InitialHeapSize   = 0;
size_t TestGenCollectorPolicy::original_MaxHeapSize       = 0;
size_t TestGenCollectorPolicy::original_MaxNewSize        = 0;
size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0;
size_t TestGenCollectorPolicy::original_NewSize           = 0;
size_t TestGenCollectorPolicy::original_OldSize           = 0;

// Entry points called from the internal VM test driver.
void TestNewSize_test() {
  TestGenCollectorPolicy::test_new_size();
}

void TestOldSize_test() {
  TestGenCollectorPolicy::test_old_size();
}

#endif