433 };
434
// RAII guard for a double-valued flag: remembers the flag's current value,
// installs a new one, and restores the remembered value when the guard
// leaves scope.
class DoubleFlagSetting {
  double  _saved;    // value to reinstate on destruction
  double* _target;   // flag being temporarily overridden
 public:
  DoubleFlagSetting(double& fl, double newValue) : _saved(fl), _target(&fl) { fl = newValue; }
  ~DoubleFlagSetting() { *_target = _saved; }
};
442
// RAII guard for a size_t-valued flag: the previous value is captured on
// construction and written back automatically on scope exit.
class SizeTFlagSetting {
  size_t  _saved;    // value to reinstate on destruction
  size_t* _target;   // flag being temporarily overridden
 public:
  SizeTFlagSetting(size_t& fl, size_t newValue) : _saved(fl), _target(&fl) { fl = newValue; }
  ~SizeTFlagSetting() { *_target = _saved; }
};
450
451
// Static by-name lookup/update interface for the VM's command-line flags.
// The *At() methods read a flag's current value into *value; the *AtPut()
// methods store *value into the flag and record the given origin.
// Each type has a (name, len) overload for non-NUL-terminated names plus a
// convenience overload that delegates via strlen(name).
// NOTE(review): the exact semantics of allow_locked/return_flag are defined
// in the out-of-line implementations — not visible here; confirm there.
class CommandLineFlags {
  // Set once all flags have their final values; queried via finishedInitializing().
  static bool _finished_initializing;
 public:
  static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
  static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }

  static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin);
  static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }

  static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin);
  static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }

  static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
  static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }

  static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
  static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }

  static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
  // Contract: Flag will make private copy of the incoming value.
  // Outgoing value is always malloc-ed, and caller MUST call free.
  static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
  static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }

  // Returns false if name is not a command line flag.
  static bool wasSetOnCmdline(const char* name, bool* value);
  static void printSetFlags(outputStream* out);

  // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
  static void printFlags(outputStream* out, bool withComments, bool printRanges = false);

  // Returns true if all flags have their final values set (ready for ranges and constraint check)
  static bool finishedInitializing() { return _finished_initializing; }

  // Check the final values of all flags for ranges and constraints
  static bool check_all_ranges_and_constraints();

  static void verify() PRODUCT_RETURN;
};
517
// use this for flags that are true by default in the debug version but
// false in the optimized version, and vice versa
// (ASSERT is defined only in debug builds, so the two branches below give
// the debug and optimized defaults respectively)
#ifdef ASSERT
#define trueInDebug true
#define falseInDebug false
#else
#define trueInDebug false
#define falseInDebug true
#endif
527
528 // use this for flags that are true per default in the product build
529 // but false in development builds, and vice versa
530 #ifdef PRODUCT
531 #define trueInProduct true
532 #define falseInProduct false
533 #else
623 "lp64_product means flag is always constant in 32 bit VM") \
624 \
625 notproduct(bool, CheckCompressedOops, true, \
626 "Generate checks in encoding/decoding code in debug VM") \
627 \
628 product_pd(size_t, HeapBaseMinAddress, \
629 "OS specific low limit for heap base address") \
630 \
631 product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \
632 "Heap allocation steps through preferred address regions to find" \
633 " where it can allocate the heap. Number of steps to take per " \
634 "region.") \
635 range(1, max_uintx) \
636 \
637 diagnostic(bool, PrintCompressedOopsMode, false, \
638 "Print compressed oops base address and encoding mode") \
639 \
640 lp64_product(intx, ObjectAlignmentInBytes, 8, \
641 "Default object alignment in bytes, 8 is minimum") \
642 range(8, 256) \
643 constraint(ObjectAlignmentInBytesConstraintFunc) \
644 \
645 product(bool, AssumeMP, false, \
646 "Instruct the VM to assume multiple processors are available") \
647 \
648 /* UseMembar is theoretically a temp flag used for memory barrier */ \
649 /* removal testing. It was supposed to be removed before FCS but has */ \
650 /* been re-added (see 6401008) */ \
651 product_pd(bool, UseMembar, \
652 "(Unstable) Issues membars on thread state transitions") \
653 \
654 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
655 "Clean the chunk pool asynchronously") \
656 \
657 experimental(bool, AlwaysSafeConstructors, false, \
658 "Force safe construction, as if all fields are final.") \
659 \
660 /* Temporary: See 6948537 */ \
661 experimental(bool, UseMemSetInBOT, true, \
662 "(Unstable) uses memset in BOT updates in GC code") \
663 \
1377 \
1378 product(intx, FieldsAllocationStyle, 1, \
1379 "0 - type based with oops first, " \
1380 "1 - with oops last, " \
1381 "2 - oops in super and sub classes are together") \
1382 range(0, 2) \
1383 \
1384 product(bool, CompactFields, true, \
1385 "Allocate nonstatic fields in gaps between previous fields") \
1386 \
1387 notproduct(bool, PrintFieldLayout, false, \
1388 "Print field layout for each class") \
1389 \
1390 /* Need to limit the extent of the padding to reasonable size. */\
1391 /* 8K is well beyond the reasonable HW cache line size, even with */\
1392 /* aggressive prefetching, while still leaving the room for segregating */\
1393 /* among the distinct pages. */\
1394 product(intx, ContendedPaddingWidth, 128, \
1395 "How many bytes to pad the fields/classes marked @Contended with")\
1396 range(0, 8192) \
1397 constraint(ContendedPaddingWidthConstraintFunc) \
1398 \
1399 product(bool, EnableContended, true, \
1400 "Enable @Contended annotation support") \
1401 \
1402 product(bool, RestrictContended, true, \
1403 "Restrict @Contended to trusted classes") \
1404 \
1405 product(bool, UseBiasedLocking, true, \
1406 "Enable biased locking in JVM") \
1407 \
1408 product(intx, BiasedLockingStartupDelay, 4000, \
1409 "Number of milliseconds to wait before enabling biased locking") \
1410 \
1411 diagnostic(bool, PrintBiasedLockingStatistics, false, \
1412 "Print statistics of biased locking in JVM") \
1413 \
1414 product(intx, BiasedLockingBulkRebiasThreshold, 20, \
1415 "Threshold of number of revocations per type to try to " \
1416 "rebias all objects in the heap of that type") \
1417 \
1578 "Size of heap (bytes) per GC thread used in calculating the " \
1579 "number of GC threads") \
1580 range((uintx)os::vm_page_size(), max_uintx) \
1581 \
1582 product(bool, TraceDynamicGCThreads, false, \
1583 "Trace the dynamic GC thread usage") \
1584 \
1585 develop(bool, ParallelOldGCSplitALot, false, \
1586 "Provoke splitting (copying data from a young gen space to " \
1587 "multiple destination spaces)") \
1588 \
1589 develop(uintx, ParallelOldGCSplitInterval, 3, \
1590 "How often to provoke splitting a young gen space") \
1591 range(0, max_uintx) \
1592 \
1593 product(uint, ConcGCThreads, 0, \
1594 "Number of threads concurrent gc will use") \
1595 \
1596 product(size_t, YoungPLABSize, 4096, \
1597 "Size of young gen promotion LAB's (in HeapWords)") \
1598 \
1599 product(size_t, OldPLABSize, 1024, \
1600 "Size of old gen promotion LAB's (in HeapWords), or Number \
1601 of blocks to attempt to claim when refilling CMS LAB's") \
1602 \
1603 product(uintx, GCTaskTimeStampEntries, 200, \
1604 "Number of time stamp entries per gc worker thread") \
1605 range(1, max_uintx) \
1606 \
1607 product(bool, AlwaysTenure, false, \
1608 "Always tenure objects in eden (ParallelGC only)") \
1609 \
1610 product(bool, NeverTenure, false, \
1611 "Never tenure objects in eden, may tenure on overflow " \
1612 "(ParallelGC only)") \
1613 \
1614 product(bool, ScavengeBeforeFullGC, true, \
1615 "Scavenge youngest generation before each full GC.") \
1616 \
1617 develop(bool, ScavengeWithObjectsInToSpace, false, \
1716 "Percentage (0-100) used to weight the current sample when " \
1717 "computing exponentially decaying average for resizing " \
1718 "OldPLABSize") \
1719 range(0, 100) \
1720 \
1721 product(bool, ResizeOldPLAB, true, \
1722 "Dynamically resize (old gen) promotion LAB's") \
1723 \
1724 product(bool, PrintOldPLAB, false, \
1725 "Print (old gen) promotion LAB's sizing decisions") \
1726 \
1727 product(size_t, CMSOldPLABMax, 1024, \
1728 "Maximum size of CMS gen promotion LAB caches per worker " \
1729 "per block size") \
1730 range(1, max_uintx) \
1731 \
1732 product(size_t, CMSOldPLABMin, 16, \
1733 "Minimum size of CMS gen promotion LAB caches per worker " \
1734 "per block size") \
1735 range(1, max_uintx) \
1736 constraint(CMSOldPLABMinConstraintFunc) \
1737 \
1738 product(uintx, CMSOldPLABNumRefills, 4, \
1739 "Nominal number of refills of CMS gen promotion LAB cache " \
1740 "per worker per block size") \
1741 range(1, max_uintx) \
1742 \
1743 product(bool, CMSOldPLABResizeQuicker, false, \
1744 "React on-the-fly during a scavenge to a sudden " \
1745 "change in block demand rate") \
1746 \
1747 product(uintx, CMSOldPLABToleranceFactor, 4, \
1748 "The tolerance of the phase-change detector for on-the-fly " \
1749 "PLAB resizing during a scavenge") \
1750 range(1, max_uintx) \
1751 \
1752 product(uintx, CMSOldPLABReactivityFactor, 2, \
1753 "The gain in the feedback loop for on-the-fly PLAB resizing " \
1754 "during a scavenge") \
1755 \
1756 product(bool, AlwaysPreTouch, false, \
1912 \
1913 product(bool, CMSPrintEdenSurvivorChunks, false, \
1914 "Print the eden and the survivor chunks used for the parallel " \
1915 "initial mark or remark of the eden/survivor spaces") \
1916 \
1917 product(bool, CMSConcurrentMTEnabled, true, \
1918 "Whether multi-threaded concurrent work enabled " \
1919 "(effective only if ParNewGC)") \
1920 \
1921 product(bool, CMSPrecleaningEnabled, true, \
1922 "Whether concurrent precleaning enabled") \
1923 \
1924 product(uintx, CMSPrecleanIter, 3, \
1925 "Maximum number of precleaning iteration passes") \
1926 range(0, 9) \
1927 \
1928 product(uintx, CMSPrecleanDenominator, 3, \
1929 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1930 "ratio") \
1931 range(1, max_uintx) \
1932 constraint(CMSPrecleanDenominatorConstraintFunc) \
1933 \
1934 product(uintx, CMSPrecleanNumerator, 2, \
1935 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1936 "ratio") \
1937 range(0, max_uintx-1) \
1938 constraint(CMSPrecleanNumeratorConstraintFunc) \
1939 \
1940 product(bool, CMSPrecleanRefLists1, true, \
1941 "Preclean ref lists during (initial) preclean phase") \
1942 \
1943 product(bool, CMSPrecleanRefLists2, false, \
1944 "Preclean ref lists during abortable preclean phase") \
1945 \
1946 product(bool, CMSPrecleanSurvivors1, false, \
1947 "Preclean survivors during (initial) preclean phase") \
1948 \
1949 product(bool, CMSPrecleanSurvivors2, true, \
1950 "Preclean survivors during abortable preclean phase") \
1951 \
1952 product(uintx, CMSPrecleanThreshold, 1000, \
1953 "Do not iterate again if number of dirty cards is less than this")\
1954 range(100, max_uintx) \
1955 \
1956 product(bool, CMSCleanOnEnter, true, \
1957 "Clean-on-enter optimization for reducing number of dirty cards") \
1958 \
3336 product_pd(size_t, NewSizeThreadIncrease, \
3337 "Additional size added to desired new generation size per " \
3338 "non-daemon thread (in bytes)") \
3339 \
3340 product_pd(size_t, MetaspaceSize, \
3341 "Initial size of Metaspaces (in bytes)") \
3342 \
3343 product(size_t, MaxMetaspaceSize, max_uintx, \
3344 "Maximum size of Metaspaces (in bytes)") \
3345 \
3346 product(size_t, CompressedClassSpaceSize, 1*G, \
3347 "Maximum size of class area in Metaspace when compressed " \
3348 "class pointers are used") \
3349 range(1*M, 3*G) \
3350 \
3351 manageable(uintx, MinHeapFreeRatio, 40, \
3352 "The minimum percentage of heap free after GC to avoid expansion."\
3353 " For most GCs this applies to the old generation. In G1 and" \
3354 " ParallelGC it applies to the whole heap.") \
3355 range(0, 100) \
3356 constraint(MinHeapFreeRatioConstraintFunc) \
3357 \
3358 manageable(uintx, MaxHeapFreeRatio, 70, \
3359 "The maximum percentage of heap free after GC to avoid shrinking."\
3360 " For most GCs this applies to the old generation. In G1 and" \
3361 " ParallelGC it applies to the whole heap.") \
3362 range(0, 100) \
3363 constraint(MaxHeapFreeRatioConstraintFunc) \
3364 \
3365 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
3366 "Number of milliseconds per MB of free space in the heap") \
3367 \
3368 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
3369 "The minimum change in heap space due to GC (in bytes)") \
3370 \
3371 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
3372 "The minimum expansion of Metaspace (in bytes)") \
3373 \
3374 product(uintx, MaxMetaspaceFreeRatio, 70, \
3375 "The maximum percentage of Metaspace free after GC to avoid " \
3376 "shrinking") \
3377 range(0, 100) \
3378 constraint(MaxMetaspaceFreeRatioConstraintFunc) \
3379 \
3380 product(uintx, MinMetaspaceFreeRatio, 40, \
3381 "The minimum percentage of Metaspace free after GC to avoid " \
3382 "expansion") \
3383 range(0, 99) \
3384 constraint(MinMetaspaceFreeRatioConstraintFunc) \
3385 \
3386 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
3387 "The maximum expansion of Metaspace without full GC (in bytes)") \
3388 \
3389 product(uintx, QueuedAllocationWarningCount, 0, \
3390 "Number of times an allocation that queues behind a GC " \
3391 "will retry before printing a warning") \
3392 \
3393 diagnostic(uintx, VerifyGCStartAt, 0, \
3394 "GC invoke count where +VerifyBefore/AfterGC kicks in") \
3395 \
3396 diagnostic(intx, VerifyGCLevel, 0, \
3397 "Generation level at which to start +VerifyBefore/AfterGC") \
3398 \
3399 product(uintx, MaxTenuringThreshold, 15, \
3400 "Maximum value for tenuring threshold") \
3401 range(0, markOopDesc::max_age + 1) \
3402 constraint(MaxTenuringThresholdConstraintFunc) \
3403 \
3404 product(uintx, InitialTenuringThreshold, 7, \
3405 "Initial value for tenuring threshold") \
3406 range(0, markOopDesc::max_age + 1) \
3407 constraint(InitialTenuringThresholdConstraintFunc) \
3408 \
3409 product(uintx, TargetSurvivorRatio, 50, \
3410 "Desired percentage of survivor space used after scavenge") \
3411 range(0, 100) \
3412 \
3413 product(uintx, MarkSweepDeadRatio, 5, \
3414 "Percentage (0-100) of the old gen allowed as dead wood. " \
3415 "Serial mark sweep treats this as both the minimum and maximum " \
3416 "value. " \
3417 "CMS uses this value only if it falls back to mark sweep. " \
3418 "Par compact uses a variable scale based on the density of the " \
3419 "generation and treats this as the maximum value when the heap " \
3420 "is either completely full or completely empty. Par compact " \
3421 "also has a smaller default value; see arguments.cpp.") \
3422 range(0, 100) \
3423 \
3424 product(uintx, MarkSweepAlwaysCompactCount, 4, \
3425 "How often should we fully compact the heap (ignoring the dead " \
3426 "space parameters)") \
3427 range(1, max_uintx) \
4065 "to be considered for deduplication") \
4066 range(1, markOopDesc::max_age) \
4067 \
4068 diagnostic(bool, StringDeduplicationResizeALot, false, \
4069 "Force table resize every time the table is scanned") \
4070 \
4071 diagnostic(bool, StringDeduplicationRehashALot, false, \
4072 "Force table rehash every time the table is scanned") \
4073 \
4074 develop(bool, TraceDefaultMethods, false, \
4075 "Trace the default method processing steps") \
4076 \
4077 diagnostic(bool, WhiteBoxAPI, false, \
4078 "Enable internal testing APIs") \
4079 \
4080 product(bool, PrintGCCause, true, \
4081 "Include GC cause in GC logging") \
4082 \
4083 experimental(intx, SurvivorAlignmentInBytes, 0, \
4084 "Default survivor space alignment in bytes") \
4085 constraint(SurvivorAlignmentInBytesConstraintFunc) \
4086 \
4087 product(bool , AllowNonVirtualCalls, false, \
4088 "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \
4089 \
4090 product(ccstr, DumpLoadedClassList, NULL, \
4091 "Dump the names all loaded classes, that could be stored into " \
4092 "the CDS archive, in the specified file") \
4093 \
4094 product(ccstr, SharedClassListFile, NULL, \
4095 "Override the default CDS class list") \
4096 \
4097 diagnostic(ccstr, SharedArchiveFile, NULL, \
4098 "Override the default location of the CDS archive file") \
4099 \
4100 product(ccstr, ExtraSharedClassListFile, NULL, \
4101 "Extra classlist for building the CDS archive file") \
4102 \
4103 experimental(size_t, ArrayAllocatorMallocLimit, \
4104 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \
4105 "Allocation less than this value will be allocated " \
// MATERIALIZE_* macros define the actual storage for each flag declared in
// the flag tables: "type name = value;".
#define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
#define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
#ifdef PRODUCT
// In product builds, developer/notproduct flags are constant; they are
// materialized under a CONST_ prefix instead of as writable globals.
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type CONST_##name = value;
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type CONST_##name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type CONST_##name = value;
#else
#define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
// pd_ (platform-dependent) flags take their default from a pd_<name> constant.
#define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
#define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
#endif // PRODUCT
#ifdef _LP64
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
#else
#define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
#endif // _LP64

// Only materialize src code for range checking when required, ignore otherwise
#define IGNORE_RANGE(a, b)
// Only materialize src code for constraint checking when required, ignore otherwise
#define IGNORE_CONSTRAINT(func)
4186
// Expand the RUNTIME_FLAGS table with the DECLARE_* macros so every runtime
// flag gets a declaration here; range(...)/constraint(...) clauses are
// dropped by passing the IGNORE_* macros for those slots.
RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \
              DECLARE_PD_DEVELOPER_FLAG, \
              DECLARE_PRODUCT_FLAG, \
              DECLARE_PD_PRODUCT_FLAG, \
              DECLARE_DIAGNOSTIC_FLAG, \
              DECLARE_EXPERIMENTAL_FLAG, \
              DECLARE_NOTPRODUCT_FLAG, \
              DECLARE_MANAGEABLE_FLAG, \
              DECLARE_PRODUCT_RW_FLAG, \
              DECLARE_LP64_PRODUCT_FLAG, \
              IGNORE_RANGE, \
              IGNORE_CONSTRAINT)
4199
4200 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \
4201 DECLARE_PD_DEVELOPER_FLAG, \
4202 DECLARE_PRODUCT_FLAG, \
4203 DECLARE_PD_PRODUCT_FLAG, \
4204 DECLARE_DIAGNOSTIC_FLAG, \
4205 DECLARE_NOTPRODUCT_FLAG, \
|
433 };
434
// Scoped override for a double flag: construction swaps in newValue,
// destruction reinstates the value the flag held beforehand.
class DoubleFlagSetting {
  double  _old_value;
  double* _flag_addr;
 public:
  DoubleFlagSetting(double& fl, double newValue) : _old_value(fl), _flag_addr(&fl) {
    *_flag_addr = newValue;
  }
  ~DoubleFlagSetting() { *_flag_addr = _old_value; }
};
442
// Scoped override for a size_t flag: construction swaps in newValue,
// destruction reinstates the value the flag held beforehand.
class SizeTFlagSetting {
  size_t  _old_value;
  size_t* _flag_addr;
 public:
  SizeTFlagSetting(size_t& fl, size_t newValue) : _old_value(fl), _flag_addr(&fl) {
    *_flag_addr = newValue;
  }
  ~SizeTFlagSetting() { *_flag_addr = _old_value; }
};
450
451
// Static by-name lookup/update interface for the VM's command-line flags.
// The *At() methods read a flag's current value into *value; the *AtPut()
// methods store *value into the flag and record the given origin.
// Each type has a (name, len) overload for non-NUL-terminated names plus a
// convenience overload that delegates via strlen(name).
// NOTE(review): the exact semantics of allow_locked/return_flag are defined
// in the out-of-line implementations — not visible here; confirm there.
class CommandLineFlags {
 public:
  static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
  static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }

  static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin);
  static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }

  static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin);
  static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }

  static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
  static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }

  static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
  static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
  static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }

  static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
  static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
  // Contract: Flag will make private copy of the incoming value.
  // Outgoing value is always malloc-ed, and caller MUST call free.
  static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
  static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }

  // Returns false if name is not a command line flag.
  static bool wasSetOnCmdline(const char* name, bool* value);
  static void printSetFlags(outputStream* out);

  // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
  static void printFlags(outputStream* out, bool withComments, bool printRanges = false);

  // Check the final values of all flags for ranges and all flags for 'AfterParse' constraints
  static bool check_ranges_and_constraints_of_after_parse();

  // Check the final values of all flags for 'AfterMemoryInit' constraints
  static bool check_constraints_of_after_memory_init();

  static void verify() PRODUCT_RETURN;
};
516
// use this for flags that are true by default in the debug version but
// false in the optimized version, and vice versa
// (ASSERT is defined only in debug builds, so the two branches below give
// the debug and optimized defaults respectively)
#ifdef ASSERT
#define trueInDebug true
#define falseInDebug false
#else
#define trueInDebug false
#define falseInDebug true
#endif
526
527 // use this for flags that are true per default in the product build
528 // but false in development builds, and vice versa
529 #ifdef PRODUCT
530 #define trueInProduct true
531 #define falseInProduct false
532 #else
622 "lp64_product means flag is always constant in 32 bit VM") \
623 \
624 notproduct(bool, CheckCompressedOops, true, \
625 "Generate checks in encoding/decoding code in debug VM") \
626 \
627 product_pd(size_t, HeapBaseMinAddress, \
628 "OS specific low limit for heap base address") \
629 \
630 product(uintx, HeapSearchSteps, 3 PPC64_ONLY(+17), \
631 "Heap allocation steps through preferred address regions to find" \
632 " where it can allocate the heap. Number of steps to take per " \
633 "region.") \
634 range(1, max_uintx) \
635 \
636 diagnostic(bool, PrintCompressedOopsMode, false, \
637 "Print compressed oops base address and encoding mode") \
638 \
639 lp64_product(intx, ObjectAlignmentInBytes, 8, \
640 "Default object alignment in bytes, 8 is minimum") \
641 range(8, 256) \
642 constraint(ObjectAlignmentInBytesConstraintFunc,Anytime) \
643 \
644 product(bool, AssumeMP, false, \
645 "Instruct the VM to assume multiple processors are available") \
646 \
647 /* UseMembar is theoretically a temp flag used for memory barrier */ \
648 /* removal testing. It was supposed to be removed before FCS but has */ \
649 /* been re-added (see 6401008) */ \
650 product_pd(bool, UseMembar, \
651 "(Unstable) Issues membars on thread state transitions") \
652 \
653 develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
654 "Clean the chunk pool asynchronously") \
655 \
656 experimental(bool, AlwaysSafeConstructors, false, \
657 "Force safe construction, as if all fields are final.") \
658 \
659 /* Temporary: See 6948537 */ \
660 experimental(bool, UseMemSetInBOT, true, \
661 "(Unstable) uses memset in BOT updates in GC code") \
662 \
1376 \
1377 product(intx, FieldsAllocationStyle, 1, \
1378 "0 - type based with oops first, " \
1379 "1 - with oops last, " \
1380 "2 - oops in super and sub classes are together") \
1381 range(0, 2) \
1382 \
1383 product(bool, CompactFields, true, \
1384 "Allocate nonstatic fields in gaps between previous fields") \
1385 \
1386 notproduct(bool, PrintFieldLayout, false, \
1387 "Print field layout for each class") \
1388 \
1389 /* Need to limit the extent of the padding to reasonable size. */\
1390 /* 8K is well beyond the reasonable HW cache line size, even with */\
1391 /* aggressive prefetching, while still leaving the room for segregating */\
1392 /* among the distinct pages. */\
1393 product(intx, ContendedPaddingWidth, 128, \
1394 "How many bytes to pad the fields/classes marked @Contended with")\
1395 range(0, 8192) \
1396 constraint(ContendedPaddingWidthConstraintFunc,Anytime) \
1397 \
1398 product(bool, EnableContended, true, \
1399 "Enable @Contended annotation support") \
1400 \
1401 product(bool, RestrictContended, true, \
1402 "Restrict @Contended to trusted classes") \
1403 \
1404 product(bool, UseBiasedLocking, true, \
1405 "Enable biased locking in JVM") \
1406 \
1407 product(intx, BiasedLockingStartupDelay, 4000, \
1408 "Number of milliseconds to wait before enabling biased locking") \
1409 \
1410 diagnostic(bool, PrintBiasedLockingStatistics, false, \
1411 "Print statistics of biased locking in JVM") \
1412 \
1413 product(intx, BiasedLockingBulkRebiasThreshold, 20, \
1414 "Threshold of number of revocations per type to try to " \
1415 "rebias all objects in the heap of that type") \
1416 \
1577 "Size of heap (bytes) per GC thread used in calculating the " \
1578 "number of GC threads") \
1579 range((uintx)os::vm_page_size(), max_uintx) \
1580 \
1581 product(bool, TraceDynamicGCThreads, false, \
1582 "Trace the dynamic GC thread usage") \
1583 \
1584 develop(bool, ParallelOldGCSplitALot, false, \
1585 "Provoke splitting (copying data from a young gen space to " \
1586 "multiple destination spaces)") \
1587 \
1588 develop(uintx, ParallelOldGCSplitInterval, 3, \
1589 "How often to provoke splitting a young gen space") \
1590 range(0, max_uintx) \
1591 \
1592 product(uint, ConcGCThreads, 0, \
1593 "Number of threads concurrent gc will use") \
1594 \
1595 product(size_t, YoungPLABSize, 4096, \
1596 "Size of young gen promotion LAB's (in HeapWords)") \
1597 constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \
1598 \
1599 product(size_t, OldPLABSize, 1024, \
1600 "Size of old gen promotion LAB's (in HeapWords), or Number \
1601 of blocks to attempt to claim when refilling CMS LAB's") \
1602 \
1603 product(uintx, GCTaskTimeStampEntries, 200, \
1604 "Number of time stamp entries per gc worker thread") \
1605 range(1, max_uintx) \
1606 \
1607 product(bool, AlwaysTenure, false, \
1608 "Always tenure objects in eden (ParallelGC only)") \
1609 \
1610 product(bool, NeverTenure, false, \
1611 "Never tenure objects in eden, may tenure on overflow " \
1612 "(ParallelGC only)") \
1613 \
1614 product(bool, ScavengeBeforeFullGC, true, \
1615 "Scavenge youngest generation before each full GC.") \
1616 \
1617 develop(bool, ScavengeWithObjectsInToSpace, false, \
1716 "Percentage (0-100) used to weight the current sample when " \
1717 "computing exponentially decaying average for resizing " \
1718 "OldPLABSize") \
1719 range(0, 100) \
1720 \
1721 product(bool, ResizeOldPLAB, true, \
1722 "Dynamically resize (old gen) promotion LAB's") \
1723 \
1724 product(bool, PrintOldPLAB, false, \
1725 "Print (old gen) promotion LAB's sizing decisions") \
1726 \
1727 product(size_t, CMSOldPLABMax, 1024, \
1728 "Maximum size of CMS gen promotion LAB caches per worker " \
1729 "per block size") \
1730 range(1, max_uintx) \
1731 \
1732 product(size_t, CMSOldPLABMin, 16, \
1733 "Minimum size of CMS gen promotion LAB caches per worker " \
1734 "per block size") \
1735 range(1, max_uintx) \
1736 constraint(CMSOldPLABMinConstraintFunc,AfterParse) \
1737 \
1738 product(uintx, CMSOldPLABNumRefills, 4, \
1739 "Nominal number of refills of CMS gen promotion LAB cache " \
1740 "per worker per block size") \
1741 range(1, max_uintx) \
1742 \
1743 product(bool, CMSOldPLABResizeQuicker, false, \
1744 "React on-the-fly during a scavenge to a sudden " \
1745 "change in block demand rate") \
1746 \
1747 product(uintx, CMSOldPLABToleranceFactor, 4, \
1748 "The tolerance of the phase-change detector for on-the-fly " \
1749 "PLAB resizing during a scavenge") \
1750 range(1, max_uintx) \
1751 \
1752 product(uintx, CMSOldPLABReactivityFactor, 2, \
1753 "The gain in the feedback loop for on-the-fly PLAB resizing " \
1754 "during a scavenge") \
1755 \
1756 product(bool, AlwaysPreTouch, false, \
1912 \
1913 product(bool, CMSPrintEdenSurvivorChunks, false, \
1914 "Print the eden and the survivor chunks used for the parallel " \
1915 "initial mark or remark of the eden/survivor spaces") \
1916 \
1917 product(bool, CMSConcurrentMTEnabled, true, \
1918 "Whether multi-threaded concurrent work enabled " \
1919 "(effective only if ParNewGC)") \
1920 \
1921 product(bool, CMSPrecleaningEnabled, true, \
1922 "Whether concurrent precleaning enabled") \
1923 \
1924 product(uintx, CMSPrecleanIter, 3, \
1925 "Maximum number of precleaning iteration passes") \
1926 range(0, 9) \
1927 \
1928 product(uintx, CMSPrecleanDenominator, 3, \
1929 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1930 "ratio") \
1931 range(1, max_uintx) \
1932 constraint(CMSPrecleanDenominatorConstraintFunc,AfterParse) \
1933 \
1934 product(uintx, CMSPrecleanNumerator, 2, \
1935 "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
1936 "ratio") \
1937 range(0, max_uintx-1) \
1938 constraint(CMSPrecleanNumeratorConstraintFunc,AfterParse) \
1939 \
1940 product(bool, CMSPrecleanRefLists1, true, \
1941 "Preclean ref lists during (initial) preclean phase") \
1942 \
1943 product(bool, CMSPrecleanRefLists2, false, \
1944 "Preclean ref lists during abortable preclean phase") \
1945 \
1946 product(bool, CMSPrecleanSurvivors1, false, \
1947 "Preclean survivors during (initial) preclean phase") \
1948 \
1949 product(bool, CMSPrecleanSurvivors2, true, \
1950 "Preclean survivors during abortable preclean phase") \
1951 \
1952 product(uintx, CMSPrecleanThreshold, 1000, \
1953 "Do not iterate again if number of dirty cards is less than this")\
1954 range(100, max_uintx) \
1955 \
1956 product(bool, CMSCleanOnEnter, true, \
1957 "Clean-on-enter optimization for reducing number of dirty cards") \
1958 \
3336 product_pd(size_t, NewSizeThreadIncrease, \
3337 "Additional size added to desired new generation size per " \
3338 "non-daemon thread (in bytes)") \
3339 \
3340 product_pd(size_t, MetaspaceSize, \
3341 "Initial size of Metaspaces (in bytes)") \
3342 \
3343 product(size_t, MaxMetaspaceSize, max_uintx, \
3344 "Maximum size of Metaspaces (in bytes)") \
3345 \
3346 product(size_t, CompressedClassSpaceSize, 1*G, \
3347 "Maximum size of class area in Metaspace when compressed " \
3348 "class pointers are used") \
3349 range(1*M, 3*G) \
3350 \
3351 manageable(uintx, MinHeapFreeRatio, 40, \
3352 "The minimum percentage of heap free after GC to avoid expansion."\
3353 " For most GCs this applies to the old generation. In G1 and" \
3354 " ParallelGC it applies to the whole heap.") \
3355 range(0, 100) \
3356 constraint(MinHeapFreeRatioConstraintFunc,AfterParse) \
3357 \
3358 manageable(uintx, MaxHeapFreeRatio, 70, \
3359 "The maximum percentage of heap free after GC to avoid shrinking."\
3360 " For most GCs this applies to the old generation. In G1 and" \
3361 " ParallelGC it applies to the whole heap.") \
3362 range(0, 100) \
3363 constraint(MaxHeapFreeRatioConstraintFunc,AfterParse) \
3364 \
3365 product(intx, SoftRefLRUPolicyMSPerMB, 1000, \
3366 "Number of milliseconds per MB of free space in the heap") \
3367 \
3368 product(size_t, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
3369 "The minimum change in heap space due to GC (in bytes)") \
3370 \
3371 product(size_t, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
3372 "The minimum expansion of Metaspace (in bytes)") \
3373 \
3374 product(uintx, MaxMetaspaceFreeRatio, 70, \
3375 "The maximum percentage of Metaspace free after GC to avoid " \
3376 "shrinking") \
3377 range(0, 100) \
3378 constraint(MaxMetaspaceFreeRatioConstraintFunc,AfterParse) \
3379 \
3380 product(uintx, MinMetaspaceFreeRatio, 40, \
3381 "The minimum percentage of Metaspace free after GC to avoid " \
3382 "expansion") \
3383 range(0, 99) \
3384 constraint(MinMetaspaceFreeRatioConstraintFunc,AfterParse) \
3385 \
3386 product(size_t, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
3387 "The maximum expansion of Metaspace without full GC (in bytes)") \
3388 \
3389 product(uintx, QueuedAllocationWarningCount, 0, \
3390 "Number of times an allocation that queues behind a GC " \
3391 "will retry before printing a warning") \
3392 \
3393 diagnostic(uintx, VerifyGCStartAt, 0, \
3394 "GC invoke count where +VerifyBefore/AfterGC kicks in") \
3395 \
3396 diagnostic(intx, VerifyGCLevel, 0, \
3397 "Generation level at which to start +VerifyBefore/AfterGC") \
3398 \
3399 product(uintx, MaxTenuringThreshold, 15, \
3400 "Maximum value for tenuring threshold") \
3401 range(0, markOopDesc::max_age + 1) \
3402 constraint(MaxTenuringThresholdConstraintFunc,AfterParse) \
3403 \
3404 product(uintx, InitialTenuringThreshold, 7, \
3405 "Initial value for tenuring threshold") \
3406 range(0, markOopDesc::max_age + 1) \
3407 constraint(InitialTenuringThresholdConstraintFunc,AfterParse) \
3408 \
3409 product(uintx, TargetSurvivorRatio, 50, \
3410 "Desired percentage of survivor space used after scavenge") \
3411 range(0, 100) \
3412 \
3413 product(uintx, MarkSweepDeadRatio, 5, \
3414 "Percentage (0-100) of the old gen allowed as dead wood. " \
3415 "Serial mark sweep treats this as both the minimum and maximum " \
3416 "value. " \
3417 "CMS uses this value only if it falls back to mark sweep. " \
3418 "Par compact uses a variable scale based on the density of the " \
3419 "generation and treats this as the maximum value when the heap " \
3420 "is either completely full or completely empty. Par compact " \
3421 "also has a smaller default value; see arguments.cpp.") \
3422 range(0, 100) \
3423 \
3424 product(uintx, MarkSweepAlwaysCompactCount, 4, \
3425 "How often should we fully compact the heap (ignoring the dead " \
3426 "space parameters)") \
3427 range(1, max_uintx) \
4065 "to be considered for deduplication") \
4066 range(1, markOopDesc::max_age) \
4067 \
4068 diagnostic(bool, StringDeduplicationResizeALot, false, \
4069 "Force table resize every time the table is scanned") \
4070 \
4071 diagnostic(bool, StringDeduplicationRehashALot, false, \
4072 "Force table rehash every time the table is scanned") \
4073 \
4074 develop(bool, TraceDefaultMethods, false, \
4075 "Trace the default method processing steps") \
4076 \
4077 diagnostic(bool, WhiteBoxAPI, false, \
4078 "Enable internal testing APIs") \
4079 \
4080 product(bool, PrintGCCause, true, \
4081 "Include GC cause in GC logging") \
4082 \
4083 experimental(intx, SurvivorAlignmentInBytes, 0, \
4084 "Default survivor space alignment in bytes") \
4085 constraint(SurvivorAlignmentInBytesConstraintFunc,AfterParse) \
4086 \
4087 product(bool, AllowNonVirtualCalls, false,                               \
4088         "Obey the ACC_SUPER flag and allow invokenonvirtual calls")      \
4089 \
4090 product(ccstr, DumpLoadedClassList, NULL, \
4091 "Dump the names all loaded classes, that could be stored into " \
4092 "the CDS archive, in the specified file") \
4093 \
4094 product(ccstr, SharedClassListFile, NULL, \
4095 "Override the default CDS class list") \
4096 \
4097 diagnostic(ccstr, SharedArchiveFile, NULL, \
4098 "Override the default location of the CDS archive file") \
4099 \
4100 product(ccstr, ExtraSharedClassListFile, NULL, \
4101 "Extra classlist for building the CDS archive file") \
4102 \
4103 experimental(size_t, ArrayAllocatorMallocLimit, \
4104 SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1), \
4105 "Allocation less than this value will be allocated " \
// The MATERIALIZE_* macros expand a flag-table entry into an actual variable
// definition (presumably applied to the flag tables in a .cpp file; the
// invocation site is outside this chunk -- confirm there).
// Manageable and product_rw flags are always materialized as real, writable
// variables, since they can be changed at runtime.
4165 #define MATERIALIZE_MANAGEABLE_FLAG(type, name, value, doc) type name = value;
4166 #define MATERIALIZE_PRODUCT_RW_FLAG(type, name, value, doc) type name = value;
4167 #ifdef PRODUCT
// In product builds, develop/notproduct flags are materialized under the
// mangled name CONST_##name instead of the plain flag name.
// NOTE(review): any const qualification would come from the matching
// declaration macros, which are not visible in this chunk -- confirm at the
// DECLARE_* counterparts.
4168 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type CONST_##name = value;
4169 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type CONST_##name = pd_##name;
4170 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type CONST_##name = value;
4171 #else
// In non-product builds the same flags are ordinary variables under the plain
// flag name.  Platform-dependent (pd) flags take their initial value from a
// per-platform pd_##name constant rather than from the table.
4172 #define MATERIALIZE_DEVELOPER_FLAG(type, name, value, doc) type name = value;
4173 #define MATERIALIZE_PD_DEVELOPER_FLAG(type, name, doc) type name = pd_##name;
4174 #define MATERIALIZE_NOTPRODUCT_FLAG(type, name, value, doc) type name = value;
4175 #endif // PRODUCT
4176 #ifdef _LP64
// LP64-only product flags exist as variables only on 64-bit builds; on 32-bit
// builds the macro expands to nothing (the flag is a compile-time constant).
4177 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) type name = value;
4178 #else
4179 #define MATERIALIZE_LP64_PRODUCT_FLAG(type, name, value, doc) /* flag is constant */
4180 #endif // _LP64
4181
// The range()/constraint() clauses in the flag tables are only needed when
// building the range/constraint checking code; in this context they expand to
// nothing.
4182 // Only materialize src code for range checking when required, ignore otherwise
4183 #define IGNORE_RANGE(a, b)
4184 // Only materialize src code for constraint checking when required, ignore otherwise
4185 #define IGNORE_CONSTRAINT(func,type)
4186
// Expand the master runtime flag table with the DECLARE_* handlers so every
// flag name is in scope for this header's users (presumably extern variable
// declarations -- the DECLARE_* macro definitions are outside this chunk;
// confirm there).  range()/constraint() clauses are dropped via
// IGNORE_RANGE/IGNORE_CONSTRAINT.
4187 RUNTIME_FLAGS(DECLARE_DEVELOPER_FLAG, \
4188 DECLARE_PD_DEVELOPER_FLAG, \
4189 DECLARE_PRODUCT_FLAG, \
4190 DECLARE_PD_PRODUCT_FLAG, \
4191 DECLARE_DIAGNOSTIC_FLAG, \
4192 DECLARE_EXPERIMENTAL_FLAG, \
4193 DECLARE_NOTPRODUCT_FLAG, \
4194 DECLARE_MANAGEABLE_FLAG, \
4195 DECLARE_PRODUCT_RW_FLAG, \
4196 DECLARE_LP64_PRODUCT_FLAG, \
4197 IGNORE_RANGE, \
4198 IGNORE_CONSTRAINT)
4199
4200 RUNTIME_OS_FLAGS(DECLARE_DEVELOPER_FLAG, \
4201 DECLARE_PD_DEVELOPER_FLAG, \
4202 DECLARE_PRODUCT_FLAG, \
4203 DECLARE_PD_PRODUCT_FLAG, \
4204 DECLARE_DIAGNOSTIC_FLAG, \
4205 DECLARE_NOTPRODUCT_FLAG, \
|