41 #include "oops/oop.inline.hpp"
42 #include "prims/jvmtiExport.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/arguments_ext.hpp"
45 #include "runtime/commandLineFlagConstraintList.hpp"
46 #include "runtime/commandLineFlagWriteableList.hpp"
47 #include "runtime/commandLineFlagRangeList.hpp"
48 #include "runtime/globals.hpp"
49 #include "runtime/globals_extension.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/os.hpp"
52 #include "runtime/vm_version.hpp"
53 #include "services/management.hpp"
54 #include "services/memTracker.hpp"
55 #include "utilities/defaultStream.hpp"
56 #include "utilities/macros.hpp"
57 #include "utilities/stringUtils.hpp"
58 #if INCLUDE_JVMCI
59 #include "jvmci/jvmciRuntime.hpp"
60 #endif
61 #if INCLUDE_ALL_GCS
62 #include "gc/cms/compactibleFreeListSpace.hpp"
63 #include "gc/g1/g1CollectedHeap.inline.hpp"
64 #include "gc/parallel/parallelScavengeHeap.hpp"
65 #endif // INCLUDE_ALL_GCS
66
// Note: This is a special bug reporting site for the JVM
#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
#define DEFAULT_JAVA_LAUNCHER "generic"

// Static storage backing the Arguments class: raw -XX flag strings, the java
// command line, system properties, and a few parsed-state fields.  All are
// filled in during argument parsing at VM startup.
char* Arguments::_jvm_flags_file = NULL;
char** Arguments::_jvm_flags_array = NULL;
int Arguments::_num_jvm_flags = 0;
char** Arguments::_jvm_args_array = NULL;
int Arguments::_num_jvm_args = 0;
char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
size_t Arguments::_conservative_max_heap_alignment = 0;
size_t Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
bool Arguments::_xdebug_mode = false;
const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG;
1477 FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
1478
1479 // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
1480 // once these thresholds become supported.
1481
1482 FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
1483 FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
1484
1485 FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
1486 FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
1487
1488 FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
1489
1490 FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
1491 FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
1492 FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
1493 FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
1494 }
1495 }
1496
1497 #if INCLUDE_ALL_GCS
1498 static void disable_adaptive_size_policy(const char* collector_name) {
1499 if (UseAdaptiveSizePolicy) {
1500 if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
1501 warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
1502 collector_name);
1503 }
1504 FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
1505 }
1506 }
1507
// Establish ergonomic defaults for the ParNew young-generation collector,
// the young collector paired with CMS.  Called only on the CMS path.
void Arguments::set_parnew_gc_flags() {
  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
         "control point invariant");
  assert(UseConcMarkSweepGC, "CMS is expected to be on here");

  // Pick the worker-thread count ergonomically unless the user chose one;
  // an explicit zero is fatal because ParNew needs at least one worker.
  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
    FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
  } else if (ParallelGCThreads == 0) {
    jio_fprintf(defaultStream::error_stream(),
                "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
    vm_exit(1);
  }

  // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
  // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
  // we set them to 1024 and 1024.
  // See CR 6362902.
  if (FLAG_IS_DEFAULT(YoungPLABSize)) {
    FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
  }
  if (FLAG_IS_DEFAULT(OldPLABSize)) {
    FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
  }

  // When using compressed oops, we use local overflow stacks,
  // rather than using a global overflow list chained through
  // the klass word of the object's pre-image.
  if (UseCompressedOops && !ParGCUseLocalOverflow) {
    // Warn only when the user explicitly set the flag to false; we override
    // it either way because compressed oops require local overflow stacks.
    if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
      warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
    }
    FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
  }
  assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
}
1544
// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
// sparc/solaris for certain applications, but would gain from
// further optimization and tuning efforts, and would almost
// certainly gain from analysis of platform and environment.
void Arguments::set_cms_and_parnew_gc_flags() {
  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
  assert(UseConcMarkSweepGC, "CMS is expected to be on here");

  // Turn off AdaptiveSizePolicy by default for cms until it is complete.
  disable_adaptive_size_policy("UseConcMarkSweepGC");

  // CMS always runs with ParNew as its young collector; size it first.
  set_parnew_gc_flags();

  // Round the maximum heap down to the strictest card-table alignment so the
  // young/old sizing below never exceeds a legal heap size.
  size_t max_heap = align_size_down(MaxHeapSize,
                                    CardTableRS::ct_max_alignment_constraint());

  // Now make adjustments for CMS
  intx tenuring_default = (intx)6;
  size_t young_gen_per_worker = CMSYoungGenPerWorker;

  // Preferred young gen size for "short" pauses:
  // upper bound depends on # of threads and NewRatio.
  const size_t preferred_max_new_size_unaligned =
    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
  size_t preferred_max_new_size =
    align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());

  // Unless explicitly requested otherwise, size young gen
  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads

  // If either MaxNewSize or NewRatio is set on the command line,
  // assume the user is trying to set the size of the young gen.
  if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {

    // Set MaxNewSize to our calculated preferred_max_new_size unless
    // NewSize was set on the command line and it is larger than
    // preferred_max_new_size.
    if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
      FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
    } else {
      FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
    }
    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);

    // Code along this path potentially sets NewSize and OldSize
    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size: " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
                        min_heap_size(), InitialHeapSize, max_heap);
    size_t min_new = preferred_max_new_size;
    if (FLAG_IS_CMDLINE(NewSize)) {
      min_new = NewSize;
    }
    if (max_heap > min_new && min_heap_size() > min_new) {
      // Unless explicitly requested otherwise, make young gen
      // at least min_new, and at most preferred_max_new_size.
      if (FLAG_IS_DEFAULT(NewSize)) {
        // Clamp NewSize into [min_new, preferred_max_new_size].
        FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
        FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
      }
      // Unless explicitly requested otherwise, size old gen
      // so it's NewRatio x of NewSize.
      if (FLAG_IS_DEFAULT(OldSize)) {
        if (max_heap > NewSize) {
          FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
        }
      }
    }
  }
  // Unless explicitly requested otherwise, definitely
  // promote all objects surviving "tenuring_default" scavenges.
  if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
      FLAG_IS_DEFAULT(SurvivorRatio)) {
    FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
  }
  // If we decided above (or user explicitly requested)
  // `promote all' (via MaxTenuringThreshold := 0),
  // prefer minuscule survivor spaces so as not to waste
  // space for (non-existent) survivors
  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
  }

  // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
  // but rather the number of free blocks of a given size that are used when
  // replenishing the local per-worker free list caches.
  if (FLAG_IS_DEFAULT(OldPLABSize)) {
    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
      // OldPLAB sizing manually turned off: Use a larger default setting,
      // unless it was manually specified. This is because a too-low value
      // will slow down scavenges.
      FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
    } else {
      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
    }
  }

  // If either of the static initialization defaults have changed, note this
  // modification.
  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
  }

  log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
}
1650 #endif // INCLUDE_ALL_GCS
1651
// Derive the global object-alignment constants from ObjectAlignmentInBytes.
// Must run before heap sizing: OopEncodingHeapMax (set here) bounds the
// compressed-oops heap budget computed later.
void set_object_alignment() {
  // Object alignment.
  assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
  MinObjAlignmentInBytes = ObjectAlignmentInBytes;
  assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
  MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
  assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
  MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;

  LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
  LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;

  // Oop encoding heap max: the largest heap addressable by a 32-bit
  // compressed oop grows with the alignment (2^32 alignment-sized slots).
  OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;

  // Survivor alignment defaults to the object alignment when not set explicitly.
  if (SurvivorAlignmentInBytes == 0) {
    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
  }

#if INCLUDE_ALL_GCS
  // Set CMS global values
  CompactibleFreeListSpace::set_cms_values();
#endif // INCLUDE_ALL_GCS
}
1676
// Returns the largest heap size (bytes) that still allows compressed oops,
// after reserving room for the NULL page that precedes the heap.
// Only meaningful on LP64; unreachable otherwise.
size_t Arguments::max_heap_for_compressed_oops() {
  // Avoid sign flip.
  assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
  // We need to fit both the NULL page and the heap into the memory budget, while
  // keeping alignment constraints of the heap. To guarantee the latter, as the
  // NULL page is located before the heap, we pad the NULL page to the conservative
  // maximum alignment that the GC may ever impose upon the heap.
  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
                                                        _conservative_max_heap_alignment);

  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
  NOT_LP64(ShouldNotReachHere(); return 0);
}
1690
1691 void Arguments::set_use_compressed_oops() {
1692 #ifndef ZERO
1693 #ifdef _LP64
1694 // MaxHeapSize is not set up properly at this point, but
1730 if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
1731 FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
1732 }
1733 // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
1734 if (UseCompressedClassPointers) {
1735 if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
1736 warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
1737 FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
1738 }
1739 }
1740 }
1741 #endif // _LP64
1742 #endif // !ZERO
1743 }
1744
// Record the most conservative (largest) alignment the heap may ever require,
// used later when budgeting the compressed-oops address space.
void Arguments::set_conservative_max_heap_alignment() {
  // The conservative maximum required alignment for the heap is the maximum of
  // the alignments imposed by several sources: any requirements from the heap
  // itself, the collector policy and the maximum page size we may run the VM
  // with.
  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
#if INCLUDE_ALL_GCS
  // Parallel and G1 may impose stricter alignment than the generational heap.
  if (UseParallelGC) {
    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
  } else if (UseG1GC) {
    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
  }
#endif // INCLUDE_ALL_GCS
  _conservative_max_heap_alignment = MAX4(heap_alignment,
                                          (size_t)os::vm_allocation_granularity(),
                                          os::max_page_size(),
                                          CollectorPolicy::compute_heap_alignment());
}
1763
1764 bool Arguments::gc_selected() {
1765 #if INCLUDE_ALL_GCS
1766 return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
1767 #else
1768 return UseSerialGC;
1769 #endif // INCLUDE_ALL_GCS
1770 }
1771
1772 #ifdef TIERED
// Returns true if the user explicitly selected a compilation mode by setting
// any of the mode-controlling flags (including the JVMCI flags in JVMCI builds);
// false means ergonomics may choose the mode.
bool Arguments::compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) || !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
         !FLAG_IS_DEFAULT(UseAOT) JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler));

}
1866 }
1867 #endif
1868
1869 set_conservative_max_heap_alignment();
1870
1871 #ifndef ZERO
1872 #ifdef _LP64
1873 set_use_compressed_oops();
1874
1875 // set_use_compressed_klass_ptrs() must be called after calling
1876 // set_use_compressed_oops().
1877 set_use_compressed_klass_ptrs();
1878
1879 // Also checks that certain machines are slower with compressed oops
1880 // in vm_version initialization code.
1881 #endif // _LP64
1882 #endif // !ZERO
1883
1884 }
1885
// Establish ergonomic defaults for the Parallel (throughput) collector.
void Arguments::set_parallel_gc_flags() {
  assert(UseParallelGC || UseParallelOldGC, "Error");
  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
    FLAG_SET_DEFAULT(UseParallelOldGC, true);
  }
  FLAG_SET_DEFAULT(UseParallelGC, true);

  // Size the worker-thread pool ergonomically; an explicit zero is fatal.
  // (NOTE(review): the comment previously here described heap-maximum sizing
  // and did not match this code.)
  FLAG_SET_DEFAULT(ParallelGCThreads,
                   Abstract_VM_Version::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    jio_fprintf(defaultStream::error_stream(),
                "The Parallel GC can not be combined with -XX:ParallelGCThreads=0\n");
    vm_exit(1);
  }

  if (UseAdaptiveSizePolicy) {
    // We don't want to limit adaptive heap sizing's freedom to adjust the heap
    // unless the user actually sets these flags.
    if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
      FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
    }
    if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
      FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
    }
  }

  // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
  // SurvivorRatio has been set, reset their default values to SurvivorRatio +
  // 2. By doing this we make SurvivorRatio also work for Parallel Scavenger.
  // See CR 6362902 for details.
  if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
    if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
      FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
    }
    if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
      FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
    }
  }

  if (UseParallelOldGC) {
    // Par compact uses lower default values since they are treated as
    // minimums. These are different defaults because of the different
    // interpretation and are not ergonomically set.
    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
    }
  }
}
1937
// Establish ergonomic defaults for the G1 collector.
void Arguments::set_g1_gc_flags() {
  assert(UseG1GC, "Error");
#if defined(COMPILER1) || INCLUDE_JVMCI
  // NOTE(review): fast TLAB refill is force-disabled for C1/JVMCI builds;
  // presumably it is incompatible with G1 on those compilers — confirm
  // before changing.
  FastTLABRefill = false;
#endif
  // Size the worker-thread pool ergonomically; an ergonomic zero is
  // impossible, so zero here means the user asked for it — fatal for G1.
  FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
  }

#if INCLUDE_ALL_GCS
  // Default the concurrent-refinement thread count to the GC worker count.
  if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
  }
#endif

  // MarkStackSize will be set (if it hasn't been set by the user)
  // when concurrent marking is initialized.
  // Its value will be based upon the number of parallel marking threads.
  // But we do set the maximum mark stack size here.
  if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
    FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to ~8%.
    FLAG_SET_DEFAULT(GCTimeRatio, 12);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
}
1994
// Dispatch to the selected collector's flag setup, then apply a few
// settings common to all non-serial collectors.
void Arguments::set_gc_specific_flags() {
#if INCLUDE_ALL_GCS
  // Set per-collector flags
  if (UseParallelGC || UseParallelOldGC) {
    set_parallel_gc_flags();
  } else if (UseConcMarkSweepGC) {
    set_cms_and_parnew_gc_flags();
  } else if (UseG1GC) {
    set_g1_gc_flags();
  }
  // A single GC worker on a machine assumed to be MP is almost certainly a
  // misconfiguration; warn but keep running.
  if (AssumeMP && !UseSerialGC) {
    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
      warning("If the number of processors is expected to increase from one, then"
              " you should configure the number of parallel GC threads appropriately"
              " using -XX:ParallelGCThreads=N");
    }
  }
  if (MinHeapFreeRatio == 100) {
    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
  }

  // If class unloading is disabled, also disable concurrent class unloading.
  if (!ClassUnloading) {
    FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
    FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
  }
#endif // INCLUDE_ALL_GCS
}
2024
|
41 #include "oops/oop.inline.hpp"
42 #include "prims/jvmtiExport.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/arguments_ext.hpp"
45 #include "runtime/commandLineFlagConstraintList.hpp"
46 #include "runtime/commandLineFlagWriteableList.hpp"
47 #include "runtime/commandLineFlagRangeList.hpp"
48 #include "runtime/globals.hpp"
49 #include "runtime/globals_extension.hpp"
50 #include "runtime/java.hpp"
51 #include "runtime/os.hpp"
52 #include "runtime/vm_version.hpp"
53 #include "services/management.hpp"
54 #include "services/memTracker.hpp"
55 #include "utilities/defaultStream.hpp"
56 #include "utilities/macros.hpp"
57 #include "utilities/stringUtils.hpp"
58 #if INCLUDE_JVMCI
59 #include "jvmci/jvmciRuntime.hpp"
60 #endif
61
// Note: This is a special bug reporting site for the JVM
#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
#define DEFAULT_JAVA_LAUNCHER "generic"

// Static storage backing the Arguments class: raw -XX flag strings, the java
// command line, system properties, and a few parsed-state fields.  All are
// filled in during argument parsing at VM startup.
char* Arguments::_jvm_flags_file = NULL;
char** Arguments::_jvm_flags_array = NULL;
int Arguments::_num_jvm_flags = 0;
char** Arguments::_jvm_args_array = NULL;
int Arguments::_num_jvm_args = 0;
char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;
const char* Arguments::_gc_log_filename = NULL;
bool Arguments::_has_profile = false;
size_t Arguments::_conservative_max_heap_alignment = 0;
size_t Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;
bool Arguments::_java_compiler = false;
bool Arguments::_xdebug_mode = false;
const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG;
1472 FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
1473
1474 // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
1475 // once these thresholds become supported.
1476
1477 FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
1478 FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
1479
1480 FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
1481 FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
1482
1483 FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
1484
1485 FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
1486 FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
1487 FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
1488 FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
1489 }
1490 }
1491
// Derive the global object-alignment constants from ObjectAlignmentInBytes.
// Must run before heap sizing: OopEncodingHeapMax (set here) bounds the
// compressed-oops heap budget computed later.
void set_object_alignment() {
  // Object alignment.
  assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
  MinObjAlignmentInBytes = ObjectAlignmentInBytes;
  assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
  MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
  assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
  MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;

  LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
  LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;

  // Oop encoding heap max: the largest heap addressable by a 32-bit
  // compressed oop grows with the alignment (2^32 alignment-sized slots).
  OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;

  // Survivor alignment defaults to the object alignment when not set explicitly.
  if (SurvivorAlignmentInBytes == 0) {
    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
  }

}
1512
// Returns the largest heap size (bytes) that still allows compressed oops,
// after reserving room for the NULL page that precedes the heap.
// Only meaningful on LP64; unreachable otherwise.
size_t Arguments::max_heap_for_compressed_oops() {
  // Avoid sign flip.
  assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
  // We need to fit both the NULL page and the heap into the memory budget, while
  // keeping alignment constraints of the heap. To guarantee the latter, as the
  // NULL page is located before the heap, we pad the NULL page to the conservative
  // maximum alignment that the GC may ever impose upon the heap.
  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
                                                        _conservative_max_heap_alignment);

  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
  NOT_LP64(ShouldNotReachHere(); return 0);
}
1526
1527 void Arguments::set_use_compressed_oops() {
1528 #ifndef ZERO
1529 #ifdef _LP64
1530 // MaxHeapSize is not set up properly at this point, but
1566 if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
1567 FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
1568 }
1569 // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
1570 if (UseCompressedClassPointers) {
1571 if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
1572 warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
1573 FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
1574 }
1575 }
1576 }
1577 #endif // _LP64
1578 #endif // !ZERO
1579 }
1580
// Record the most conservative (largest) alignment the heap may ever require,
// used later when budgeting the compressed-oops address space.  The selected
// GC reports its own requirement through the GC interface.
void Arguments::set_conservative_max_heap_alignment() {
  // The conservative maximum required alignment for the heap is the maximum of
  // the alignments imposed by several sources: any requirements from the heap
  // itself, the collector policy and the maximum page size we may run the VM
  // with.
  size_t heap_alignment = GC::gc()->conservative_max_heap_alignment();
  _conservative_max_heap_alignment = MAX4(heap_alignment,
                                          (size_t)os::vm_allocation_granularity(),
                                          os::max_page_size(),
                                          CollectorPolicy::compute_heap_alignment());
}
1592
1593 bool Arguments::gc_selected() {
1594 #if INCLUDE_ALL_GCS
1595 return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
1596 #else
1597 return UseSerialGC;
1598 #endif // INCLUDE_ALL_GCS
1599 }
1600
1601 #ifdef TIERED
// Returns true if the user explicitly selected a compilation mode by setting
// any of the mode-controlling flags (including the JVMCI flags in JVMCI builds);
// false means ergonomics may choose the mode.
bool Arguments::compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) || !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
         !FLAG_IS_DEFAULT(UseAOT) JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler));

}
1695 }
1696 #endif
1697
1698 set_conservative_max_heap_alignment();
1699
1700 #ifndef ZERO
1701 #ifdef _LP64
1702 set_use_compressed_oops();
1703
1704 // set_use_compressed_klass_ptrs() must be called after calling
1705 // set_use_compressed_oops().
1706 set_use_compressed_klass_ptrs();
1707
1708 // Also checks that certain machines are slower with compressed oops
1709 // in vm_version initialization code.
1710 #endif // _LP64
1711 #endif // !ZERO
1712
1713 }
1714
1715 void Arguments::set_gc_specific_flags() {
1716 GC::gc()->initialize_flags();
1717 #ifdef INCLUDE_ALL_GCS
1718 if (AssumeMP && !UseSerialGC) {
1719 if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
1720 warning("If the number of processors is expected to increase from one, then"
1721 " you should configure the number of parallel GC threads appropriately"
1722 " using -XX:ParallelGCThreads=N");
1723 }
1724 }
1725 if (MinHeapFreeRatio == 100) {
1726 // Keeping the heap 100% free is hard ;-) so limit it to 99%.
1727 FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
1728 }
1729
1730 // If class unloading is disabled, also disable concurrent class unloading.
1731 if (!ClassUnloading) {
1732 FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
1733 FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
1734 }
1735 #endif // INCLUDE_ALL_GCS
1736 }
1737
|