12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaAssertions.hpp"
29 #include "classfile/moduleEntry.hpp"
30 #include "classfile/stringTable.hpp"
31 #include "classfile/symbolTable.hpp"
32 #include "gc/shared/cardTableRS.hpp"
33 #include "gc/shared/genCollectedHeap.hpp"
34 #include "gc/shared/referenceProcessor.hpp"
35 #include "gc/shared/taskqueue.hpp"
36 #include "logging/log.hpp"
37 #include "logging/logConfiguration.hpp"
38 #include "logging/logStream.hpp"
39 #include "logging/logTag.hpp"
40 #include "memory/allocation.inline.hpp"
41 #include "memory/universe.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "prims/jvmtiExport.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/arguments_ext.hpp"
46 #include "runtime/commandLineFlagConstraintList.hpp"
47 #include "runtime/commandLineFlagWriteableList.hpp"
48 #include "runtime/commandLineFlagRangeList.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/globals_extension.hpp"
51 #include "runtime/java.hpp"
52 #include "runtime/os.hpp"
53 #include "runtime/vm_version.hpp"
54 #include "services/management.hpp"
55 #include "services/memTracker.hpp"
56 #include "utilities/align.hpp"
57 #include "utilities/defaultStream.hpp"
58 #include "utilities/macros.hpp"
59 #include "utilities/stringUtils.hpp"
60 #if INCLUDE_JVMCI
61 #include "jvmci/jvmciRuntime.hpp"
62 #endif
63 #if INCLUDE_ALL_GCS
64 #include "gc/cms/compactibleFreeListSpace.hpp"
65 #include "gc/g1/g1CollectedHeap.inline.hpp"
66 #include "gc/parallel/parallelScavengeHeap.hpp"
67 #endif // INCLUDE_ALL_GCS
68
// Note: This is a special bug reporting site for the JVM
#define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
// Default launcher name recorded in Arguments::_sun_java_launcher (below).
#define DEFAULT_JAVA_LAUNCHER "generic"
72
// Definitions and initial values for Arguments' static state;
// the declarations live in runtime/arguments.hpp.
char* Arguments::_jvm_flags_file = NULL;
char** Arguments::_jvm_flags_array = NULL;
int Arguments::_num_jvm_flags = 0;                       // number of entries in _jvm_flags_array
char** Arguments::_jvm_args_array = NULL;
int Arguments::_num_jvm_args = 0;                        // number of entries in _jvm_args_array
char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;    // head of the system-property list
const char* Arguments::_gc_log_filename = NULL;
size_t Arguments::_conservative_max_heap_alignment = 0;  // computed later by set_conservative_max_heap_alignment()
size_t Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;               // default execution mode is mixed
bool Arguments::_java_compiler = false;
bool Arguments::_xdebug_mode = false;
const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG;  // see #define above
const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER;     // see #define above
1483 FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
1484
1485 // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
1486 // once these thresholds become supported.
1487
1488 FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
1489 FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
1490
1491 FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
1492 FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
1493
1494 FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
1495
1496 FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
1497 FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
1498 FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
1499 FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
1500 }
1501 }
1502
1503 #if INCLUDE_ALL_GCS
1504 static void disable_adaptive_size_policy(const char* collector_name) {
1505 if (UseAdaptiveSizePolicy) {
1506 if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
1507 warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
1508 collector_name);
1509 }
1510 FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
1511 }
1512 }
1513
// Ergonomic setup for ParNew, the parallel young-generation collector used
// together with CMS: establishes the worker-thread count and PLAB sizes,
// and forces local overflow stacks when compressed oops are in use.
void Arguments::set_parnew_gc_flags() {
  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
         "control point invariant");
  assert(UseConcMarkSweepGC, "CMS is expected to be on here");

  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
    // Derive the worker-thread count ergonomically from the platform.
    FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
  } else if (ParallelGCThreads == 0) {
    // An explicit worker count of zero cannot drive ParNew; refuse to start.
    jio_fprintf(defaultStream::error_stream(),
                "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
    vm_exit(1);
  }

  // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
  // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
  // we set them to 1024 and 1024.
  // See CR 6362902.
  if (FLAG_IS_DEFAULT(YoungPLABSize)) {
    FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
  }
  if (FLAG_IS_DEFAULT(OldPLABSize)) {
    FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
  }

  // When using compressed oops, we use local overflow stacks,
  // rather than using a global overflow list chained through
  // the klass word of the object's pre-image.
  if (UseCompressedOops && !ParGCUseLocalOverflow) {
    if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
      // The user explicitly disabled it; warn before overriding.
      warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
    }
    FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
  }
  assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
}
1550
// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
// sparc/solaris for certain applications, but would gain from
// further optimization and tuning efforts, and would almost
// certainly gain from analysis of platform and environment.
// Sizes young gen / old gen, tenuring threshold, survivor ratio and
// OldPLAB behavior ergonomically for the CMS+ParNew configuration.
void Arguments::set_cms_and_parnew_gc_flags() {
  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
  assert(UseConcMarkSweepGC, "CMS is expected to be on here");

  // Turn off AdaptiveSizePolicy by default for cms until it is complete.
  disable_adaptive_size_policy("UseConcMarkSweepGC");

  set_parnew_gc_flags();

  // Maximum heap, rounded down to the card-table alignment constraint.
  size_t max_heap = align_down(MaxHeapSize,
                               CardTableRS::ct_max_alignment_constraint());

  // Now make adjustments for CMS
  intx tenuring_default = (intx)6;
  size_t young_gen_per_worker = CMSYoungGenPerWorker;

  // Preferred young gen size for "short" pauses:
  // upper bound depends on # of threads and NewRatio.
  const size_t preferred_max_new_size_unaligned =
    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
  size_t preferred_max_new_size =
    align_up(preferred_max_new_size_unaligned, os::vm_page_size());

  // Unless explicitly requested otherwise, size young gen
  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads

  // If either MaxNewSize or NewRatio is set on the command line,
  // assume the user is trying to set the size of the young gen.
  if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {

    // Set MaxNewSize to our calculated preferred_max_new_size unless
    // NewSize was set on the command line and it is larger than
    // preferred_max_new_size.
    if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
      FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
    } else {
      FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
    }
    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);

    // Code along this path potentially sets NewSize and OldSize
    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size: " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
                        min_heap_size(), InitialHeapSize, max_heap);
    size_t min_new = preferred_max_new_size;
    if (FLAG_IS_CMDLINE(NewSize)) {
      min_new = NewSize;
    }
    if (max_heap > min_new && min_heap_size() > min_new) {
      // Unless explicitly requested otherwise, make young gen
      // at least min_new, and at most preferred_max_new_size.
      if (FLAG_IS_DEFAULT(NewSize)) {
        FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
        FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
      }
      // Unless explicitly requested otherwise, size old gen
      // so it's NewRatio x of NewSize.
      if (FLAG_IS_DEFAULT(OldSize)) {
        if (max_heap > NewSize) {
          FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
        }
      }
    }
  }
  // Unless explicitly requested otherwise, definitely
  // promote all objects surviving "tenuring_default" scavenges.
  if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
      FLAG_IS_DEFAULT(SurvivorRatio)) {
    FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
  }
  // If we decided above (or user explicitly requested)
  // `promote all' (via MaxTenuringThreshold := 0),
  // prefer minuscule survivor spaces so as not to waste
  // space for (non-existent) survivors
  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
  }

  // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
  // but rather the number of free blocks of a given size that are used when
  // replenishing the local per-worker free list caches.
  if (FLAG_IS_DEFAULT(OldPLABSize)) {
    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
      // OldPLAB sizing manually turned off: Use a larger default setting,
      // unless it was manually specified. This is because a too-low value
      // will slow down scavenges.
      FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
    } else {
      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
    }
  }

  // If either of the static initialization defaults have changed, note this
  // modification.
  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
  }

  log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
}
1656 #endif // INCLUDE_ALL_GCS
1657
// Derives all object-alignment globals from ObjectAlignmentInBytes and
// propagates the dependent values (oop-encoding limit, survivor alignment,
// CMS block sizing).
void set_object_alignment() {
  // Object alignment.
  assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
  MinObjAlignmentInBytes = ObjectAlignmentInBytes;
  assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
  MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
  assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
  // Alignment is a power of two, so the mask is alignment - 1.
  MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;

  LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
  LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;

  // Oop encoding heap max: 2^32 oop slots, each covering an alignment unit.
  OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;

  if (SurvivorAlignmentInBytes == 0) {
    // Not set explicitly: survivor spaces use the same alignment as objects.
    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
  }

#if INCLUDE_ALL_GCS
  // Set CMS global values
  CompactibleFreeListSpace::set_cms_values();
#endif // INCLUDE_ALL_GCS
}
1682
// Largest heap size (in bytes) that remains fully addressable with
// compressed oops, after reserving the padded NULL page below the heap.
size_t Arguments::max_heap_for_compressed_oops() {
  // Avoid sign flip.
  assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
  // We need to fit both the NULL page and the heap into the memory budget, while
  // keeping alignment constraints of the heap. To guarantee the latter, as the
  // NULL page is located before the heap, we pad the NULL page to the conservative
  // maximum alignment that the GC may ever impose upon the heap.
  size_t displacement_due_to_null_page = align_up((size_t)os::vm_page_size(),
                                                  _conservative_max_heap_alignment);

  // Compressed oops exist only on 64-bit platforms; 32-bit builds must not
  // reach this code.
  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
  NOT_LP64(ShouldNotReachHere(); return 0);
}
1696
1697 void Arguments::set_use_compressed_oops() {
1698 #ifndef ZERO
1699 #ifdef _LP64
1700 // MaxHeapSize is not set up properly at this point, but
1736 if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
1737 FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
1738 }
1739 // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
1740 if (UseCompressedClassPointers) {
1741 if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
1742 warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
1743 FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
1744 }
1745 }
1746 }
1747 #endif // _LP64
1748 #endif // !ZERO
1749 }
1750
// Computes and caches the most conservative (largest) heap alignment that
// any configured collector or the OS might require.
void Arguments::set_conservative_max_heap_alignment() {
  // The conservative maximum required alignment for the heap is the maximum of
  // the alignments imposed by several sources: any requirements from the heap
  // itself, the collector policy and the maximum page size we may run the VM
  // with.
  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
#if INCLUDE_ALL_GCS
  // Parallel and G1 heaps may require a stricter alignment than the
  // generational default.
  if (UseParallelGC) {
    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
  } else if (UseG1GC) {
    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
  }
#endif // INCLUDE_ALL_GCS
  _conservative_max_heap_alignment = MAX4(heap_alignment,
                                          (size_t)os::vm_allocation_granularity(),
                                          os::max_page_size(),
                                          CollectorPolicy::compute_heap_alignment());
}
1769
// Returns true if any garbage collector has been selected, i.e. one of the
// Use*GC flags is on (whether set by the user or ergonomically).
bool Arguments::gc_selected() {
#if INCLUDE_ALL_GCS
  return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
#else
  // Builds without INCLUDE_ALL_GCS only consider the serial collector.
  return UseSerialGC;
#endif // INCLUDE_ALL_GCS
}
1777
1778 #ifdef TIERED
// Returns true if a compilation mode was chosen explicitly, i.e. any of the
// mode-determining flags was changed from its default value.
bool Arguments::compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) || !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
         !FLAG_IS_DEFAULT(UseAOT) JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler));

}
1784
// Picks a compilation mode when the user did not select one: 32-bit Windows
// defaults to not acting as a server-class machine, and client compilation
// mode is chosen whenever NeverActAsServerClassMachine is set.
void Arguments::select_compilation_mode_ergonomically() {
#if defined(_WINDOWS) && !defined(_LP64)
  if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
    FLAG_SET_ERGO(bool, NeverActAsServerClassMachine, true);
  }
#endif
  if (NeverActAsServerClassMachine) {
    set_client_compilation_mode();
  }
}
1795 #endif //TIERED
1796
// Chooses a collector when none was requested: G1 on server-class machines,
// otherwise the serial collector. Builds without INCLUDE_ALL_GCS reject any
// other collector and fall back to serial.
void Arguments::select_gc_ergonomically() {
#if INCLUDE_ALL_GCS
  if (os::is_server_class_machine()) {
    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
  } else {
    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
  }
#else
  // Only the serial collector is available; reject the others explicitly.
  UNSUPPORTED_OPTION(UseG1GC);
  UNSUPPORTED_OPTION(UseParallelGC);
  UNSUPPORTED_OPTION(UseParallelOldGC);
  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
#endif // INCLUDE_ALL_GCS
}
1812
1813 void Arguments::select_gc() {
1814 if (!gc_selected()) {
1815 select_gc_ergonomically();
1816 if (!gc_selected()) {
1817 vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
1818 }
1819 }
1820 }
1821
1822 #if INCLUDE_JVMCI
1823 void Arguments::set_jvmci_specific_flags() {
1824 if (UseJVMCICompiler) {
1825 if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
1826 FLAG_SET_DEFAULT(TypeProfileWidth, 8);
1827 }
1828 if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
1829 FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
1830 }
1831 if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
1832 FLAG_SET_DEFAULT(ReservedCodeCacheSize, 64*M);
1833 }
1834 if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
1835 FLAG_SET_DEFAULT(InitialCodeCacheSize, 16*M);
1836 }
1837 if (FLAG_IS_DEFAULT(MetaspaceSize)) {
1838 FLAG_SET_DEFAULT(MetaspaceSize, 12*M);
1839 }
1840 if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
1841 FLAG_SET_DEFAULT(NewSizeThreadIncrease, 4*K);
1842 }
1843 if (TieredStopAtLevel != CompLevel_full_optimization) {
1844 // Currently JVMCI compiler can only work at the full optimization level
1845 warning("forcing TieredStopAtLevel to full optimization because JVMCI is enabled");
1846 TieredStopAtLevel = CompLevel_full_optimization;
1847 }
1848 if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
1849 FLAG_SET_DEFAULT(TypeProfileLevel, 0);
1850 }
1851 }
1852 }
1853 #endif
1854
// Top-level ergonomics: chooses the compilation mode and collector, decides
// about shared spaces, computes the conservative heap alignment, and (on
// 64-bit, non-Zero builds) configures compressed oops and class pointers.
void Arguments::set_ergonomics_flags() {
#ifdef TIERED
  if (!compilation_mode_selected()) {
    select_compilation_mode_ergonomically();
  }
#endif
  select_gc();

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Shared spaces work fine with other GCs but causes bytecode rewriting
  // to be disabled, which hurts interpreter performance and decreases
  // server performance. When -server is specified, keep the default off
  // unless it is asked for. Future work: either add bytecode rewriting
  // at link time, or rewrite bytecodes in non-shared methods.
  if (is_server_compilation_mode_vm() && !DumpSharedSpaces && !RequireSharedSpaces &&
      (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
    no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
  }
#endif

  // Must precede compressed-oops setup: max_heap_for_compressed_oops()
  // reads _conservative_max_heap_alignment.
  set_conservative_max_heap_alignment();

#ifndef ZERO
#ifdef _LP64
  set_use_compressed_oops();

  // set_use_compressed_klass_ptrs() must be called after calling
  // set_use_compressed_oops().
  set_use_compressed_klass_ptrs();

  // Also checks that certain machines are slower with compressed oops
  // in vm_version initialization code.
#endif // _LP64
#endif // !ZERO

}
1891
// Ergonomic setup for the throughput (Parallel) collector: enables
// ParallelOld by default, sizes the worker-thread count, and relaxes
// heap-free-ratio and survivor-ratio defaults for adaptive sizing.
void Arguments::set_parallel_gc_flags() {
  assert(UseParallelGC || UseParallelOldGC, "Error");
  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
    FLAG_SET_DEFAULT(UseParallelOldGC, true);
  }
  FLAG_SET_DEFAULT(UseParallelGC, true);

  // Ergonomically choose the number of parallel GC worker threads.
  FLAG_SET_DEFAULT(ParallelGCThreads,
                   Abstract_VM_Version::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    jio_fprintf(defaultStream::error_stream(),
                "The Parallel GC can not be combined with -XX:ParallelGCThreads=0\n");
    vm_exit(1);
  }

  if (UseAdaptiveSizePolicy) {
    // We don't want to limit adaptive heap sizing's freedom to adjust the heap
    // unless the user actually sets these flags.
    if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
      FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
    }
    if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
      FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
    }
  }

  // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
  // SurvivorRatio has been set, reset their default values to SurvivorRatio +
  // 2. By doing this we make SurvivorRatio also work for Parallel Scavenger.
  // See CR 6362902 for details.
  if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
    if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
      FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
    }
    if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
      FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
    }
  }

  if (UseParallelOldGC) {
    // Par compact uses lower default values since they are treated as
    // minimums. These are different defaults because of the different
    // interpretation and are not ergonomically set.
    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
    }
  }
}
1943
// Ergonomic setup for the G1 collector: worker and refinement thread
// counts, mark-stack maximum, GC time ratio and pause-time target/interval.
void Arguments::set_g1_gc_flags() {
  assert(UseG1GC, "Error");
#if defined(COMPILER1) || INCLUDE_JVMCI
  // NOTE(review): disabled for C1/JVMCI builds when G1 is in use --
  // presumably fast TLAB refill does not cooperate with G1 there; confirm.
  FastTLABRefill = false;
#endif
  FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
  }

#if INCLUDE_ALL_GCS
  // Concurrent refinement threads default to the parallel worker count.
  if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
  }
#endif

  // MarkStackSize will be set (if it hasn't been set by the user)
  // when concurrent marking is initialized.
  // Its value will be based upon the number of parallel marking threads.
  // But we do set the maximum mark stack size here.
  if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
    FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to ~8%.
    FLAG_SET_DEFAULT(GCTimeRatio, 12);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
}
2000
// Dispatches to the per-collector ergonomic setup, then applies
// adjustments common to all collectors (MP warning, free-ratio cap,
// class-unloading consistency).
void Arguments::set_gc_specific_flags() {
#if INCLUDE_ALL_GCS
  // Set per-collector flags
  if (UseParallelGC || UseParallelOldGC) {
    set_parallel_gc_flags();
  } else if (UseConcMarkSweepGC) {
    set_cms_and_parnew_gc_flags();
  } else if (UseG1GC) {
    set_g1_gc_flags();
  }
  if (AssumeMP && !UseSerialGC) {
    // A single ergonomically-chosen GC thread on an assumed-MP machine is
    // suspicious; tell the user how to override it.
    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
      warning("If the number of processors is expected to increase from one, then"
              " you should configure the number of parallel GC threads appropriately"
              " using -XX:ParallelGCThreads=N");
    }
  }
  if (MinHeapFreeRatio == 100) {
    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
  }

  // If class unloading is disabled, also disable concurrent class unloading.
  if (!ClassUnloading) {
    FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
    FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
  }
#endif // INCLUDE_ALL_GCS
}
2030
2031 julong Arguments::limit_by_allocatable_memory(julong limit) {
2032 julong max_allocatable;
2033 julong result = limit;
2034 if (os::has_allocatable_memory_limit(&max_allocatable)) {
2035 result = MIN2(result, max_allocatable / MaxVirtMemFraction);
2036 }
2037 return result;
2038 }
2039
// Use static initialization to get the default before parsing
// (argument parsing may later overwrite HeapBaseMinAddress itself).
static const size_t DefaultHeapBaseMinAddress = HeapBaseMinAddress;
2042
2043 void Arguments::set_heap_size() {
2044 julong phys_mem =
2045 FLAG_IS_DEFAULT(MaxRAM) ? MIN2(os::physical_memory(), (julong)MaxRAM)
2046 : (julong)MaxRAM;
2047
2048 // Experimental support for CGroup memory limits
4453 #if !INCLUDE_CDS
4454 if (DumpSharedSpaces || RequireSharedSpaces) {
4455 jio_fprintf(defaultStream::error_stream(),
4456 "Shared spaces are not supported in this VM\n");
4457 return JNI_ERR;
4458 }
4459 if ((UseSharedSpaces && FLAG_IS_CMDLINE(UseSharedSpaces)) ||
4460 log_is_enabled(Info, cds)) {
4461 warning("Shared spaces are not supported in this VM");
4462 FLAG_SET_DEFAULT(UseSharedSpaces, false);
4463 LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(cds));
4464 }
4465 no_shared_spaces("CDS Disabled");
4466 #endif // INCLUDE_CDS
4467
4468 return JNI_OK;
4469 }
4470
4471 jint Arguments::apply_ergo() {
4472 // Set flags based on ergonomics.
4473 set_ergonomics_flags();
4474
4475 #if INCLUDE_JVMCI
4476 set_jvmci_specific_flags();
4477 #endif
4478
4479 set_shared_spaces_flags();
4480
4481 // Check the GC selections again.
4482 if (!check_gc_consistency()) {
4483 return JNI_EINVAL;
4484 }
4485
4486 if (TieredCompilation) {
4487 set_tiered_flags();
4488 } else {
4489 int max_compilation_policy_choice = 1;
4490 #ifdef COMPILER2
4491 if (is_server_compilation_mode_vm()) {
4492 max_compilation_policy_choice = 2;
4493 }
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaAssertions.hpp"
29 #include "classfile/moduleEntry.hpp"
30 #include "classfile/stringTable.hpp"
31 #include "classfile/symbolTable.hpp"
32 #include "gc/shared/gcArguments.hpp"
33 #include "gc/shared/genCollectedHeap.hpp"
34 #include "gc/shared/referenceProcessor.hpp"
35 #include "gc/shared/taskqueue.hpp"
36 #include "logging/log.hpp"
37 #include "logging/logConfiguration.hpp"
38 #include "logging/logStream.hpp"
39 #include "logging/logTag.hpp"
40 #include "memory/allocation.inline.hpp"
41 #include "memory/universe.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "prims/jvmtiExport.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/arguments_ext.hpp"
46 #include "runtime/commandLineFlagConstraintList.hpp"
47 #include "runtime/commandLineFlagWriteableList.hpp"
48 #include "runtime/commandLineFlagRangeList.hpp"
49 #include "runtime/globals.hpp"
50 #include "runtime/globals_extension.hpp"
51 #include "runtime/java.hpp"
52 #include "runtime/os.hpp"
53 #include "runtime/vm_version.hpp"
54 #include "services/management.hpp"
55 #include "services/memTracker.hpp"
56 #include "utilities/align.hpp"
57 #include "utilities/defaultStream.hpp"
58 #include "utilities/macros.hpp"
59 #include "utilities/stringUtils.hpp"
60 #if INCLUDE_JVMCI
61 #include "jvmci/jvmciRuntime.hpp"
62 #endif
63
64 // Note: This is a special bug reporting site for the JVM
65 #define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
66 #define DEFAULT_JAVA_LAUNCHER "generic"
67
// Definitions and initial values for Arguments' static state;
// the declarations live in runtime/arguments.hpp.
char* Arguments::_jvm_flags_file = NULL;
char** Arguments::_jvm_flags_array = NULL;
int Arguments::_num_jvm_flags = 0;                       // number of entries in _jvm_flags_array
char** Arguments::_jvm_args_array = NULL;
int Arguments::_num_jvm_args = 0;                        // number of entries in _jvm_args_array
char* Arguments::_java_command = NULL;
SystemProperty* Arguments::_system_properties = NULL;    // head of the system-property list
const char* Arguments::_gc_log_filename = NULL;
size_t Arguments::_conservative_max_heap_alignment = 0;  // computed later by set_conservative_max_heap_alignment()
size_t Arguments::_min_heap_size = 0;
Arguments::Mode Arguments::_mode = _mixed;               // default execution mode is mixed
bool Arguments::_java_compiler = false;
bool Arguments::_xdebug_mode = false;
const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG;  // see #define above
const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER;     // see #define above
1478 FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
1479
1480 // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
1481 // once these thresholds become supported.
1482
1483 FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
1484 FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
1485
1486 FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
1487 FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
1488
1489 FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
1490
1491 FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
1492 FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
1493 FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
1494 FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
1495 }
1496 }
1497
// Derives all object-alignment globals from ObjectAlignmentInBytes and
// propagates the dependent values (oop-encoding limit, survivor alignment).
void set_object_alignment() {
  // Object alignment.
  assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
  MinObjAlignmentInBytes = ObjectAlignmentInBytes;
  assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
  MinObjAlignment = MinObjAlignmentInBytes / HeapWordSize;
  assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
  // Alignment is a power of two, so the mask is alignment - 1.
  MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;

  LogMinObjAlignmentInBytes = exact_log2(ObjectAlignmentInBytes);
  LogMinObjAlignment = LogMinObjAlignmentInBytes - LogHeapWordSize;

  // Oop encoding heap max: 2^32 oop slots, each covering an alignment unit.
  OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;

  if (SurvivorAlignmentInBytes == 0) {
    // Not set explicitly: survivor spaces use the same alignment as objects.
    SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
  }
}
1517
// Largest heap size (in bytes) that remains fully addressable with
// compressed oops, after reserving the padded NULL page below the heap.
size_t Arguments::max_heap_for_compressed_oops() {
  // Avoid sign flip.
  assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
  // We need to fit both the NULL page and the heap into the memory budget, while
  // keeping alignment constraints of the heap. To guarantee the latter, as the
  // NULL page is located before the heap, we pad the NULL page to the conservative
  // maximum alignment that the GC may ever impose upon the heap.
  size_t displacement_due_to_null_page = align_up((size_t)os::vm_page_size(),
                                                  _conservative_max_heap_alignment);

  // Compressed oops exist only on 64-bit platforms; 32-bit builds must not
  // reach this code.
  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
  NOT_LP64(ShouldNotReachHere(); return 0);
}
1531
// Decide ergonomically whether to use compressed oops (64-bit, non-Zero only).
// NOTE(review): this span appears truncated in the current view; the lines
// below mixing in UseCompressedClassPointers handling should be verified
// against the full file.
void Arguments::set_use_compressed_oops() {
#ifndef ZERO
#ifdef _LP64
  // MaxHeapSize is not set up properly at this point, but
    // Turn compressed class pointers on by default when still unset.
    if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
      FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
    }
    // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
    if (UseCompressedClassPointers) {
      if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
        // The class space would not be encodable; fall back to full-width
        // klass pointers rather than failing.
        warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
      }
    }
  }
#endif // _LP64
#endif // !ZERO
}
1585
1586 void Arguments::set_conservative_max_heap_alignment() {
1587 // The conservative maximum required alignment for the heap is the maximum of
1588 // the alignments imposed by several sources: any requirements from the heap
1589 // itself, the collector policy and the maximum page size we may run the VM
1590 // with.
1591 size_t heap_alignment = GCArguments::arguments()->conservative_max_heap_alignment();
1592 _conservative_max_heap_alignment = MAX4(heap_alignment,
1593 (size_t)os::vm_allocation_granularity(),
1594 os::max_page_size(),
1595 CollectorPolicy::compute_heap_alignment());
1596 }
1597
1598 #ifdef TIERED
1599 bool Arguments::compilation_mode_selected() {
1600 return !FLAG_IS_DEFAULT(TieredCompilation) || !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
1601 !FLAG_IS_DEFAULT(UseAOT) JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler));
1602
1603 }
1604
1605 void Arguments::select_compilation_mode_ergonomically() {
1606 #if defined(_WINDOWS) && !defined(_LP64)
1607 if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
1608 FLAG_SET_ERGO(bool, NeverActAsServerClassMachine, true);
1609 }
1610 #endif
1611 if (NeverActAsServerClassMachine) {
1612 set_client_compilation_mode();
1613 }
1614 }
1615 #endif //TIERED
1616
1617 #if INCLUDE_JVMCI
1618 void Arguments::set_jvmci_specific_flags() {
1619 if (UseJVMCICompiler) {
1620 if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
1621 FLAG_SET_DEFAULT(TypeProfileWidth, 8);
1622 }
1623 if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
1624 FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
1625 }
1626 if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
1627 FLAG_SET_DEFAULT(ReservedCodeCacheSize, 64*M);
1628 }
1629 if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
1630 FLAG_SET_DEFAULT(InitialCodeCacheSize, 16*M);
1631 }
1632 if (FLAG_IS_DEFAULT(MetaspaceSize)) {
1633 FLAG_SET_DEFAULT(MetaspaceSize, 12*M);
1634 }
1635 if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
1636 FLAG_SET_DEFAULT(NewSizeThreadIncrease, 4*K);
1637 }
1638 if (TieredStopAtLevel != CompLevel_full_optimization) {
1639 // Currently JVMCI compiler can only work at the full optimization level
1640 warning("forcing TieredStopAtLevel to full optimization because JVMCI is enabled");
1641 TieredStopAtLevel = CompLevel_full_optimization;
1642 }
1643 if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
1644 FLAG_SET_DEFAULT(TypeProfileLevel, 0);
1645 }
1646 }
1647 }
1648 #endif
1649
// Set flags ergonomically from the platform and selected GC. Returns JNI_OK,
// or the error code from GC argument initialization on failure.
jint Arguments::set_ergonomics_flags() {
#ifdef TIERED
  // Only pick a compilation mode ergonomically when the user expressed no
  // explicit preference through the mode-selecting flags.
  if (!compilation_mode_selected()) {
    select_compilation_mode_ergonomically();
  }
#endif

  jint gc_result = GCArguments::initialize();
  if (gc_result != JNI_OK) {
    return gc_result;
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Shared spaces work fine with other GCs but causes bytecode rewriting
  // to be disabled, which hurts interpreter performance and decreases
  // server performance. When -server is specified, keep the default off
  // unless it is asked for. Future work: either add bytecode rewriting
  // at link time, or rewrite bytecodes in non-shared methods.
  if (is_server_compilation_mode_vm() && !DumpSharedSpaces && !RequireSharedSpaces &&
      (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
    no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
  }
#endif

  // Compute _conservative_max_heap_alignment first: it is read by
  // max_heap_for_compressed_oops(), which feeds the compressed-oops decision.
  set_conservative_max_heap_alignment();

#ifndef ZERO
#ifdef _LP64
  set_use_compressed_oops();

  // set_use_compressed_klass_ptrs() must be called after calling
  // set_use_compressed_oops().
  set_use_compressed_klass_ptrs();

  // Also checks that certain machines are slower with compressed oops
  // in vm_version initialization code.
#endif // _LP64
#endif // !ZERO

  return JNI_OK;
}
1691
// Delegate GC-specific flag initialization to the selected GC's arguments
// object.
void Arguments::set_gc_specific_flags() {
  // Set GC flags
  GCArguments::arguments()->initialize_flags();
}
1696
1697 julong Arguments::limit_by_allocatable_memory(julong limit) {
1698 julong max_allocatable;
1699 julong result = limit;
1700 if (os::has_allocatable_memory_limit(&max_allocatable)) {
1701 result = MIN2(result, max_allocatable / MaxVirtMemFraction);
1702 }
1703 return result;
1704 }
1705
// Use static initialization to capture the build-time default of
// HeapBaseMinAddress before command-line parsing can modify the flag.
static const size_t DefaultHeapBaseMinAddress = HeapBaseMinAddress;
1708
// NOTE(review): this span appears truncated in the current view (the tail
// below, which returns jint codes, does not match the void signature) —
// verify against the full file.
void Arguments::set_heap_size() {
  // Physical memory figure: capped at MaxRAM by default, or taken verbatim
  // from MaxRAM when it was set explicitly on the command line.
  julong phys_mem =
    FLAG_IS_DEFAULT(MaxRAM) ? MIN2(os::physical_memory(), (julong)MaxRAM)
                            : (julong)MaxRAM;

  // Experimental support for CGroup memory limits
#if !INCLUDE_CDS
  // Builds without CDS support: a hard request for shared spaces is an
  // error; best-effort use of them is quietly disabled with a warning.
  if (DumpSharedSpaces || RequireSharedSpaces) {
    jio_fprintf(defaultStream::error_stream(),
      "Shared spaces are not supported in this VM\n");
    return JNI_ERR;
  }
  if ((UseSharedSpaces && FLAG_IS_CMDLINE(UseSharedSpaces)) ||
      log_is_enabled(Info, cds)) {
    warning("Shared spaces are not supported in this VM");
    FLAG_SET_DEFAULT(UseSharedSpaces, false);
    // Also silence cds logging, since there is nothing to log about.
    LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(cds));
  }
  no_shared_spaces("CDS Disabled");
#endif // INCLUDE_CDS

  return JNI_OK;
}
4136
4137 jint Arguments::apply_ergo() {
4138 // Set flags based on ergonomics.
4139 jint result = set_ergonomics_flags();
4140 if (result != JNI_OK) return result;
4141
4142 #if INCLUDE_JVMCI
4143 set_jvmci_specific_flags();
4144 #endif
4145
4146 set_shared_spaces_flags();
4147
4148 // Check the GC selections again.
4149 if (!check_gc_consistency()) {
4150 return JNI_EINVAL;
4151 }
4152
4153 if (TieredCompilation) {
4154 set_tiered_flags();
4155 } else {
4156 int max_compilation_policy_choice = 1;
4157 #ifdef COMPILER2
4158 if (is_server_compilation_mode_vm()) {
4159 max_compilation_policy_choice = 2;
4160 }
|