
src/share/vm/runtime/arguments.cpp

rev 12310 : [mq]: gcinterface.patch


  41 #include "oops/oop.inline.hpp"
  42 #include "prims/jvmtiExport.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/arguments_ext.hpp"
  45 #include "runtime/commandLineFlagConstraintList.hpp"
  46 #include "runtime/commandLineFlagWriteableList.hpp"
  47 #include "runtime/commandLineFlagRangeList.hpp"
  48 #include "runtime/globals.hpp"
  49 #include "runtime/globals_extension.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/os.hpp"
  52 #include "runtime/vm_version.hpp"
  53 #include "services/management.hpp"
  54 #include "services/memTracker.hpp"
  55 #include "utilities/defaultStream.hpp"
  56 #include "utilities/macros.hpp"
  57 #include "utilities/stringUtils.hpp"
  58 #if INCLUDE_JVMCI
  59 #include "jvmci/jvmciRuntime.hpp"
  60 #endif
  61 #if INCLUDE_ALL_GCS
  62 #include "gc/cms/compactibleFreeListSpace.hpp"
  63 #include "gc/g1/g1CollectedHeap.inline.hpp"
  64 #include "gc/parallel/parallelScavengeHeap.hpp"
  65 #endif // INCLUDE_ALL_GCS
  66 
  67 // Note: This is a special bug reporting site for the JVM
  68 #define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
  69 #define DEFAULT_JAVA_LAUNCHER  "generic"
  70 
  71 char*  Arguments::_jvm_flags_file               = NULL;
  72 char** Arguments::_jvm_flags_array              = NULL;
  73 int    Arguments::_num_jvm_flags                = 0;
  74 char** Arguments::_jvm_args_array               = NULL;
  75 int    Arguments::_num_jvm_args                 = 0;
  76 char*  Arguments::_java_command                 = NULL;
  77 SystemProperty* Arguments::_system_properties   = NULL;
  78 const char*  Arguments::_gc_log_filename        = NULL;
  79 bool   Arguments::_has_profile                  = false;
  80 size_t Arguments::_conservative_max_heap_alignment = 0;
  81 size_t Arguments::_min_heap_size                = 0;
  82 Arguments::Mode Arguments::_mode                = _mixed;
  83 bool   Arguments::_java_compiler                = false;
  84 bool   Arguments::_xdebug_mode                  = false;
  85 const char*  Arguments::_java_vendor_url_bug    = DEFAULT_VENDOR_URL_BUG;


1508     FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
1509 
1510     // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
1511     // once these thresholds become supported.
1512 
1513     FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
1514     FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
1515 
1516     FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
1517     FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
1518 
1519     FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
1520 
1521     FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
1522     FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
1523     FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
1524     FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
1525   }
1526 }
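
The calls above scale the tiered-compilation thresholds by the CompileThresholdScaling flag. As a rough, simplified stand-in for what that scaling amounts to (not the exact helper in this file, which also treats frequency-log flags and overflow separately):

#include <cstdint>

// Simplified sketch of the threshold scaling used above; 'scale' plays the
// role of the CompileThresholdScaling flag.
int64_t scaled_compile_threshold_sketch(int64_t threshold, double scale) {
  if (scale == 1.0 || scale < 0.0) {
    return threshold;                    // no scaling requested, or invalid scale
  }
  return (int64_t)(threshold * scale);
}

// Example: -XX:CompileThresholdScaling=0.5 roughly halves a threshold such as
// Tier4InvocationThreshold, e.g. from 5000 to 2500.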
1527 
1528 #if INCLUDE_ALL_GCS
1529 static void disable_adaptive_size_policy(const char* collector_name) {
1530   if (UseAdaptiveSizePolicy) {
1531     if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
1532       warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
1533               collector_name);
1534     }
1535     FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
1536   }
1537 }
1538 
1539 void Arguments::set_parnew_gc_flags() {
1540   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
1541          "control point invariant");
1542   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
1543   assert(UseParNewGC, "ParNew should always be used with CMS");
1544 
1545   if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
1546     FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
1547     assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
1548   } else if (ParallelGCThreads == 0) {
1549     jio_fprintf(defaultStream::error_stream(),
1550         "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
1551     vm_exit(1);
1552   }
1553 
1554   // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively;
1555   // these are the defaults for the Parallel Scavenger. For a ParNew+Tenured
1556   // configuration we set both to 1024.
1557   // See CR 6362902.
1558   if (FLAG_IS_DEFAULT(YoungPLABSize)) {
1559     FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
1560   }
1561   if (FLAG_IS_DEFAULT(OldPLABSize)) {
1562     FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
1563   }
1564 
1565   // When using compressed oops, we use local overflow stacks,
1566   // rather than using a global overflow list chained through
1567   // the klass word of the object's pre-image.
1568   if (UseCompressedOops && !ParGCUseLocalOverflow) {
1569     if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
1570       warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
1571     }
1572     FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
1573   }
1574   assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
1575 }
1576 
1577 // Adjust some sizes to suit CMS and/or ParNew needs; these work well on
1578 // sparc/solaris for certain applications, but would gain from
1579 // further optimization and tuning efforts, and would almost
1580 // certainly gain from analysis of platform and environment.
1581 void Arguments::set_cms_and_parnew_gc_flags() {
1582   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
1583   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
1584   assert(UseParNewGC, "ParNew should always be used with CMS");
1585 
1586   // Turn off AdaptiveSizePolicy by default for cms until it is complete.
1587   disable_adaptive_size_policy("UseConcMarkSweepGC");
1588 
1589   set_parnew_gc_flags();
1590 
1591   size_t max_heap = align_size_down(MaxHeapSize,
1592                                     CardTableRS::ct_max_alignment_constraint());
1593 
1594   // Now make adjustments for CMS
1595   intx   tenuring_default = (intx)6;
1596   size_t young_gen_per_worker = CMSYoungGenPerWorker;
1597 
1598   // Preferred young gen size for "short" pauses:
1599   // upper bound depends on # of threads and NewRatio.
1600   const size_t preferred_max_new_size_unaligned =
1601     MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
1602   size_t preferred_max_new_size =
1603     align_size_up(preferred_max_new_size_unaligned, os::vm_page_size());
1604 
1605   // Unless explicitly requested otherwise, size young gen
1606   // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
1607 
1608   // If either MaxNewSize or NewRatio is set on the command line,
1609   // assume the user is trying to set the size of the young gen.
1610   if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
1611 
1612     // Set MaxNewSize to our calculated preferred_max_new_size unless
1613     // NewSize was set on the command line and it is larger than
1614     // preferred_max_new_size.
1615     if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
1616       FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
1617     } else {
1618       FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
1619     }
1620     log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
1621 
1622     // Code along this path potentially sets NewSize and OldSize
1623     log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size:  " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
1624                         min_heap_size(), InitialHeapSize, max_heap);
1625     size_t min_new = preferred_max_new_size;
1626     if (FLAG_IS_CMDLINE(NewSize)) {
1627       min_new = NewSize;
1628     }
1629     if (max_heap > min_new && min_heap_size() > min_new) {
1630       // Unless explicitly requested otherwise, make young gen
1631       // at least min_new, and at most preferred_max_new_size.
1632       if (FLAG_IS_DEFAULT(NewSize)) {
1633         FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
1634         FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
1635         log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
1636       }
1637       // Unless explicitly requested otherwise, size old gen
1638       // so that it is NewRatio times NewSize.
1639       if (FLAG_IS_DEFAULT(OldSize)) {
1640         if (max_heap > NewSize) {
1641           FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
1642           log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
1643         }
1644       }
1645     }
1646   }
1647   // Unless explicitly requested otherwise, definitely
1648   // promote all objects surviving "tenuring_default" scavenges.
1649   if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
1650       FLAG_IS_DEFAULT(SurvivorRatio)) {
1651     FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
1652   }
1653   // If we decided above (or user explicitly requested)
1654   // `promote all' (via MaxTenuringThreshold := 0),
1655   // prefer minuscule survivor spaces so as not to waste
1656   // space for (non-existent) survivors
1657   if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
1658     FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
1659   }
1660 
1661   // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
1662   // but rather the number of free blocks of a given size that are used when
1663   // replenishing the local per-worker free list caches.
1664   if (FLAG_IS_DEFAULT(OldPLABSize)) {
1665     if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
1666       // OldPLAB sizing manually turned off: Use a larger default setting,
1667       // unless it was manually specified. This is because a too-low value
1668       // will slow down scavenges.
1669       FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
1670     } else {
1671       FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
1672     }
1673   }
1674 
1675   // If either of the static initialization defaults have changed, note this
1676   // modification.
1677   if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
1678     CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
1679   }
1680 
1681   log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
1682 }
1683 #endif // INCLUDE_ALL_GCS
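
The young-gen sizing in set_cms_and_parnew_gc_flags() caps MaxNewSize at the smaller of a NewRatio share of the heap and roughly CMSYoungGenPerWorker * ParallelGCThreads. A minimal sketch of that arithmetic, with purely hypothetical inputs and ignoring the ScaleForWordSize and page-alignment adjustments the real code applies:

#include <algorithm>
#include <cstddef>

// Illustration only; inputs are hypothetical, not platform defaults.
size_t preferred_max_new_size_sketch(size_t max_heap,
                                     size_t new_ratio,
                                     size_t parallel_gc_threads,
                                     size_t young_gen_per_worker) {
  return std::min(max_heap / (new_ratio + 1),
                  young_gen_per_worker * parallel_gc_threads);
}

// E.g. max_heap = 4 GB, NewRatio = 2, 4 GC threads, 64 MB per worker:
// min(4 GB / 3, 4 * 64 MB) = 256 MB as the preferred young gen upper bound.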
1684 
1685 void set_object_alignment() {
1686   // Object alignment.
1687   assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
1688   MinObjAlignmentInBytes     = ObjectAlignmentInBytes;
1689   assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
1690   MinObjAlignment            = MinObjAlignmentInBytes / HeapWordSize;
1691   assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
1692   MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
1693 
1694   LogMinObjAlignmentInBytes  = exact_log2(ObjectAlignmentInBytes);
1695   LogMinObjAlignment         = LogMinObjAlignmentInBytes - LogHeapWordSize;
1696 
1697   // Oop encoding heap max
1698   OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
1699 
1700   if (SurvivorAlignmentInBytes == 0) {
1701     SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
1702   }
1703 
1704 #if INCLUDE_ALL_GCS
1705   // Set CMS global values
1706   CompactibleFreeListSpace::set_cms_values();
1707 #endif // INCLUDE_ALL_GCS
1708 }
1709 
1710 size_t Arguments::max_heap_for_compressed_oops() {
1711   // Avoid sign flip.
1712   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
1713   // We need to fit both the NULL page and the heap into the memory budget, while
1714   // keeping alignment constraints of the heap. To guarantee the latter, as the
1715   // NULL page is located before the heap, we pad the NULL page to the conservative
1716   // maximum alignment that the GC may ever impose upon the heap.
1717   size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
1718                                                         _conservative_max_heap_alignment);
1719 
1720   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
1721   NOT_LP64(ShouldNotReachHere(); return 0);
1722 }
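
set_object_alignment() above computes OopEncodingHeapMax as 2^32 shifted by the object-alignment log, and max_heap_for_compressed_oops() then subtracts a NULL page padded up to the conservative heap alignment. A worked sketch with typical but not universal values (4 KB pages, 8-byte object alignment, an assumed 32 MB conservative alignment):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t object_alignment = 8;                     // ObjectAlignmentInBytes (common default)
  const uint64_t oop_encoding_heap_max =
      (uint64_t(1) << 32) * object_alignment;              // 32 GB with 8-byte alignment
  const uint64_t page_size = 4 * 1024;                     // assumed os::vm_page_size()
  const uint64_t max_alignment = 32 * 1024 * 1024;         // assumed conservative heap alignment
  // Pad the NULL page up to the conservative alignment, as the code above does.
  const uint64_t displacement =
      ((page_size + max_alignment - 1) / max_alignment) * max_alignment;
  std::printf("max heap for compressed oops ~ %llu bytes\n",
              (unsigned long long)(oop_encoding_heap_max - displacement));
  return 0;
}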
1723 
1724 bool Arguments::should_auto_select_low_pause_collector() {
1725   if (UseAutoGCSelectPolicy &&
1726       !FLAG_IS_DEFAULT(MaxGCPauseMillis) &&
1727       (MaxGCPauseMillis <= AutoGCSelectPauseMillis)) {


1773     if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
1774       FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
1775     }
1776     // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
1777     if (UseCompressedClassPointers) {
1778       if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
1779         warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
1780         FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
1781       }
1782     }
1783   }
1784 #endif // _LP64
1785 #endif // !ZERO
1786 }
1787 
1788 void Arguments::set_conservative_max_heap_alignment() {
1789   // The conservative maximum required alignment for the heap is the maximum of
1790   // the alignments imposed by several sources: any requirements from the heap
1791   // itself, the collector policy and the maximum page size we may run the VM
1792   // with.
1793   size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
1794 #if INCLUDE_ALL_GCS
1795   if (UseParallelGC) {
1796     heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
1797   } else if (UseG1GC) {
1798     heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
1799   }
1800 #endif // INCLUDE_ALL_GCS
1801   _conservative_max_heap_alignment = MAX4(heap_alignment,
1802                                           (size_t)os::vm_allocation_granularity(),
1803                                           os::max_page_size(),
1804                                           CollectorPolicy::compute_heap_alignment());
1805 }
1806 
1807 bool Arguments::gc_selected() {
1808 #if INCLUDE_ALL_GCS
1809   return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
1810 #else
1811   return UseSerialGC;
1812 #endif // INCLUDE_ALL_GCS
1813 }
1814 
1815 void Arguments::select_gc_ergonomically() {
1816 #if INCLUDE_ALL_GCS
1817   if (os::is_server_class_machine()) {
1818     if (!UseAutoGCSelectPolicy) {
1819        FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
1820     } else {


1831 #else
1832   UNSUPPORTED_OPTION(UseG1GC);
1833   UNSUPPORTED_OPTION(UseParallelGC);
1834   UNSUPPORTED_OPTION(UseParallelOldGC);
1835   UNSUPPORTED_OPTION(UseConcMarkSweepGC);
1836   UNSUPPORTED_OPTION(UseParNewGC);
1837   FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
1838 #endif // INCLUDE_ALL_GCS
1839 }
1840 
1841 void Arguments::select_gc() {
1842   if (!gc_selected()) {
1843     select_gc_ergonomically();
1844     if (!gc_selected()) {
1845       vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
1846     }
1847   }
1848 }
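
To make the selection logic concrete: on a server-class machine with no GC flags at all, select_gc_ergonomically() turns on UseG1GC; but a command line that only disables the default, such as -XX:-UseG1GC with no other collector chosen, leaves gc_selected() false even after the ergonomic pass (the flag is no longer at its default), and the VM exits with the "default collector explicitly disabled" message above.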
1849 
1850 void Arguments::set_ergonomics_flags() {
1851   select_gc();
1852 
1853 #if defined(COMPILER2) || INCLUDE_JVMCI
1854   // Shared spaces work fine with other GCs but cause bytecode rewriting
1855   // to be disabled, which hurts interpreter performance and decreases
1856   // server performance.  When -server is specified, keep the default off
1857   // unless it is asked for.  Future work: either add bytecode rewriting
1858   // at link time, or rewrite bytecodes in non-shared methods.
1859   if (!DumpSharedSpaces && !RequireSharedSpaces &&
1860       (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
1861     no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
1862   }
1863 #endif
1864 
1865   set_conservative_max_heap_alignment();
1866 
1867 #ifndef ZERO
1868 #ifdef _LP64
1869   set_use_compressed_oops();
1870 
1871   // set_use_compressed_klass_ptrs() must be called after calling
1872   // set_use_compressed_oops().
1873   set_use_compressed_klass_ptrs();
1874 
1875   // Also checks that certain machines are slower with compressed oops
1876   // in vm_version initialization code.
1877 #endif // _LP64
1878 #endif // !ZERO
1879 
1880   CodeCacheExtensions::set_ergonomics_flags();
1881 }
1882 
1883 void Arguments::set_parallel_gc_flags() {
1884   assert(UseParallelGC || UseParallelOldGC, "Error");
1885   // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
1886   if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
1887     FLAG_SET_DEFAULT(UseParallelOldGC, true);
1888   }
1889   FLAG_SET_DEFAULT(UseParallelGC, true);
1890 
1891   // If no heap maximum was requested explicitly, use some reasonable fraction
1892   // of the physical memory, up to a maximum of 1GB.
1893   FLAG_SET_DEFAULT(ParallelGCThreads,
1894                    Abstract_VM_Version::parallel_worker_threads());
1895   if (ParallelGCThreads == 0) {
1896     jio_fprintf(defaultStream::error_stream(),
1897         "The Parallel GC can not be combined with -XX:ParallelGCThreads=0\n");
1898     vm_exit(1);
1899   }
1900 
1901   if (UseAdaptiveSizePolicy) {
1902     // We don't want to limit adaptive heap sizing's freedom to adjust the heap
1903     // unless the user actually sets these flags.
1904     if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
1905       FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
1906     }
1907     if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
1908       FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
1909     }
1910   }
1911 
1912   // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
1913   // SurvivorRatio has been set, reset their default values to SurvivorRatio +
1914   // 2.  By doing this we make SurvivorRatio also work for Parallel Scavenger.
1915   // See CR 6362902 for details.
1916   if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
1917     if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
1918        FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
1919     }
1920     if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
1921       FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
1922     }
1923   }
1924 
1925   if (UseParallelOldGC) {
1926     // Par compact uses lower default values since they are treated as
1927     // minimums.  These are different defaults because of the different
1928     // interpretation and are not ergonomically set.
1929     if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
1930       FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
1931     }
1932   }
1933 }
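
As a worked example of the survivor-ratio adjustment above: running with just -XX:SurvivorRatio=6 ergonomically yields InitialSurvivorRatio=8 and MinSurvivorRatio=8, while either of those flags set explicitly on the command line is left untouched.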
1934 
1935 void Arguments::set_g1_gc_flags() {
1936   assert(UseG1GC, "Error");
1937 #if defined(COMPILER1) || INCLUDE_JVMCI
1938   FastTLABRefill = false;
1939 #endif
1940   FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
1941   if (ParallelGCThreads == 0) {
1942     assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
1943     vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
1944   }
1945 
1946 #if INCLUDE_ALL_GCS
1947   if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
1948     FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
1949   }
1950 #endif
1951 
1952   // MarkStackSize will be set (if it hasn't been set by the user)
1953   // when concurrent marking is initialized.
1954   // Its value will be based upon the number of parallel marking threads.
1955   // But we do set the maximum mark stack size here.
1956   if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
1957     FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
1958   }
1959 
1960   if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
1961     // In G1, we want the default GC overhead goal to be higher than
1962     // it is for PS, or the heap might be expanded too aggressively.
1963     // We set it here to ~8%.
1964     FLAG_SET_DEFAULT(GCTimeRatio, 12);
1965   }
1966 
1967   // Below, we might need to calculate the pause time interval based on
1968   // the pause target. When we do so we are going to give G1 maximum
1969   // flexibility and allow it to do pauses when it needs to. So, we'll
1970   // arrange for the pause interval to be pause time target + 1 to
1971   // ensure that a) the pause time target is maximized with respect to
1972   // the pause interval and b) we maintain the invariant that pause
1973   // time target < pause interval. If the user does not want this
1974   // maximum flexibility, they will have to set the pause interval
1975   // explicitly.
1976 
1977   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
1978     // The default pause time target in G1 is 200ms
1979     FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
1980   }
1981 
1982   // Then, if the interval parameter was not set, set it according to
1983   // the pause time target (this will also deal with the case when the
1984   // pause time target is the default value).
1985   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
1986     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
1987   }
1988 
1989   log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
1990 }
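
The GC overhead goal implied by GCTimeRatio is 1/(1+GCTimeRatio) of total time, so the value 12 chosen above corresponds to 1/13 ≈ 7.7%, the "~8%" mentioned in the comment, versus roughly 1% for the Parallel Scavenger default of 99.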
1991 
1992 void Arguments::set_gc_specific_flags() {
1993 #if INCLUDE_ALL_GCS
1994   // Set per-collector flags
1995   if (UseParallelGC || UseParallelOldGC) {
1996     set_parallel_gc_flags();
1997   } else if (UseConcMarkSweepGC) {
1998     set_cms_and_parnew_gc_flags();
1999   } else if (UseG1GC) {
2000     set_g1_gc_flags();
2001   }
2002   if (AssumeMP && !UseSerialGC) {
2003     if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
2004       warning("If the number of processors is expected to increase from one, then"
2005               " you should configure the number of parallel GC threads appropriately"
2006               " using -XX:ParallelGCThreads=N");
2007     }
2008   }
2009   if (MinHeapFreeRatio == 100) {
2010     // Keeping the heap 100% free is hard ;-) so limit it to 99%.
2011     FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
2012   }
2013 
2014   // If class unloading is disabled, also disable concurrent class unloading.
2015   if (!ClassUnloading) {
2016     FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
2017     FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
2018     FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false);
2019   }
2020 #endif // INCLUDE_ALL_GCS
2021 }




  41 #include "oops/oop.inline.hpp"
  42 #include "prims/jvmtiExport.hpp"
  43 #include "runtime/arguments.hpp"
  44 #include "runtime/arguments_ext.hpp"
  45 #include "runtime/commandLineFlagConstraintList.hpp"
  46 #include "runtime/commandLineFlagWriteableList.hpp"
  47 #include "runtime/commandLineFlagRangeList.hpp"
  48 #include "runtime/globals.hpp"
  49 #include "runtime/globals_extension.hpp"
  50 #include "runtime/java.hpp"
  51 #include "runtime/os.hpp"
  52 #include "runtime/vm_version.hpp"
  53 #include "services/management.hpp"
  54 #include "services/memTracker.hpp"
  55 #include "utilities/defaultStream.hpp"
  56 #include "utilities/macros.hpp"
  57 #include "utilities/stringUtils.hpp"
  58 #if INCLUDE_JVMCI
  59 #include "jvmci/jvmciRuntime.hpp"
  60 #endif
  61 
  62 // Note: This is a special bug reporting site for the JVM
  63 #define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
  64 #define DEFAULT_JAVA_LAUNCHER  "generic"
  65 
  66 char*  Arguments::_jvm_flags_file               = NULL;
  67 char** Arguments::_jvm_flags_array              = NULL;
  68 int    Arguments::_num_jvm_flags                = 0;
  69 char** Arguments::_jvm_args_array               = NULL;
  70 int    Arguments::_num_jvm_args                 = 0;
  71 char*  Arguments::_java_command                 = NULL;
  72 SystemProperty* Arguments::_system_properties   = NULL;
  73 const char*  Arguments::_gc_log_filename        = NULL;
  74 bool   Arguments::_has_profile                  = false;
  75 size_t Arguments::_conservative_max_heap_alignment = 0;
  76 size_t Arguments::_min_heap_size                = 0;
  77 Arguments::Mode Arguments::_mode                = _mixed;
  78 bool   Arguments::_java_compiler                = false;
  79 bool   Arguments::_xdebug_mode                  = false;
  80 const char*  Arguments::_java_vendor_url_bug    = DEFAULT_VENDOR_URL_BUG;


1503     FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
1504 
1505     // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
1506     // once these thresholds become supported.
1507 
1508     FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
1509     FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
1510 
1511     FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
1512     FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
1513 
1514     FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
1515 
1516     FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
1517     FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
1518     FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
1519     FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
1520   }
1521 }
1522 
1523 void set_object_alignment() {
1524   // Object alignment.
1525   assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
1526   MinObjAlignmentInBytes     = ObjectAlignmentInBytes;
1527   assert(MinObjAlignmentInBytes >= HeapWordsPerLong * HeapWordSize, "ObjectAlignmentInBytes value is too small");
1528   MinObjAlignment            = MinObjAlignmentInBytes / HeapWordSize;
1529   assert(MinObjAlignmentInBytes == MinObjAlignment * HeapWordSize, "ObjectAlignmentInBytes value is incorrect");
1530   MinObjAlignmentInBytesMask = MinObjAlignmentInBytes - 1;
1531 
1532   LogMinObjAlignmentInBytes  = exact_log2(ObjectAlignmentInBytes);
1533   LogMinObjAlignment         = LogMinObjAlignmentInBytes - LogHeapWordSize;
1534 
1535   // Oop encoding heap max
1536   OopEncodingHeapMax = (uint64_t(max_juint) + 1) << LogMinObjAlignmentInBytes;
1537 
1538   if (SurvivorAlignmentInBytes == 0) {
1539     SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
1540   }
1541 
1542 }
1543 
1544 size_t Arguments::max_heap_for_compressed_oops() {
1545   // Avoid sign flip.
1546   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
1547   // We need to fit both the NULL page and the heap into the memory budget, while
1548   // keeping alignment constraints of the heap. To guarantee the latter, as the
1549   // NULL page is located before the heap, we pad the NULL page to the conservative
1550   // maximum alignment that the GC may ever impose upon the heap.
1551   size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
1552                                                         _conservative_max_heap_alignment);
1553 
1554   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
1555   NOT_LP64(ShouldNotReachHere(); return 0);
1556 }
1557 
1558 bool Arguments::should_auto_select_low_pause_collector() {
1559   if (UseAutoGCSelectPolicy &&
1560       !FLAG_IS_DEFAULT(MaxGCPauseMillis) &&
1561       (MaxGCPauseMillis <= AutoGCSelectPauseMillis)) {


1607     if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
1608       FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
1609     }
1610     // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
1611     if (UseCompressedClassPointers) {
1612       if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
1613         warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
1614         FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
1615       }
1616     }
1617   }
1618 #endif // _LP64
1619 #endif // !ZERO
1620 }
1621 
1622 void Arguments::set_conservative_max_heap_alignment() {
1623   // The conservative maximum required alignment for the heap is the maximum of
1624   // the alignments imposed by several sources: any requirements from the heap
1625   // itself, the collector policy and the maximum page size we may run the VM
1626   // with.
1627   size_t heap_alignment = GC::gc()->conservative_max_heap_alignment();
1628   _conservative_max_heap_alignment = MAX4(heap_alignment,
1629                                           (size_t)os::vm_allocation_granularity(),
1630                                           os::max_page_size(),
1631                                           CollectorPolicy::compute_heap_alignment());
1632 }
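
In the patched version, the per-collector #if branches that used to pick the alignment are replaced by a single virtual call, GC::gc()->conservative_max_heap_alignment(). The GC interface itself is defined outside this file and is not shown in this webrev; a minimal sketch of what such an interface could look like, with hypothetical names and values, is:

#include <cstddef>

// Hypothetical sketch in the spirit of gcinterface.patch; the real GC class
// lives elsewhere in the patch and may differ in naming and shape.
class GCSketch {
 public:
  virtual ~GCSketch() {}
  // Largest alignment the heap may ever require; what
  // Arguments::set_conservative_max_heap_alignment() queries above.
  virtual size_t conservative_max_heap_alignment() = 0;
  // Collector-specific ergonomics, standing in for the removed
  // set_parallel_gc_flags()/set_cms_and_parnew_gc_flags()/set_g1_gc_flags().
  virtual void initialize_flags() = 0;
};

class SerialGCSketch : public GCSketch {
 public:
  size_t conservative_max_heap_alignment() override { return 64 * 1024; } // illustrative value only
  void initialize_flags() override { /* nothing collector-specific in this sketch */ }
};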
1633 
1634 bool Arguments::gc_selected() {
1635 #if INCLUDE_ALL_GCS
1636   return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
1637 #else
1638   return UseSerialGC;
1639 #endif // INCLUDE_ALL_GCS
1640 }
1641 
1642 void Arguments::select_gc_ergonomically() {
1643 #if INCLUDE_ALL_GCS
1644   if (os::is_server_class_machine()) {
1645     if (!UseAutoGCSelectPolicy) {
1646        FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
1647     } else {


1658 #else
1659   UNSUPPORTED_OPTION(UseG1GC);
1660   UNSUPPORTED_OPTION(UseParallelGC);
1661   UNSUPPORTED_OPTION(UseParallelOldGC);
1662   UNSUPPORTED_OPTION(UseConcMarkSweepGC);
1663   UNSUPPORTED_OPTION(UseParNewGC);
1664   FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
1665 #endif // INCLUDE_ALL_GCS
1666 }
1667 
1668 void Arguments::select_gc() {
1669   if (!gc_selected()) {
1670     select_gc_ergonomically();
1671     if (!gc_selected()) {
1672       vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
1673     }
1674   }
1675 }
1676 
1677 void Arguments::set_ergonomics_flags() {

1678 
1679 #if defined(COMPILER2) || INCLUDE_JVMCI
1680   // Shared spaces work fine with other GCs but cause bytecode rewriting
1681   // to be disabled, which hurts interpreter performance and decreases
1682   // server performance.  When -server is specified, keep the default off
1683   // unless it is asked for.  Future work: either add bytecode rewriting
1684   // at link time, or rewrite bytecodes in non-shared methods.
1685   if (!DumpSharedSpaces && !RequireSharedSpaces &&
1686       (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
1687     no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
1688   }
1689 #endif
1690 
1691   set_conservative_max_heap_alignment();
1692 
1693 #ifndef ZERO
1694 #ifdef _LP64
1695   set_use_compressed_oops();
1696 
1697   // set_use_compressed_klass_ptrs() must be called after calling
1698   // set_use_compressed_oops().
1699   set_use_compressed_klass_ptrs();
1700 
1701   // Also checks that certain machines are slower with compressed oops
1702   // in vm_version initialization code.
1703 #endif // _LP64
1704 #endif // !ZERO
1705 
1706   CodeCacheExtensions::set_ergonomics_flags();
1707 }
1708 
1709 void Arguments::set_gc_specific_flags() {
1710   GC::gc()->initialize_flags();
1711 #if INCLUDE_ALL_GCS
1712   if (AssumeMP && !UseSerialGC) {
1713     if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
1714       warning("If the number of processors is expected to increase from one, then"
1715               " you should configure the number of parallel GC threads appropriately"
1716               " using -XX:ParallelGCThreads=N");
1717     }
1718   }
1719   if (MinHeapFreeRatio == 100) {
1720     // Keeping the heap 100% free is hard ;-) so limit it to 99%.
1721     FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
1722   }
1723 
1724   // If class unloading is disabled, also disable concurrent class unloading.
1725   if (!ClassUnloading) {
1726     FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
1727     FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
1728     FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false);
1729   }
1730 #endif // INCLUDE_ALL_GCS
1731 }
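
With the patch, set_gc_specific_flags() delegates the old if/else chain over UseParallelGC/UseConcMarkSweepGC/UseG1GC to GC::gc()->initialize_flags(), so each collector can carry its own flag setup. Continuing the hypothetical GCSketch interface sketched earlier (again illustrative names, not the actual patch code):

// Hypothetical continuation of the GCSketch interface above.
class G1GCSketch : public GCSketch {
 public:
  size_t conservative_max_heap_alignment() override {
    return 32 * 1024 * 1024;   // stand-in for G1CollectedHeap::conservative_max_heap_alignment()
  }
  void initialize_flags() override {
    // Roughly what Arguments::set_g1_gc_flags() does in the old version:
    // choose ParallelGCThreads, mark-stack limits, GCTimeRatio and pause
    // target defaults, then log the resulting sizes.
  }
};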

