/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_GLOBALS_X86_HPP
#define CPU_X86_GLOBALS_X86_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
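// Each define_pd_global(type, Flag, value) below supplies the x86 default
// for the corresponding flag declared with declare_pd_global in globals.hpp.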

define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks,      false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs passed to check cast

define_pd_global(uintx, CodeCacheSegmentSize,    64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
// assign a different value for C2 without touching a number of files. Use
// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
// C1 doesn't have this problem because the fix for 4858033 ensures that the
// vep is aligned at CodeEntryAlignment, whereas C2 only aligns the uep; the
// vep gets no real alignment and is only assured that the entry instruction
// meets the 5 byte size requirement.
#if COMPILER2_OR_JVMCI
define_pd_global(intx, CodeEntryAlignment,       32);
#else
define_pd_global(intx, CodeEntryAlignment,       16);
#endif // COMPILER2_OR_JVMCI
define_pd_global(intx, OptoLoopAlignment,        16);
define_pd_global(intx, InlineFrequencyCount,     100);
define_pd_global(intx, InlineSmallCode,          1000);

#define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0))

#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
#define MIN_STACK_RESERVED_PAGES (0)

#ifdef _LP64
// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
// stack if compiled for unix and LP64. To pass stack overflow tests we need
// 20 shadow pages.
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(7) DEBUG_ONLY(+2))
// For clients that do not use socket writes, we allow the minimum of the
// range to be below the default.
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(7) DEBUG_ONLY(+2))
#else
#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
#endif // _LP64
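// For illustration: with the macros above, a 64-bit non-Windows product
// build gets NOT_WIN64(20) = 20 shadow pages (22 in a debug build, where
// DEBUG_ONLY(+2) expands to +2), while a 64-bit Windows product build gets 7.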

define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);

define_pd_global(bool, RewriteBytecodes,     true);
define_pd_global(bool, RewriteFrequentPairs, true);

// GC Ergo Flags
define_pd_global(size_t, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread

define_pd_global(uintx, TypeProfileLevel, 111);
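// The three decimal digits of TypeProfileLevel independently enable type
// profiling of arguments, return values and method parameters; see the
// flag's description in globals.hpp for the exact encoding.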

define_pd_global(bool, CompactStrings, true);

define_pd_global(bool, PreserveFramePointer, false);

define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
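// 8*BytesPerLong is 64 bytes; this is the threshold size (in bytes) up to
// which array clearing is expected to use a short inline sequence of stores
// rather than a loop (see the InitArrayShortSize description in globals.hpp).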

#if defined(_LP64) || defined(_WINDOWS)
define_pd_global(bool, ThreadLocalHandshakes, true);
// ObjectMonitor ref_count is implemented in LP64 C2 fast_lock()
// and fast_unlock() so we don't need the handshake by default.
#ifdef _LP64
define_pd_global(bool, HandshakeAfterDeflateIdleMonitors, false);
#else
define_pd_global(bool, HandshakeAfterDeflateIdleMonitors, true);
#endif
#else
// get_thread() is slow on linux 32 bit, therefore off by default
define_pd_global(bool, ThreadLocalHandshakes, false);
// ObjectMonitor ref_count is not implemented in C2 fast_lock() or
// fast_unlock() here, so keep HandshakeAfterDeflateIdleMonitors for safety;
// since ThreadLocalHandshakes is off, a safepoint is used instead of a handshake.
define_pd_global(bool, HandshakeAfterDeflateIdleMonitors, true);
#endif

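// x86-specific flags, consumed by the shared flags machinery. Each macro
// parameter names the declaration macro for one kind of flag (develop,
// product, diagnostic, experimental, notproduct) plus the range, constraint
// and writeable decorators.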
#define ARCH_FLAGS(develop, \
                   product, \
                   diagnostic, \
                   experimental, \
                   notproduct, \
                   range, \
                   constraint, \
                   writeable) \
                                                                            \
  develop(bool, IEEEPrecision, true,                                        \
          "Enables IEEE precision (for INTEL only)")                        \
                                                                            \
  product(bool, UseStoreImmI16, true,                                       \
          "Use store immediate 16-bit value instruction on x86")            \
                                                                            \
  product(intx, UseAVX, 3,                                                  \
          "Highest supported AVX instruction set on x86/x64")               \
          range(0, 99)                                                      \
                                                                            \
  product(bool, UseCLMUL, false,                                            \
          "Control whether CLMUL instructions can be used on x86/x64")      \
                                                                            \
  diagnostic(bool, UseIncDec, true,                                         \
          "Use INC, DEC instructions on x86")                               \
                                                                            \
  product(bool, UseNewLongLShift, false,                                    \
          "Use optimized bitwise shift left")                               \
                                                                            \
  product(bool, UseAddressNop, false,                                       \
          "Use '0F 1F [addr]' NOP instructions on x86 cpus")                \
                                                                            \
  product(bool, UseXmmLoadAndClearUpper, true,                              \
          "Load low part of XMM register and clear upper part")             \
                                                                            \
  product(bool, UseXmmRegToRegMoveAll, false,                               \
          "Copy all XMM register bits when moving value between registers") \
                                                                            \
  product(bool, UseXmmI2D, false,                                           \
          "Use SSE2 CVTDQ2PD instruction to convert Integer to Double")     \
                                                                            \
  product(bool, UseXmmI2F, false,                                           \
          "Use SSE2 CVTDQ2PS instruction to convert Integer to Float")      \
                                                                            \
  product(bool, UseUnalignedLoadStores, false,                              \
          "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                            \
  product(bool, UseXMMForObjInit, false,                                    \
          "Use XMM/YMM MOVDQU instruction for Object Initialization")       \
                                                                            \
  product(bool, UseFastStosb, false,                                        \
          "Use fast-string operation for zeroing: rep stosb")               \
                                                                            \
  /* Use Restricted Transactional Memory for lock eliding */                \
  product(bool, UseRTMLocking, false,                                       \
          "Enable RTM lock eliding for inflated locks in compiled code")    \
                                                                            \
  experimental(bool, UseRTMForStackLocks, false,                            \
          "Enable RTM lock eliding for stack locks in compiled code")       \
                                                                            \
  product(bool, UseRTMDeopt, false,                                         \
          "Perform deopt and recompilation based on RTM abort ratio")       \
                                                                            \
  product(int, RTMRetryCount, 5,                                            \
          "Number of RTM retries on lock abort or busy")                    \
          range(0, max_jint)                                                \
                                                                            \
  experimental(int, RTMSpinLoopCount, 100,                                  \
          "Spin count for lock to become free before RTM retry")            \
          range(0, max_jint)                                                \
                                                                            \
  experimental(int, RTMAbortThreshold, 1000,                                \
          "Calculate abort ratio after this number of aborts")              \
          range(0, max_jint)                                                \
                                                                            \
  experimental(int, RTMLockingThreshold, 10000,                             \
          "Lock count at which to do RTM lock eliding without "             \
          "abort ratio calculation")                                        \
          range(0, max_jint)                                                \
                                                                            \
  experimental(int, RTMAbortRatio, 50,                                      \
          "Lock abort ratio at which to stop using RTM lock eliding")       \
          range(0, 100) /* natural range */                                 \
                                                                            \
  experimental(int, RTMTotalCountIncrRate, 64,                              \
          "Increment total RTM attempted lock count once every n times")    \
          range(1, max_jint)                                                \
          constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo)         \
                                                                            \
  experimental(intx, RTMLockingCalculationDelay, 0,                         \
          "Number of milliseconds to wait before start calculating aborts " \
          "for RTM locking")                                                \
                                                                            \
  experimental(bool, UseRTMXendForLockBusy, true,                           \
          "Use RTM Xend instead of Xabort when lock busy")                  \
                                                                            \
  /* assembler */                                                           \
  product(bool, UseCountLeadingZerosInstruction, false,                     \
          "Use count leading zeros instruction")                            \
                                                                            \
  product(bool, UseCountTrailingZerosInstruction, false,                    \
          "Use count trailing zeros instruction")                           \
                                                                            \
  product(bool, UseSSE42Intrinsics, false,                                  \
          "SSE4.2 versions of intrinsics")                                  \
                                                                            \
  product(bool, UseBMI1Instructions, false,                                 \
          "Use BMI1 instructions")                                          \
                                                                            \
  product(bool, UseBMI2Instructions, false,                                 \
          "Use BMI2 instructions")                                          \
                                                                            \
  diagnostic(bool, UseLibmIntrinsic, true,                                  \
          "Use Libm Intrinsics")                                            \
                                                                            \
  /* Minimum array size in bytes to use AVX512 intrinsics */                \
  /* for copy, inflate and fill which don't bail out early based on any */  \
  /* condition. When this value is set to zero compare operations like */   \
  /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
  diagnostic(int, AVX3Threshold, 4096,                                      \
             "Minimum array size in bytes to use AVX512 intrinsics "        \
             "for copy, inflate and fill. When this value is set to zero "  \
             "compare operations can also use AVX512 intrinsics.")          \
          range(0, max_jint)
#endif // CPU_X86_GLOBALS_X86_HPP