
src/hotspot/cpu/aarch64/vm_version_aarch64.cpp

rev 60630 : 8248659: AArch64: Extend CPU Feature detection
Reviewed-by:
Contributed-by: mbeckwit, luhenry, burban
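
On Linux, get_processor_features() below derives the CPU feature set from the kernel's auxiliary vector: getauxval(AT_HWCAP) returns a bit mask whose HWCAP_* bits advertise instructions such as AES, SHA and the LSE atomics. A minimal standalone sketch of that query (illustrative only, not part of the webrev; the fallback defines mirror the ones in the file, for older kernel headers):

    #include <stdio.h>
    #include <sys/auxv.h>    // getauxval, AT_HWCAP
    #include <asm/hwcap.h>   // HWCAP_* bits on Linux/AArch64

    // Older kernel headers may lack the newer bits; use the same fallbacks as the file.
    #ifndef HWCAP_ATOMICS
    #define HWCAP_ATOMICS (1 << 8)
    #endif
    #ifndef HWCAP_SHA512
    #define HWCAP_SHA512 (1 << 21)
    #endif

    int main() {
      unsigned long hwcap = getauxval(AT_HWCAP);   // feature bits published by the kernel
      printf("aes    : %d\n", (hwcap & HWCAP_AES)     != 0);
      printf("sha256 : %d\n", (hwcap & HWCAP_SHA2)    != 0);
      printf("sha512 : %d\n", (hwcap & HWCAP_SHA512)  != 0);
      printf("crc32  : %d\n", (hwcap & HWCAP_CRC32)   != 0);
      printf("lse    : %d\n", (hwcap & HWCAP_ATOMICS) != 0);
      return 0;
    }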


   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"

  29 #include "memory/resourceArea.hpp"
  30 #include "runtime/java.hpp"
  31 #include "runtime/os.hpp"
  32 #include "runtime/stubCodeGenerator.hpp"
  33 #include "runtime/vm_version.hpp"
  34 #include "utilities/macros.hpp"

  35 
  36 #include OS_HEADER_INLINE(os)
  37 

  38 #include <sys/auxv.h>
  39 #include <asm/hwcap.h>
  40 
  41 #ifndef HWCAP_AES
  42 #define HWCAP_AES   (1<<3)
  43 #endif
  44 
  45 #ifndef HWCAP_PMULL
  46 #define HWCAP_PMULL (1<<4)
  47 #endif
  48 
  49 #ifndef HWCAP_SHA1
  50 #define HWCAP_SHA1  (1<<5)
  51 #endif
  52 
  53 #ifndef HWCAP_SHA2
  54 #define HWCAP_SHA2  (1<<6)
  55 #endif
  56 
  57 #ifndef HWCAP_CRC32
  58 #define HWCAP_CRC32 (1<<7)
  59 #endif
  60 
  61 #ifndef HWCAP_ATOMICS
  62 #define HWCAP_ATOMICS (1<<8)
  63 #endif
  64 
  65 #ifndef HWCAP_SHA512
  66 #define HWCAP_SHA512 (1 << 21)
  67 #endif
  68 
  69 int VM_Version::_cpu;
  70 int VM_Version::_model;
  71 int VM_Version::_model2;
  72 int VM_Version::_variant;
  73 int VM_Version::_revision;
  74 int VM_Version::_stepping;
  75 bool VM_Version::_dcpop;
  76 VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };
  77 
  78 static BufferBlob* stub_blob;
  79 static const int stub_size = 550;
  80 
  81 extern "C" {
  82   typedef void (*getPsrInfo_stub_t)(void*);
  83 }
  84 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
  85 
  86 
  87 class VM_Version_StubGenerator: public StubCodeGenerator {
  88  public:
  89 
  90   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  91 
  92   address generate_getPsrInfo() {
  93     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
  94 #   define __ _masm->
  95     address start = __ pc();
  96 
  97     // void getPsrInfo(VM_Version::PsrInfo* psr_info);
  98 
  99     address entry = __ pc();
 100 
 101     __ enter();
 102 
 103     __ get_dczid_el0(rscratch1);
 104     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
 105 

 106     __ get_ctr_el0(rscratch1);
 107     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));

 108 
 109     __ leave();
 110     __ ret(lr);
 111 
 112 #   undef __
 113 
 114     return start;
 115   }
 116 };
 117 
 118 
 119 void VM_Version::get_processor_features() {
 120   _supports_cx8 = true;
 121   _supports_atomic_getset4 = true;
 122   _supports_atomic_getadd4 = true;
 123   _supports_atomic_getset8 = true;
 124   _supports_atomic_getadd8 = true;
 125 
 126   getPsrInfo_stub(&_psr_info);
 127 


 148     if (PrefetchCopyIntervalInBytes >= 32768)
 149       PrefetchCopyIntervalInBytes = 32760;
 150   }
 151 
 152   if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
 153     warning("AllocatePrefetchDistance must be multiple of 8");
 154     AllocatePrefetchDistance &= ~7;
 155   }
 156 
 157   if (AllocatePrefetchStepSize & 7) {
 158     warning("AllocatePrefetchStepSize must be multiple of 8");
 159     AllocatePrefetchStepSize &= ~7;
 160   }
 161 
 162   if (SoftwarePrefetchHintDistance != -1 &&
 163        (SoftwarePrefetchHintDistance & 7)) {
 164     warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
 165     SoftwarePrefetchHintDistance &= ~7;
 166   }
 167 
 168   uint64_t auxv = getauxval(AT_HWCAP);







 169 
 170   char buf[512];
 171 
 172   _features = auxv;
 173 
 174   int cpu_lines = 0;

 175   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
 176     // need a large buffer as the flags line may include lots of text
 177     char buf[1024], *p;
 178     while (fgets(buf, sizeof (buf), f) != NULL) {
 179       if ((p = strchr(buf, ':')) != NULL) {
 180         long v = strtol(p+1, NULL, 0);
 181         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
 182           _cpu = v;
 183           cpu_lines++;
 184         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
 185           _variant = v;
 186         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
 187           if (_model != v)  _model2 = _model;
 188           _model = v;
 189         } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
 190           _revision = v;
 191         } else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
 192           if (strstr(p+1, "dcpop")) {
 193             _dcpop = true;
 194           }
 195         }
 196       }
 197     }
 198     fclose(f);
 199   }






















 200 
 201   if (os::supports_map_sync()) {
  202     // If dcpop is available, publish the data cache line flush size via the
  203     // generic field; otherwise let it default to zero, thereby
  204     // disabling writeback.
 205     if (_dcpop) {
 206       _data_cache_line_flush_size = dcache_line;
 207     }
 208   }
 209 
 210   // Enable vendor specific features
 211 
 212   // Ampere eMAG
 213   if (_cpu == CPU_AMCC && (_model == 0) && (_variant == 0x3)) {
 214     if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
 215       FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
 216     }
 217     if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
 218       FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
 219     }


 267 
 268   // Cortex A73
 269   if (_cpu == CPU_ARM && (_model == 0xd09 || _model2 == 0xd09)) {
 270     if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
 271       FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
 272     }
 273     // A73 is faster with short-and-easy-for-speculative-execution-loop
 274     if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
 275       FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
 276     }
 277   }
 278 
 279   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
  280   // If this is an old-style /proc/cpuinfo (cpu_lines == 1) and _model is an A57 (0xd07),
  281   // assume the worst: we could be on a big.LITTLE system with undisclosed
  282   // A53 cores that we could be swapped onto at any stage.
 283   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
 284 
 285   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
 286   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
 287   if (auxv & HWCAP_ASIMD) strcat(buf, ", simd");
 288   if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
 289   if (auxv & HWCAP_AES)   strcat(buf, ", aes");
 290   if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
 291   if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
 292   if (auxv & HWCAP_SHA512) strcat(buf, ", sha512");
 293   if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
 294 
 295   _features_string = os::strdup(buf);
 296 
 297   if (FLAG_IS_DEFAULT(UseCRC32)) {
 298     UseCRC32 = (auxv & HWCAP_CRC32) != 0;
 299   }
 300 
 301   if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
 302     warning("UseCRC32 specified, but not supported on this CPU");
 303     FLAG_SET_DEFAULT(UseCRC32, false);
 304   }
 305 
 306   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
 307     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
 308   }
 309 
 310   if (UseVectorizedMismatchIntrinsic) {
 311     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
 312     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
 313   }
 314 
 315   if (auxv & HWCAP_ATOMICS) {
 316     if (FLAG_IS_DEFAULT(UseLSE))
 317       FLAG_SET_DEFAULT(UseLSE, true);
 318   } else {
 319     if (UseLSE) {
 320       warning("UseLSE specified, but not supported on this CPU");
 321       FLAG_SET_DEFAULT(UseLSE, false);
 322     }
 323   }
 324 
 325   if (auxv & HWCAP_AES) {
 326     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
 327     UseAESIntrinsics =
 328         UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
 329     if (UseAESIntrinsics && !UseAES) {
 330       warning("UseAESIntrinsics enabled, but UseAES not, enabling");
 331       UseAES = true;
 332     }
 333   } else {
 334     if (UseAES) {
 335       warning("AES instructions are not available on this CPU");
 336       FLAG_SET_DEFAULT(UseAES, false);
 337     }
 338     if (UseAESIntrinsics) {
 339       warning("AES intrinsics are not available on this CPU");
 340       FLAG_SET_DEFAULT(UseAESIntrinsics, false);
 341     }
 342   }
 343 
 344   if (UseAESCTRIntrinsics) {
 345     warning("AES/CTR intrinsics are not available on this CPU");
 346     FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
 347   }
 348 
 349   if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
 350     UseCRC32Intrinsics = true;
 351   }
 352 
 353   if (auxv & HWCAP_CRC32) {
 354     if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
 355       FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
 356     }
 357   } else if (UseCRC32CIntrinsics) {
 358     warning("CRC32C is not available on the CPU");
 359     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
 360   }
 361 
 362   if (FLAG_IS_DEFAULT(UseFMA)) {
 363     FLAG_SET_DEFAULT(UseFMA, true);
 364   }
 365 
 366   if (UseMD5Intrinsics) {
 367     warning("MD5 intrinsics are not available on this CPU");
 368     FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
 369   }
 370 
 371   if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
 372     if (FLAG_IS_DEFAULT(UseSHA)) {
 373       FLAG_SET_DEFAULT(UseSHA, true);
 374     }
 375   } else if (UseSHA) {
 376     warning("SHA instructions are not available on this CPU");
 377     FLAG_SET_DEFAULT(UseSHA, false);
 378   }
 379 
 380   if (UseSHA && (auxv & HWCAP_SHA1)) {
 381     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
 382       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
 383     }
 384   } else if (UseSHA1Intrinsics) {
 385     warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
 386     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
 387   }
 388 
 389   if (UseSHA && (auxv & HWCAP_SHA2)) {
 390     if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
 391       FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
 392     }
 393   } else if (UseSHA256Intrinsics) {
 394     warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
 395     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
 396   }
 397 
 398   if (UseSHA && (auxv & HWCAP_SHA512)) {
 399     // Do not auto-enable UseSHA512Intrinsics until it has been fully tested on hardware
 400     // if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
 401       // FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
 402     // }
 403   } else if (UseSHA512Intrinsics) {
 404     warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
 405     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
 406   }
 407 
 408   if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
 409     FLAG_SET_DEFAULT(UseSHA, false);
 410   }
 411 
 412   if (auxv & HWCAP_PMULL) {
 413     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
 414       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
 415     }
 416   } else if (UseGHASHIntrinsics) {
 417     warning("GHASH intrinsics are not available on this CPU");
 418     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
 419   }
 420 
 421   if (is_zva_enabled()) {
 422     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
 423       FLAG_SET_DEFAULT(UseBlockZeroing, true);
 424     }
 425     if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
 426       FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
 427     }
 428   } else if (UseBlockZeroing) {
 429     warning("DC ZVA is not available on this CPU");
 430     FLAG_SET_DEFAULT(UseBlockZeroing, false);
 431   }
 432 
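
The /proc/cpuinfo loop above keys on the "CPU implementer", "CPU variant", "CPU part", "CPU revision" and "flags" lines, converting each value with strtol in base 0 so the usual 0x-prefixed hex is accepted. A minimal illustration of that parsing step (the sample line is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main() {
      // A typical "CPU part" line as printed by the kernel (value is illustrative).
      const char line[] = "CPU part\t: 0xd07";
      const char *p = strchr(line, ':');
      if (p != NULL && strncmp(line, "CPU part", sizeof "CPU part" - 1) == 0) {
        long model = strtol(p + 1, NULL, 0);   // base 0 accepts the 0x prefix
        printf("model = 0x%lx\n", model);      // prints: model = 0xd07
      }
      return 0;
    }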




   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "logging/log.hpp"
  30 #include "memory/resourceArea.hpp"
  31 #include "runtime/java.hpp"
  32 #include "runtime/os.hpp"
  33 #include "runtime/stubCodeGenerator.hpp"
  34 #include "runtime/vm_version.hpp"
  35 #include "utilities/macros.hpp"
  36 #include "vm_version_aarch64.hpp"
  37 
  38 #include OS_HEADER_INLINE(os)
  39 
  40 #ifndef _WIN64
  41 #include <sys/auxv.h>
  42 #include <asm/hwcap.h>



























  43 #endif
  44 
  45 int VM_Version::_cpu;
  46 int VM_Version::_model;
  47 int VM_Version::_model2;
  48 int VM_Version::_variant;
  49 int VM_Version::_revision;
  50 int VM_Version::_stepping;
  51 bool VM_Version::_dcpop;
  52 VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };
  53 
  54 static BufferBlob* stub_blob;
  55 static const int stub_size = 550;
  56 
  57 extern "C" {
  58   typedef void (*getPsrInfo_stub_t)(void*);
  59 }
  60 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
  61 
  62 
  63 class VM_Version_StubGenerator: public StubCodeGenerator {
  64  public:
  65 
  66   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  67 
  68   address generate_getPsrInfo() {
  69     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
  70 #   define __ _masm->
  71     address start = __ pc();
  72 
  73     // void getPsrInfo(VM_Version::PsrInfo* psr_info);
  74 
  75     address entry = __ pc();
  76 
  77     __ enter();
  78 
  79     __ get_dczid_el0(rscratch1);
  80     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
  81 
  82 #ifndef _WIN64
  83     __ get_ctr_el0(rscratch1);
  84     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));
  85 #endif
  86 
  87     __ leave();
  88     __ ret(lr);
  89 
  90 #   undef __
  91 
  92     return start;
  93   }
  94 };
  95 
  96 
  97 void VM_Version::get_processor_features() {
  98   _supports_cx8 = true;
  99   _supports_atomic_getset4 = true;
 100   _supports_atomic_getadd4 = true;
 101   _supports_atomic_getset8 = true;
 102   _supports_atomic_getadd8 = true;
 103 
 104   getPsrInfo_stub(&_psr_info);
 105 


 126     if (PrefetchCopyIntervalInBytes >= 32768)
 127       PrefetchCopyIntervalInBytes = 32760;
 128   }
 129 
 130   if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
 131     warning("AllocatePrefetchDistance must be multiple of 8");
 132     AllocatePrefetchDistance &= ~7;
 133   }
 134 
 135   if (AllocatePrefetchStepSize & 7) {
 136     warning("AllocatePrefetchStepSize must be multiple of 8");
 137     AllocatePrefetchStepSize &= ~7;
 138   }
 139 
 140   if (SoftwarePrefetchHintDistance != -1 &&
 141        (SoftwarePrefetchHintDistance & 7)) {
 142     warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
 143     SoftwarePrefetchHintDistance &= ~7;
 144   }
 145 
 146 #ifndef _WIN64
 147   _features = getauxval(AT_HWCAP);
 148 #else
 149   if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE))   _features |= CPU_CRC32;
 150   if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE))  _features |= CPU_AES | CPU_SHA1 | CPU_SHA2;
 151   if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE))        _features |= CPU_ASIMD;
 152   // No check for CPU_PMULL
 153 #endif // _WIN64
 154 
 155   char buf[512];
 156 


 157   int cpu_lines = 0;
 158 #ifndef _WIN64
 159   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
 160     // need a large buffer as the flags line may include lots of text
 161     char buf[1024], *p;
 162     while (fgets(buf, sizeof (buf), f) != NULL) {
 163       if ((p = strchr(buf, ':')) != NULL) {
 164         long v = strtol(p+1, NULL, 0);
 165         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
 166           _cpu = v;
 167           cpu_lines++;
 168         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
 169           _variant = v;
 170         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
 171           if (_model != v)  _model2 = _model;
 172           _model = v;
 173         } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
 174           _revision = v;
 175         } else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
 176           if (strstr(p+1, "dcpop")) {
 177             _dcpop = true;
 178           }
 179         }
 180       }
 181     }
 182     fclose(f);
 183   }
 184 #else
 185   {
 186     char* buf = ::getenv("PROCESSOR_IDENTIFIER");
 187     if (buf && strstr(buf, "Ampere(TM)") != NULL) {
 188       _cpu = CPU_AMCC;
 189       cpu_lines++;
 190     } else if (buf && strstr(buf, "Cavium Inc.") != NULL) {
 191       _cpu = CPU_CAVIUM;
 192       cpu_lines++;
 193     } else {
 194       log_info(os)("VM_Version: unknown CPU model");
 195     }
 196 
 197     if (_cpu) {
 198       SYSTEM_INFO si;
 199       GetSystemInfo(&si);
 200       _model = si.wProcessorLevel;
  201       _variant = si.wProcessorRevision >> 8;    // high byte: variant, low byte: revision
 202       _revision = si.wProcessorRevision & 0xFF;
 203     }
 204   }
 205 #endif // _WIN64
 206 
 207   if (os::supports_map_sync()) {
  208     // If dcpop is available, publish the data cache line flush size via the
  209     // generic field; otherwise let it default to zero, thereby
  210     // disabling writeback.
 211     if (_dcpop) {
 212       _data_cache_line_flush_size = dcache_line;
 213     }
 214   }
 215 
 216   // Enable vendor specific features
 217 
 218   // Ampere eMAG
 219   if (_cpu == CPU_AMCC && (_model == 0) && (_variant == 0x3)) {
 220     if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
 221       FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
 222     }
 223     if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
 224       FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
 225     }


 273 
 274   // Cortex A73
 275   if (_cpu == CPU_ARM && (_model == 0xd09 || _model2 == 0xd09)) {
 276     if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
 277       FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
 278     }
 279     // A73 is faster with short-and-easy-for-speculative-execution-loop
 280     if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
 281       FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
 282     }
 283   }
 284 
 285   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
  286   // If this is an old-style /proc/cpuinfo (cpu_lines == 1) and _model is an A57 (0xd07),
  287   // assume the worst: we could be on a big.LITTLE system with undisclosed
  288   // A53 cores that we could be swapped onto at any stage.
 289   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
 290 
 291   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
 292   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
 293   if (_features & CPU_ASIMD)    strcat(buf, ", simd");
 294   if (_features & CPU_CRC32)    strcat(buf, ", crc");
 295   if (_features & CPU_AES)      strcat(buf, ", aes");
 296   if (_features & CPU_SHA1)     strcat(buf, ", sha1");
 297   if (_features & CPU_SHA2)     strcat(buf, ", sha256");
 298   if (_features & CPU_SHA512)   strcat(buf, ", sha512");
 299   if (_features & CPU_LSE)      strcat(buf, ", lse");
 300 
 301   _features_string = os::strdup(buf);
 302 
 303   if (FLAG_IS_DEFAULT(UseCRC32)) {
 304     UseCRC32 = (_features & CPU_CRC32) != 0;
 305   }
 306 
 307   if (UseCRC32 && (_features & CPU_CRC32) == 0) {
 308     warning("UseCRC32 specified, but not supported on this CPU");
 309     FLAG_SET_DEFAULT(UseCRC32, false);
 310   }
 311 
 312   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
 313     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
 314   }
 315 
 316   if (UseVectorizedMismatchIntrinsic) {
 317     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
 318     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
 319   }
 320 
 321   if (_features & CPU_LSE) {
 322     if (FLAG_IS_DEFAULT(UseLSE))
 323       FLAG_SET_DEFAULT(UseLSE, true);
 324   } else {
 325     if (UseLSE) {
 326       warning("UseLSE specified, but not supported on this CPU");
 327       FLAG_SET_DEFAULT(UseLSE, false);
 328     }
 329   }
 330 
 331   if (_features & CPU_AES) {
 332     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
 333     UseAESIntrinsics =
 334         UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
 335     if (UseAESIntrinsics && !UseAES) {
 336       warning("UseAESIntrinsics enabled, but UseAES not, enabling");
 337       UseAES = true;
 338     }
 339   } else {
 340     if (UseAES) {
 341       warning("AES instructions are not available on this CPU");
 342       FLAG_SET_DEFAULT(UseAES, false);
 343     }
 344     if (UseAESIntrinsics) {
 345       warning("AES intrinsics are not available on this CPU");
 346       FLAG_SET_DEFAULT(UseAESIntrinsics, false);
 347     }
 348   }
 349 
 350   if (UseAESCTRIntrinsics) {
 351     warning("AES/CTR intrinsics are not available on this CPU");
 352     FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
 353   }
 354 
 355   if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
 356     UseCRC32Intrinsics = true;
 357   }
 358 
 359   if (_features & CPU_CRC32) {
 360     if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
 361       FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
 362     }
 363   } else if (UseCRC32CIntrinsics) {
 364     warning("CRC32C is not available on the CPU");
 365     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
 366   }
 367 
 368   if (FLAG_IS_DEFAULT(UseFMA)) {
 369     FLAG_SET_DEFAULT(UseFMA, true);
 370   }
 371 
 372   if (UseMD5Intrinsics) {
 373     warning("MD5 intrinsics are not available on this CPU");
 374     FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
 375   }
 376 
 377   if (_features & (CPU_SHA1 | CPU_SHA2)) {
 378     if (FLAG_IS_DEFAULT(UseSHA)) {
 379       FLAG_SET_DEFAULT(UseSHA, true);
 380     }
 381   } else if (UseSHA) {
 382     warning("SHA instructions are not available on this CPU");
 383     FLAG_SET_DEFAULT(UseSHA, false);
 384   }
 385 
 386   if (UseSHA && (_features & CPU_SHA1)) {
 387     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
 388       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
 389     }
 390   } else if (UseSHA1Intrinsics) {
 391     warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
 392     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
 393   }
 394 
 395   if (UseSHA && (_features & CPU_SHA2)) {
 396     if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
 397       FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
 398     }
 399   } else if (UseSHA256Intrinsics) {
 400     warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
 401     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
 402   }
 403 
 404   if (UseSHA && (_features & CPU_SHA512)) {
 405     // Do not auto-enable UseSHA512Intrinsics until it has been fully tested on hardware
 406     // if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
 407       // FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
 408     // }
 409   } else if (UseSHA512Intrinsics) {
 410     warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
 411     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
 412   }
 413 
 414   if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
 415     FLAG_SET_DEFAULT(UseSHA, false);
 416   }
 417 
 418   if (_features & CPU_PMULL) {
 419     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
 420       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
 421     }
 422   } else if (UseGHASHIntrinsics) {
 423     warning("GHASH intrinsics are not available on this CPU");
 424     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
 425   }
 426 
 427   if (is_zva_enabled()) {
 428     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
 429       FLAG_SET_DEFAULT(UseBlockZeroing, true);
 430     }
 431     if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
 432       FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
 433     }
 434   } else if (UseBlockZeroing) {
 435     warning("DC ZVA is not available on this CPU");
 436     FLAG_SET_DEFAULT(UseBlockZeroing, false);
 437   }
 438 
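
For comparison with the Linux path, the Windows branch in the revised file above maps Win32 processor-feature queries onto the CPU_* flags and then fills _model/_variant/_revision from GetSystemInfo(). A standalone sketch of those queries (illustrative only; Windows on Arm64):

    #include <windows.h>
    #include <stdio.h>

    int main() {
      // The same queries used by the #else // _WIN64 branch of get_processor_features().
      printf("crc32  : %d\n", IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)  != 0);
      printf("crypto : %d\n", IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0);
      printf("asimd  : %d\n", IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)       != 0);

      SYSTEM_INFO si;
      GetSystemInfo(&si);
      printf("level = %u, revision = 0x%x\n",
             (unsigned) si.wProcessorLevel, (unsigned) si.wProcessorRevision);
      return 0;
    }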

