< prev index next >

src/hotspot/cpu/aarch64/vm_version_aarch64.cpp

Print this page
8248238: Adding Windows support to OpenJDK on AArch64

Summary: LP64 vs LLP64 changes to add Windows support

Contributed-by: Monica Beckwith <monica.beckwith@microsoft.com>, Ludovic Henry <luhenry@microsoft.com>
Reviewed-by:
8248238: Adding Windows support to OpenJDK on AArch64

Summary: Adding Windows support for AArch64

Contributed-by: Ludovic Henry <luhenry@microsoft.com>, Monica Beckwith <monica.beckwith@microsoft.com>
Reviewed-by:


  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "runtime/java.hpp"
  31 #include "runtime/os.hpp"
  32 #include "runtime/stubCodeGenerator.hpp"
  33 #include "runtime/vm_version.hpp"
  34 #include "utilities/macros.hpp"

  35 
  36 #include OS_HEADER_INLINE(os)
  37 

  38 #include <sys/auxv.h>
  39 #include <asm/hwcap.h>
  40 
// Fallback definitions for HWCAP_* feature bits that may be absent from
// older <asm/hwcap.h> kernel headers.  The values mirror the Linux AArch64
// AT_HWCAP bit layout, so getauxval(AT_HWCAP) results can be tested even
// when building against old headers.
  41 #ifndef HWCAP_AES
  42 #define HWCAP_AES   (1<<3)
  43 #endif
  44 
  45 #ifndef HWCAP_PMULL
  46 #define HWCAP_PMULL (1<<4)
  47 #endif
  48 
  49 #ifndef HWCAP_SHA1
  50 #define HWCAP_SHA1  (1<<5)
  51 #endif
  52 
  53 #ifndef HWCAP_SHA2
  54 #define HWCAP_SHA2  (1<<6)
  55 #endif
  56 
  57 #ifndef HWCAP_CRC32
  58 #define HWCAP_CRC32 (1<<7)
  59 #endif
  60 
  61 #ifndef HWCAP_ATOMICS
  62 #define HWCAP_ATOMICS (1<<8)
  63 #endif
  64 
// CPU identification fields; populated from /proc/cpuinfo by
// get_processor_features() below.
  65 int VM_Version::_cpu;
  66 int VM_Version::_model;
  67 int VM_Version::_model2;
  68 int VM_Version::_variant;
  69 int VM_Version::_revision;
  70 int VM_Version::_stepping;
// Set when /proc/cpuinfo's "flags" line contains "dcpop"
// (data cache clean-to-point-of-persistence support).
  71 bool VM_Version::_dcpop;
// Raw system-register snapshot filled in by the generated getPsrInfo stub.
  72 VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };
  73 
// Code blob that holds the generated getPsrInfo stub.
  74 static BufferBlob* stub_blob;
  75 static const int stub_size = 550;
  76 
// C-callable signature of the generated stub: takes a pointer to the
// PsrInfo struct to fill in.
  77 extern "C" {
  78   typedef void (*getPsrInfo_stub_t)(void*);
  79 }
  80 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
  81 
  82 
  83 class VM_Version_StubGenerator: public StubCodeGenerator {
  84  public:
  85 
  86   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  87 
  88   address generate_getPsrInfo() {
  89     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
  90 #   define __ _masm->
  91     address start = __ pc();
  92 
  93     // void getPsrInfo(VM_Version::PsrInfo* psr_info);
  94 
  95     address entry = __ pc();
  96 
  97     __ enter();
  98 
  99     __ get_dczid_el0(rscratch1);
 100     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
 101 

 102     __ get_ctr_el0(rscratch1);
 103     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));

 104 
 105     __ leave();
 106     __ ret(lr);
 107 
 108 #   undef __
 109 
 110     return start;
 111   }
 112 };
 113 
 114 
 115 void VM_Version::get_processor_features() {
 116   _supports_cx8 = true;
 117   _supports_atomic_getset4 = true;
 118   _supports_atomic_getadd4 = true;
 119   _supports_atomic_getset8 = true;
 120   _supports_atomic_getadd8 = true;
 121 
 122   getPsrInfo_stub(&_psr_info);
 123 


 144     if (PrefetchCopyIntervalInBytes >= 32768)
 145       PrefetchCopyIntervalInBytes = 32760;
 146   }
 147 
 148   if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
 149     warning("AllocatePrefetchDistance must be multiple of 8");
 150     AllocatePrefetchDistance &= ~7;
 151   }
 152 
 153   if (AllocatePrefetchStepSize & 7) {
 154     warning("AllocatePrefetchStepSize must be multiple of 8");
 155     AllocatePrefetchStepSize &= ~7;
 156   }
 157 
 158   if (SoftwarePrefetchHintDistance != -1 &&
 159        (SoftwarePrefetchHintDistance & 7)) {
 160     warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
 161     SoftwarePrefetchHintDistance &= ~7;
 162   }
 163 
 164   unsigned long auxv = getauxval(AT_HWCAP);







 165 
 166   char buf[512];
 167 
 168   _features = auxv;
 169 
 170   int cpu_lines = 0;

 171   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
 172     // need a large buffer as the flags line may include lots of text
 173     char buf[1024], *p;
 174     while (fgets(buf, sizeof (buf), f) != NULL) {
 175       if ((p = strchr(buf, ':')) != NULL) {
 176         long v = strtol(p+1, NULL, 0);
 177         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
 178           _cpu = v;
 179           cpu_lines++;
 180         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
 181           _variant = v;
 182         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
 183           if (_model != v)  _model2 = _model;
 184           _model = v;
 185         } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
 186           _revision = v;
 187         } else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
 188           if (strstr(p+1, "dcpop")) {
 189             _dcpop = true;
 190           }
 191         }
 192       }
 193     }
 194     fclose(f);
 195   }






















 196 
 197   if (os::supports_map_sync()) {
 198     // if dcpop is available publish data cache line flush size via
  199     // generic field, otherwise let it default to zero thereby
 200     // disabling writeback
 201     if (_dcpop) {
 202       _data_cache_line_flush_size = dcache_line;
 203     }
 204   }
 205 
 206   // Enable vendor specific features
 207 
 208   // Ampere eMAG
 209   if (_cpu == CPU_AMCC && (_model == 0) && (_variant == 0x3)) {
 210     if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
 211       FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
 212     }
 213     if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
 214       FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
 215     }


 263 
 264   // Cortex A73
 265   if (_cpu == CPU_ARM && (_model == 0xd09 || _model2 == 0xd09)) {
 266     if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
 267       FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
 268     }
 269     // A73 is faster with short-and-easy-for-speculative-execution-loop
 270     if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
 271       FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
 272     }
 273   }
 274 
 275   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
 276   // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
 277   // we assume the worst and assume we could be on a big little system and have
 278   // undisclosed A53 cores which we could be swapped to at any stage
 279   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
 280 
 281   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
 282   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
 283   if (auxv & HWCAP_ASIMD) strcat(buf, ", simd");
 284   if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
 285   if (auxv & HWCAP_AES)   strcat(buf, ", aes");
 286   if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
 287   if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
 288   if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
 289 
 290   _features_string = os::strdup(buf);
 291 
 292   if (FLAG_IS_DEFAULT(UseCRC32)) {
 293     UseCRC32 = (auxv & HWCAP_CRC32) != 0;
 294   }
 295 
 296   if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
 297     warning("UseCRC32 specified, but not supported on this CPU");
 298     FLAG_SET_DEFAULT(UseCRC32, false);
 299   }
 300 
 301   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
 302     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
 303   }
 304 
 305   if (UseVectorizedMismatchIntrinsic) {
 306     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
 307     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
 308   }
 309 
 310   if (auxv & HWCAP_ATOMICS) {
 311     if (FLAG_IS_DEFAULT(UseLSE))
 312       FLAG_SET_DEFAULT(UseLSE, true);
 313   } else {
 314     if (UseLSE) {
 315       warning("UseLSE specified, but not supported on this CPU");
 316       FLAG_SET_DEFAULT(UseLSE, false);
 317     }
 318   }
 319 
 320   if (auxv & HWCAP_AES) {
 321     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
 322     UseAESIntrinsics =
 323         UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
 324     if (UseAESIntrinsics && !UseAES) {
 325       warning("UseAESIntrinsics enabled, but UseAES not, enabling");
 326       UseAES = true;
 327     }
 328   } else {
 329     if (UseAES) {
 330       warning("AES instructions are not available on this CPU");
 331       FLAG_SET_DEFAULT(UseAES, false);
 332     }
 333     if (UseAESIntrinsics) {
 334       warning("AES intrinsics are not available on this CPU");
 335       FLAG_SET_DEFAULT(UseAESIntrinsics, false);
 336     }
 337   }
 338 
 339   if (UseAESCTRIntrinsics) {
 340     warning("AES/CTR intrinsics are not available on this CPU");
 341     FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
 342   }
 343 
 344   if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
 345     UseCRC32Intrinsics = true;
 346   }
 347 
 348   if (auxv & HWCAP_CRC32) {
 349     if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
 350       FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
 351     }
 352   } else if (UseCRC32CIntrinsics) {
 353     warning("CRC32C is not available on the CPU");
 354     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
 355   }
 356 
 357   if (FLAG_IS_DEFAULT(UseFMA)) {
 358     FLAG_SET_DEFAULT(UseFMA, true);
 359   }
 360 
 361   if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
 362     if (FLAG_IS_DEFAULT(UseSHA)) {
 363       FLAG_SET_DEFAULT(UseSHA, true);
 364     }
 365   } else if (UseSHA) {
 366     warning("SHA instructions are not available on this CPU");
 367     FLAG_SET_DEFAULT(UseSHA, false);
 368   }
 369 
 370   if (UseSHA && (auxv & HWCAP_SHA1)) {
 371     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
 372       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
 373     }
 374   } else if (UseSHA1Intrinsics) {
 375     warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
 376     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
 377   }
 378 
 379   if (UseSHA && (auxv & HWCAP_SHA2)) {
 380     if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
 381       FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
 382     }
 383   } else if (UseSHA256Intrinsics) {
 384     warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
 385     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
 386   }
 387 
 388   if (UseSHA512Intrinsics) {
 389     warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
 390     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
 391   }
 392 
 393   if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
 394     FLAG_SET_DEFAULT(UseSHA, false);
 395   }
 396 
 397   if (auxv & HWCAP_PMULL) {
 398     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
 399       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
 400     }
 401   } else if (UseGHASHIntrinsics) {
 402     warning("GHASH intrinsics are not available on this CPU");
 403     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
 404   }
 405 
 406   if (is_zva_enabled()) {
 407     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
 408       FLAG_SET_DEFAULT(UseBlockZeroing, true);
 409     }
 410     if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
 411       FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
 412     }
 413   } else if (UseBlockZeroing) {
 414     warning("DC ZVA is not available on this CPU");
 415     FLAG_SET_DEFAULT(UseBlockZeroing, false);
 416   }
 417 




  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "runtime/java.hpp"
  31 #include "runtime/os.hpp"
  32 #include "runtime/stubCodeGenerator.hpp"
  33 #include "runtime/vm_version.hpp"
  34 #include "utilities/macros.hpp"
  35 #include "vm_version_aarch64.hpp"
  36 
  37 #include OS_HEADER_INLINE(os)
  38 
  39 #ifndef _WIN64
  40 #include <sys/auxv.h>
  41 #include <asm/hwcap.h>























  42 #endif
  43 
// CPU identification fields; populated by get_processor_features() below —
// from /proc/cpuinfo on Linux, or from PROCESSOR_IDENTIFIER/GetSystemInfo
// on Windows.
  44 int VM_Version::_cpu;
  45 int VM_Version::_model;
  46 int VM_Version::_model2;
  47 int VM_Version::_variant;
  48 int VM_Version::_revision;
  49 int VM_Version::_stepping;
// Set when /proc/cpuinfo's "flags" line contains "dcpop"
// (data cache clean-to-point-of-persistence support).
  50 bool VM_Version::_dcpop;
// Raw system-register snapshot filled in by the generated getPsrInfo stub.
  51 VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };
  52 
// Code blob that holds the generated getPsrInfo stub.
  53 static BufferBlob* stub_blob;
  54 static const int stub_size = 550;
  55 
// C-callable signature of the generated stub: takes a pointer to the
// PsrInfo struct to fill in.
  56 extern "C" {
  57   typedef void (*getPsrInfo_stub_t)(void*);
  58 }
  59 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
  60 
  61 
  62 class VM_Version_StubGenerator: public StubCodeGenerator {
  63  public:
  64 
  65   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  66 
  67   address generate_getPsrInfo() {
  68     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
  69 #   define __ _masm->
  70     address start = __ pc();
  71 
  72     // void getPsrInfo(VM_Version::PsrInfo* psr_info);
  73 
  74     address entry = __ pc();
  75 
  76     __ enter();
  77 
  78     __ get_dczid_el0(rscratch1);
  79     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
  80 
  81 #ifndef _WIN64
  82     __ get_ctr_el0(rscratch1);
  83     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));
  84 #endif
  85 
  86     __ leave();
  87     __ ret(lr);
  88 
  89 #   undef __
  90 
  91     return start;
  92   }
  93 };
  94 
  95 
  96 void VM_Version::get_processor_features() {
  97   _supports_cx8 = true;
  98   _supports_atomic_getset4 = true;
  99   _supports_atomic_getadd4 = true;
 100   _supports_atomic_getset8 = true;
 101   _supports_atomic_getadd8 = true;
 102 
 103   getPsrInfo_stub(&_psr_info);
 104 


 125     if (PrefetchCopyIntervalInBytes >= 32768)
 126       PrefetchCopyIntervalInBytes = 32760;
 127   }
 128 
 129   if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
 130     warning("AllocatePrefetchDistance must be multiple of 8");
 131     AllocatePrefetchDistance &= ~7;
 132   }
 133 
 134   if (AllocatePrefetchStepSize & 7) {
 135     warning("AllocatePrefetchStepSize must be multiple of 8");
 136     AllocatePrefetchStepSize &= ~7;
 137   }
 138 
 139   if (SoftwarePrefetchHintDistance != -1 &&
 140        (SoftwarePrefetchHintDistance & 7)) {
 141     warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
 142     SoftwarePrefetchHintDistance &= ~7;
 143   }
 144 
 145 #ifndef _WIN64
 146   _features = getauxval(AT_HWCAP);
 147 #else
 148   if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE))   _features |= CPU_CRC32;
 149   if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE))  _features |= CPU_AES | CPU_SHA1 | CPU_SHA2;
 150   if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE))        _features |= CPU_ASIMD;
 151   // No check for CPU_PMULL
 152 #endif // _WIN64
 153 
 154   char buf[512];
 155 


 156   int cpu_lines = 0;
 157 #ifndef _WIN64
 158   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
 159     // need a large buffer as the flags line may include lots of text
 160     char buf[1024], *p;
 161     while (fgets(buf, sizeof (buf), f) != NULL) {
 162       if ((p = strchr(buf, ':')) != NULL) {
 163         int64_t v = strtol(p+1, NULL, 0);
 164         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
 165           _cpu = v;
 166           cpu_lines++;
 167         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
 168           _variant = v;
 169         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
 170           if (_model != v)  _model2 = _model;
 171           _model = v;
 172         } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
 173           _revision = v;
 174         } else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
 175           if (strstr(p+1, "dcpop")) {
 176             _dcpop = true;
 177           }
 178         }
 179       }
 180     }
 181     fclose(f);
 182   }
 183 #else
 184   {
 185     char* buf = ::getenv("PROCESSOR_IDENTIFIER");
 186     if (buf && strstr(buf, "Ampere(TM)") != NULL) {
 187       _cpu = CPU_AMCC;
 188       cpu_lines++;
 189     } else if (buf && strstr(buf, "Cavium Inc.") != NULL) {
 190       _cpu = CPU_CAVIUM;
 191       cpu_lines++;
 192     } else {
 193       log_info(os)("VM_Version: unknown CPU model");
 194     }
 195 
 196     if (_cpu) {
 197       SYSTEM_INFO si;
 198       GetSystemInfo(&si);
 199       _model = si.wProcessorLevel;
 200       _variant = si.wProcessorRevision / 0xFF;
 201       _revision = si.wProcessorRevision & 0xFF;
 202     }
 203   }
 204 #endif // _WIN64
 205 
 206   if (os::supports_map_sync()) {
 207     // if dcpop is available publish data cache line flush size via
  208     // generic field, otherwise let it default to zero thereby
 209     // disabling writeback
 210     if (_dcpop) {
 211       _data_cache_line_flush_size = dcache_line;
 212     }
 213   }
 214 
 215   // Enable vendor specific features
 216 
 217   // Ampere eMAG
 218   if (_cpu == CPU_AMCC && (_model == 0) && (_variant == 0x3)) {
 219     if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
 220       FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
 221     }
 222     if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
 223       FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
 224     }


 272 
 273   // Cortex A73
 274   if (_cpu == CPU_ARM && (_model == 0xd09 || _model2 == 0xd09)) {
 275     if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
 276       FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
 277     }
 278     // A73 is faster with short-and-easy-for-speculative-execution-loop
 279     if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
 280       FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
 281     }
 282   }
 283 
 284   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
 285   // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
 286   // we assume the worst and assume we could be on a big little system and have
 287   // undisclosed A53 cores which we could be swapped to at any stage
 288   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
 289 
 290   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
 291   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
 292   if (_features & CPU_ASIMD) strcat(buf, ", simd");
 293   if (_features & CPU_CRC32) strcat(buf, ", crc");
 294   if (_features & CPU_AES)   strcat(buf, ", aes");
 295   if (_features & CPU_SHA1)  strcat(buf, ", sha1");
 296   if (_features & CPU_SHA2)  strcat(buf, ", sha256");
 297   if (_features & CPU_LSE)   strcat(buf, ", lse");
 298 
 299   _features_string = os::strdup(buf);
 300 
 301   if (FLAG_IS_DEFAULT(UseCRC32)) {
 302     UseCRC32 = (_features & CPU_CRC32) != 0;
 303   }
 304 
 305   if (UseCRC32 && (_features & CPU_CRC32) == 0) {
 306     warning("UseCRC32 specified, but not supported on this CPU");
 307     FLAG_SET_DEFAULT(UseCRC32, false);
 308   }
 309 
 310   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
 311     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
 312   }
 313 
 314   if (UseVectorizedMismatchIntrinsic) {
 315     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
 316     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
 317   }
 318 
 319   if (_features & CPU_LSE) {
 320     if (FLAG_IS_DEFAULT(UseLSE))
 321       FLAG_SET_DEFAULT(UseLSE, true);
 322   } else {
 323     if (UseLSE) {
 324       warning("UseLSE specified, but not supported on this CPU");
 325       FLAG_SET_DEFAULT(UseLSE, false);
 326     }
 327   }
 328 
 329   if (_features & CPU_AES) {
 330     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
 331     UseAESIntrinsics =
 332         UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
 333     if (UseAESIntrinsics && !UseAES) {
 334       warning("UseAESIntrinsics enabled, but UseAES not, enabling");
 335       UseAES = true;
 336     }
 337   } else {
 338     if (UseAES) {
 339       warning("AES instructions are not available on this CPU");
 340       FLAG_SET_DEFAULT(UseAES, false);
 341     }
 342     if (UseAESIntrinsics) {
 343       warning("AES intrinsics are not available on this CPU");
 344       FLAG_SET_DEFAULT(UseAESIntrinsics, false);
 345     }
 346   }
 347 
 348   if (UseAESCTRIntrinsics) {
 349     warning("AES/CTR intrinsics are not available on this CPU");
 350     FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
 351   }
 352 
 353   if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
 354     UseCRC32Intrinsics = true;
 355   }
 356 
 357   if (_features & CPU_CRC32) {
 358     if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
 359       FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
 360     }
 361   } else if (UseCRC32CIntrinsics) {
 362     warning("CRC32C is not available on the CPU");
 363     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
 364   }
 365 
 366   if (FLAG_IS_DEFAULT(UseFMA)) {
 367     FLAG_SET_DEFAULT(UseFMA, true);
 368   }
 369 
 370   if (_features & (CPU_SHA1 | CPU_SHA2)) {
 371     if (FLAG_IS_DEFAULT(UseSHA)) {
 372       FLAG_SET_DEFAULT(UseSHA, true);
 373     }
 374   } else if (UseSHA) {
 375     warning("SHA instructions are not available on this CPU");
 376     FLAG_SET_DEFAULT(UseSHA, false);
 377   }
 378 
 379   if (UseSHA && (_features & CPU_SHA1)) {
 380     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
 381       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
 382     }
 383   } else if (UseSHA1Intrinsics) {
 384     warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
 385     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
 386   }
 387 
 388   if (UseSHA && (_features & CPU_SHA2)) {
 389     if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
 390       FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
 391     }
 392   } else if (UseSHA256Intrinsics) {
 393     warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
 394     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
 395   }
 396 
 397   if (UseSHA512Intrinsics) {
 398     warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
 399     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
 400   }
 401 
 402   if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
 403     FLAG_SET_DEFAULT(UseSHA, false);
 404   }
 405 
 406   if (_features & CPU_PMULL) {
 407     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
 408       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
 409     }
 410   } else if (UseGHASHIntrinsics) {
 411     warning("GHASH intrinsics are not available on this CPU");
 412     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
 413   }
 414 
 415   if (is_zva_enabled()) {
 416     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
 417       FLAG_SET_DEFAULT(UseBlockZeroing, true);
 418     }
 419     if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
 420       FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
 421     }
 422   } else if (UseBlockZeroing) {
 423     warning("DC ZVA is not available on this CPU");
 424     FLAG_SET_DEFAULT(UseBlockZeroing, false);
 425   }
 426 


< prev index next >