1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2015, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.hpp" 28 #include "asm/macroAssembler.inline.hpp" 29 #include "memory/resourceArea.hpp" 30 #include "runtime/java.hpp" 31 #include "runtime/stubCodeGenerator.hpp" 32 #include "utilities/macros.hpp" 33 #include "vm_version_aarch64.hpp" 34 35 #include OS_HEADER_INLINE(os) 36 37 #include <sys/auxv.h> 38 #include <asm/hwcap.h> 39 40 #ifndef HWCAP_AES 41 #define HWCAP_AES (1<<3) 42 #endif 43 44 #ifndef HWCAP_PMULL 45 #define HWCAP_PMULL (1<<4) 46 #endif 47 48 #ifndef HWCAP_SHA1 49 #define HWCAP_SHA1 (1<<5) 50 #endif 51 52 #ifndef HWCAP_SHA2 53 #define HWCAP_SHA2 (1<<6) 54 #endif 55 56 #ifndef HWCAP_CRC32 57 #define HWCAP_CRC32 (1<<7) 58 #endif 59 60 #ifndef HWCAP_ATOMICS 61 #define HWCAP_ATOMICS (1<<8) 62 #endif 63 64 int VM_Version::_cpu; 65 int VM_Version::_model; 66 int VM_Version::_model2; 67 int VM_Version::_variant; 68 int VM_Version::_revision; 69 int VM_Version::_stepping; 70 VM_Version::PsrInfo VM_Version::_psr_info = { 0, }; 71 72 static BufferBlob* stub_blob; 73 static const int stub_size = 550; 74 75 extern "C" { 76 typedef void (*getPsrInfo_stub_t)(void*); 77 } 78 static getPsrInfo_stub_t getPsrInfo_stub = NULL; 79 80 81 class VM_Version_StubGenerator: public StubCodeGenerator { 82 public: 83 84 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {} 85 86 address generate_getPsrInfo() { 87 StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub"); 88 # define __ _masm-> 89 address start = __ pc(); 90 91 // void getPsrInfo(VM_Version::PsrInfo* psr_info); 92 93 address entry = __ pc(); 94 95 __ enter(); 96 97 __ get_dczid_el0(rscratch1); 98 __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset()))); 99 100 __ get_ctr_el0(rscratch1); 101 __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset()))); 102 103 __ leave(); 104 __ ret(lr); 105 106 # undef __ 107 108 return start; 109 } 110 }; 111 112 113 void VM_Version::get_processor_features() { 114 _supports_cx8 = true; 115 _supports_atomic_getset4 = true; 116 _supports_atomic_getadd4 = true; 117 _supports_atomic_getset8 = true; 118 _supports_atomic_getadd8 = true; 119 120 getPsrInfo_stub(&_psr_info); 121 122 int dcache_line = VM_Version::dcache_line_size(); 123 124 // Limit AllocatePrefetchDistance so that it does not exceed the 125 // constraint in AllocatePrefetchDistanceConstraintFunc. 126 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) 127 FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line)); 128 129 if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize)) 130 FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line); 131 if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) 132 FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line); 133 if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) 134 FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 3*dcache_line); 135 if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) 136 FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, 3*dcache_line); 137 138 if (PrefetchCopyIntervalInBytes != -1 && 139 ((PrefetchCopyIntervalInBytes & 7) || (PrefetchCopyIntervalInBytes >= 32768))) { 140 warning("PrefetchCopyIntervalInBytes must be -1, or a multiple of 8 and < 32768"); 141 PrefetchCopyIntervalInBytes &= ~7; 142 if (PrefetchCopyIntervalInBytes >= 32768) 143 PrefetchCopyIntervalInBytes = 32760; 144 } 145 146 if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) { 147 warning("AllocatePrefetchDistance must be multiple of 8"); 148 AllocatePrefetchDistance &= ~7; 149 } 150 151 if (AllocatePrefetchStepSize & 7) { 152 warning("AllocatePrefetchStepSize must be multiple of 8"); 153 AllocatePrefetchStepSize &= ~7; 154 } 155 156 if (SoftwarePrefetchHintDistance != -1 && 157 (SoftwarePrefetchHintDistance & 7)) { 158 warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8"); 159 SoftwarePrefetchHintDistance &= ~7; 160 } 161 162 unsigned long auxv = getauxval(AT_HWCAP); 163 164 char buf[512]; 165 166 _features = auxv; 167 168 int cpu_lines = 0; 169 if (FILE *f = fopen("/proc/cpuinfo", "r")) { 170 char buf[128], *p; 171 while (fgets(buf, sizeof (buf), f) != NULL) { 172 if ((p = strchr(buf, ':')) != NULL) { 173 long v = strtol(p+1, NULL, 0); 174 if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) { 175 _cpu = v; 176 cpu_lines++; 177 } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) { 178 _variant = v; 179 } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) { 180 if (_model != v) _model2 = _model; 181 _model = v; 182 } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) { 183 _revision = v; 184 } 185 } 186 } 187 fclose(f); 188 } 189 190 // Enable vendor specific features 191 192 // ThunderX 193 if (_cpu == CPU_CAVIUM && (_model == 0xA1)) { 194 if (_variant == 0) _features |= CPU_DMB_ATOMICS; 195 if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) { 196 FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true); 197 } 198 if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) { 199 FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0)); 200 } 201 if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) { 202 FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false); 203 } 204 } 205 206 // ThunderX2 207 if ((_cpu == CPU_CAVIUM && (_model == 0xAF)) || 208 (_cpu == CPU_BROADCOM && (_model == 0x516))) { 209 if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) { 210 FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true); 211 } 212 if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) { 213 FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true); 214 } 215 if (FLAG_IS_DEFAULT(UseFPUForSpilling)) { 216 FLAG_SET_DEFAULT(UseFPUForSpilling, true); 217 } 218 } 219 220 // Cortex A53 221 if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) { 222 _features |= CPU_A53MAC; 223 if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) { 224 FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false); 225 } 226 } 227 228 // Cortex A73 229 if (_cpu == CPU_ARM && (_model == 0xd09 || _model2 == 0xd09)) { 230 if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) { 231 FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1); 232 } 233 // A73 is faster with short-and-easy-for-speculative-execution-loop 234 if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) { 235 FLAG_SET_DEFAULT(UseSimpleArrayEquals, true); 236 } 237 } 238 239 // Neoverse N1 240 if (_cpu == CPU_ARM && (_model == 0xd0c || _model2 == 0xd0c)) { 241 if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) { 242 FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true); 243 } 244 } 245 246 if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH; 247 // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07) 248 // we assume the worst and assume we could be on a big little system and have 249 // undisclosed A53 cores which we could be swapped to at any stage 250 if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC; 251 252 sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision); 253 if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2); 254 if (auxv & HWCAP_ASIMD) strcat(buf, ", simd"); 255 if (auxv & HWCAP_CRC32) strcat(buf, ", crc"); 256 if (auxv & HWCAP_AES) strcat(buf, ", aes"); 257 if (auxv & HWCAP_SHA1) strcat(buf, ", sha1"); 258 if (auxv & HWCAP_SHA2) strcat(buf, ", sha256"); 259 if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse"); 260 261 _features_string = os::strdup(buf); 262 263 if (FLAG_IS_DEFAULT(UseCRC32)) { 264 UseCRC32 = (auxv & HWCAP_CRC32) != 0; 265 } 266 267 if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) { 268 warning("UseCRC32 specified, but not supported on this CPU"); 269 FLAG_SET_DEFAULT(UseCRC32, false); 270 } 271 272 if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) { 273 FLAG_SET_DEFAULT(UseAdler32Intrinsics, true); 274 } 275 276 if (UseVectorizedMismatchIntrinsic) { 277 warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU."); 278 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false); 279 } 280 281 if (auxv & HWCAP_ATOMICS) { 282 if (FLAG_IS_DEFAULT(UseLSE)) 283 FLAG_SET_DEFAULT(UseLSE, true); 284 } else { 285 if (UseLSE) { 286 warning("UseLSE specified, but not supported on this CPU"); 287 FLAG_SET_DEFAULT(UseLSE, false); 288 } 289 } 290 291 if (auxv & HWCAP_AES) { 292 UseAES = UseAES || FLAG_IS_DEFAULT(UseAES); 293 UseAESIntrinsics = 294 UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics)); 295 if (UseAESIntrinsics && !UseAES) { 296 warning("UseAESIntrinsics enabled, but UseAES not, enabling"); 297 UseAES = true; 298 } 299 } else { 300 if (UseAES) { 301 warning("AES instructions are not available on this CPU"); 302 FLAG_SET_DEFAULT(UseAES, false); 303 } 304 if (UseAESIntrinsics) { 305 warning("AES intrinsics are not available on this CPU"); 306 FLAG_SET_DEFAULT(UseAESIntrinsics, false); 307 } 308 } 309 310 if (UseAESCTRIntrinsics) { 311 warning("AES/CTR intrinsics are not available on this CPU"); 312 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false); 313 } 314 315 if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { 316 UseCRC32Intrinsics = true; 317 } 318 319 if (auxv & HWCAP_CRC32) { 320 if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) { 321 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true); 322 } 323 } else if (UseCRC32CIntrinsics) { 324 warning("CRC32C is not available on the CPU"); 325 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false); 326 } 327 328 if (FLAG_IS_DEFAULT(UseFMA)) { 329 FLAG_SET_DEFAULT(UseFMA, true); 330 } 331 332 if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) { 333 if (FLAG_IS_DEFAULT(UseSHA)) { 334 FLAG_SET_DEFAULT(UseSHA, true); 335 } 336 } else if (UseSHA) { 337 warning("SHA instructions are not available on this CPU"); 338 FLAG_SET_DEFAULT(UseSHA, false); 339 } 340 341 if (UseSHA && (auxv & HWCAP_SHA1)) { 342 if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { 343 FLAG_SET_DEFAULT(UseSHA1Intrinsics, true); 344 } 345 } else if (UseSHA1Intrinsics) { 346 warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU."); 347 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); 348 } 349 350 if (UseSHA && (auxv & HWCAP_SHA2)) { 351 if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) { 352 FLAG_SET_DEFAULT(UseSHA256Intrinsics, true); 353 } 354 } else if (UseSHA256Intrinsics) { 355 warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU."); 356 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); 357 } 358 359 if (UseSHA512Intrinsics) { 360 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU."); 361 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); 362 } 363 364 if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { 365 FLAG_SET_DEFAULT(UseSHA, false); 366 } 367 368 if (auxv & HWCAP_PMULL) { 369 if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) { 370 FLAG_SET_DEFAULT(UseGHASHIntrinsics, true); 371 } 372 } else if (UseGHASHIntrinsics) { 373 warning("GHASH intrinsics are not available on this CPU"); 374 FLAG_SET_DEFAULT(UseGHASHIntrinsics, false); 375 } 376 377 if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) { 378 UseBASE64Intrinsics = true; 379 } 380 381 if (is_zva_enabled()) { 382 if (FLAG_IS_DEFAULT(UseBlockZeroing)) { 383 FLAG_SET_DEFAULT(UseBlockZeroing, true); 384 } 385 if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) { 386 FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length()); 387 } 388 } else if (UseBlockZeroing) { 389 warning("DC ZVA is not available on this CPU"); 390 FLAG_SET_DEFAULT(UseBlockZeroing, false); 391 } 392 393 // This machine allows unaligned memory accesses 394 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) { 395 FLAG_SET_DEFAULT(UseUnalignedAccesses, true); 396 } 397 398 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { 399 UseMultiplyToLenIntrinsic = true; 400 } 401 402 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) { 403 UseSquareToLenIntrinsic = true; 404 } 405 406 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) { 407 UseMulAddIntrinsic = true; 408 } 409 410 if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) { 411 UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0; 412 } 413 414 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { 415 UsePopCountInstruction = true; 416 } 417 418 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { 419 UseMontgomeryMultiplyIntrinsic = true; 420 } 421 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { 422 UseMontgomerySquareIntrinsic = true; 423 } 424 425 #ifdef COMPILER2 426 if (FLAG_IS_DEFAULT(OptoScheduling)) { 427 OptoScheduling = true; 428 } 429 #endif 430 } 431 432 void VM_Version::initialize() { 433 ResourceMark rm; 434 435 stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size); 436 if (stub_blob == NULL) { 437 vm_exit_during_initialization("Unable to allocate getPsrInfo_stub"); 438 } 439 440 CodeBuffer c(stub_blob); 441 VM_Version_StubGenerator g(&c); 442 getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t, 443 g.generate_getPsrInfo()); 444 445 get_processor_features(); 446 447 UNSUPPORTED_OPTION(CriticalJNINatives); 448 }