/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/macroAssembler.hpp" 28 #include "asm/macroAssembler.inline.hpp" 29 #include "memory/resourceArea.hpp" 30 #include "runtime/java.hpp" 31 #include "runtime/stubCodeGenerator.hpp" 32 #include "vm_version_aarch64.hpp" 33 #ifdef TARGET_OS_FAMILY_linux 34 # include "os_linux.inline.hpp" 35 #endif 36 37 #ifndef BUILTIN_SIM 38 #include <sys/auxv.h> 39 #include <asm/hwcap.h> 40 #else 41 #define getauxval(hwcap) 0 42 #endif 43 44 #ifndef HWCAP_AES 45 #define HWCAP_AES (1<<3) 46 #endif 47 48 #ifndef HWCAP_PMULL 49 #define HWCAP_PMULL (1<<4) 50 #endif 51 52 #ifndef HWCAP_SHA1 53 #define HWCAP_SHA1 (1<<5) 54 #endif 55 56 #ifndef HWCAP_SHA2 57 #define HWCAP_SHA2 (1<<6) 58 #endif 59 60 #ifndef HWCAP_CRC32 61 #define HWCAP_CRC32 (1<<7) 62 #endif 63 64 int VM_Version::_cpu; 65 int VM_Version::_model; 66 int VM_Version::_model2; 67 int VM_Version::_variant; 68 int VM_Version::_revision; 69 int VM_Version::_stepping; 70 int VM_Version::_cpuFeatures; 71 const char* VM_Version::_features_str = ""; 72 73 static BufferBlob* stub_blob; 74 static const int stub_size = 550; 75 76 extern "C" { 77 typedef void (*getPsrInfo_stub_t)(void*); 78 } 79 static getPsrInfo_stub_t getPsrInfo_stub = NULL; 80 81 82 class VM_Version_StubGenerator: public StubCodeGenerator { 83 public: 84 85 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {} 86 87 address generate_getPsrInfo() { 88 StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub"); 89 # define __ _masm-> 90 address start = __ pc(); 91 92 #ifdef BUILTIN_SIM 93 __ c_stub_prolog(1, 0, MacroAssembler::ret_type_void); 94 #endif 95 96 // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info); 97 98 address entry = __ pc(); 99 100 // TODO : redefine fields in CpuidInfo and generate 101 // code to fill them in 102 103 __ ret(lr); 104 105 # undef __ 106 107 return start; 108 } 109 }; 110 111 112 void VM_Version::get_processor_features() { 113 _supports_cx8 = true; 114 
_supports_atomic_getset4 = true; 115 _supports_atomic_getadd4 = true; 116 _supports_atomic_getset8 = true; 117 _supports_atomic_getadd8 = true; 118 119 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) 120 FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256); 121 if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize)) 122 FLAG_SET_DEFAULT(AllocatePrefetchStepSize, 64); 123 FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 256); 124 FLAG_SET_DEFAULT(PrefetchFieldsAhead, 256); 125 FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 256); 126 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true); 127 128 unsigned long auxv = getauxval(AT_HWCAP); 129 130 char buf[512]; 131 132 _cpuFeatures = auxv; 133 134 int cpu_lines = 0; 135 if (FILE *f = fopen("/proc/cpuinfo", "r")) { 136 char buf[128], *p; 137 while (fgets(buf, sizeof (buf), f) != NULL) { 138 if (p = strchr(buf, ':')) { 139 long v = strtol(p+1, NULL, 0); 140 if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) { 141 _cpu = v; 142 cpu_lines++; 143 } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) { 144 _variant = v; 145 } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) { 146 if (_model != v) _model2 = _model; 147 _model = v; 148 } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) { 149 _revision = v; 150 } 151 } 152 } 153 fclose(f); 154 } 155 156 // Enable vendor specific features 157 if (_cpu == CPU_CAVIUM && _variant == 0) _cpuFeatures |= CPU_DMB_ATOMICS; 158 if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _cpuFeatures |= CPU_A53MAC; 159 // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07) 160 // we assume the worst and assume we could be on a big little system and have 161 // undisclosed A53 cores which we could be swapped to at any stage 162 if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _cpuFeatures |= CPU_A53MAC; 163 164 sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision); 165 if (_model2) 
sprintf(buf+strlen(buf), "(0x%03x)", _model2); 166 if (auxv & HWCAP_ASIMD) strcat(buf, ", simd"); 167 if (auxv & HWCAP_CRC32) strcat(buf, ", crc"); 168 if (auxv & HWCAP_AES) strcat(buf, ", aes"); 169 if (auxv & HWCAP_SHA1) strcat(buf, ", sha1"); 170 if (auxv & HWCAP_SHA2) strcat(buf, ", sha256"); 171 172 _features_str = os::strdup(buf); 173 174 if (FLAG_IS_DEFAULT(UseCRC32)) { 175 UseCRC32 = (auxv & HWCAP_CRC32) != 0; 176 } 177 if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) { 178 warning("UseCRC32 specified, but not supported on this CPU"); 179 } 180 181 if (UseAdler32Intrinsics) { 182 warning("Adler32Intrinsics not available on this CPU."); 183 FLAG_SET_DEFAULT(UseAdler32Intrinsics, false); 184 } 185 186 if (auxv & HWCAP_AES) { 187 UseAES = UseAES || FLAG_IS_DEFAULT(UseAES); 188 UseAESIntrinsics = 189 UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics)); 190 if (UseAESIntrinsics && !UseAES) { 191 warning("UseAESIntrinsics enabled, but UseAES not, enabling"); 192 UseAES = true; 193 } 194 } else { 195 if (UseAES) { 196 warning("UseAES specified, but not supported on this CPU"); 197 } 198 if (UseAESIntrinsics) { 199 warning("UseAESIntrinsics specified, but not supported on this CPU"); 200 } 201 } 202 203 if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { 204 UseCRC32Intrinsics = true; 205 } 206 207 if (auxv & HWCAP_CRC32) { 208 if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) { 209 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true); 210 } 211 } else if (UseCRC32CIntrinsics) { 212 warning("CRC32C is not available on the CPU"); 213 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false); 214 } 215 216 if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) { 217 if (FLAG_IS_DEFAULT(UseSHA)) { 218 FLAG_SET_DEFAULT(UseSHA, true); 219 } 220 } else if (UseSHA) { 221 warning("SHA instructions are not available on this CPU"); 222 FLAG_SET_DEFAULT(UseSHA, false); 223 } 224 225 if (UseSHA && (auxv & HWCAP_SHA1)) { 226 if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { 227 FLAG_SET_DEFAULT(UseSHA1Intrinsics, true); 228 } 229 } 
else if (UseSHA1Intrinsics) { 230 warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU."); 231 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); 232 } 233 234 if (UseSHA && (auxv & HWCAP_SHA2)) { 235 if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) { 236 FLAG_SET_DEFAULT(UseSHA256Intrinsics, true); 237 } 238 } else if (UseSHA256Intrinsics) { 239 warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU."); 240 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); 241 } 242 243 if (UseSHA512Intrinsics) { 244 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU."); 245 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); 246 } 247 248 if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { 249 FLAG_SET_DEFAULT(UseSHA, false); 250 } 251 252 if (auxv & HWCAP_PMULL) { 253 if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) { 254 FLAG_SET_DEFAULT(UseGHASHIntrinsics, true); 255 } 256 } else if (UseGHASHIntrinsics) { 257 warning("GHASH intrinsics are not available on this CPU"); 258 FLAG_SET_DEFAULT(UseGHASHIntrinsics, false); 259 } 260 261 // This machine allows unaligned memory accesses 262 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) { 263 FLAG_SET_DEFAULT(UseUnalignedAccesses, true); 264 } 265 266 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { 267 UseMultiplyToLenIntrinsic = true; 268 } 269 270 if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) { 271 UseBarriersForVolatile = (_cpuFeatures & CPU_DMB_ATOMICS) != 0; 272 } 273 274 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { 275 UsePopCountInstruction = true; 276 } 277 278 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { 279 UseMontgomeryMultiplyIntrinsic = true; 280 } 281 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { 282 UseMontgomerySquareIntrinsic = true; 283 } 284 285 #ifdef COMPILER2 286 if (FLAG_IS_DEFAULT(OptoScheduling)) { 287 OptoScheduling = true; 288 } 289 #endif 290 } 291 292 void VM_Version::initialize() { 293 ResourceMark 
rm; 294 295 stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size); 296 if (stub_blob == NULL) { 297 vm_exit_during_initialization("Unable to allocate getPsrInfo_stub"); 298 } 299 300 CodeBuffer c(stub_blob); 301 VM_Version_StubGenerator g(&c); 302 getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t, 303 g.generate_getPsrInfo()); 304 305 get_processor_features(); 306 }