/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_arm.hpp"

int  VM_Version::_stored_pc_adjustment = 4;
int  VM_Version::_arm_arch             = 5;
bool VM_Version::_is_initialized       = false;
int  VM_Version::_kuser_helper_version = 0;

extern "C" {
  typedef int (*get_cpu_info_t)();
  typedef bool (*check_vfp_t)(double *d);
  typedef bool (*check_simd_t)();
}

#define __ _masm->

class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    StubCodeMark mark(this, "VM_Version", "get_cpu_info");
    address start = __ pc();

    // Compare the PC value that a push (store) of PC writes to memory with
    // the PC value read directly into a register; the caller records the
    // difference as the stored-PC adjustment for this CPU.
    __ mov(R0, PC);
    __ push(PC);
    __ pop(R1);
    __ sub(R0, R1, R0);
    // return the result in R0
    __ bx(LR);

    return start;
  };

  address generate_check_vfp() {
    StubCodeMark mark(this, "VM_Version", "check_vfp");
    address start = __ pc();

    // VFP store; raises SIGILL if VFP is not available
    __ fstd(D0, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  address generate_check_vfp3_32() {
    StubCodeMark mark(this, "VM_Version", "check_vfp3_32");
    address start = __ pc();

    // D16 exists only when 32 double-precision registers are implemented
    __ fstd(D16, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  address generate_check_simd() {
    StubCodeMark mark(this, "VM_Version", "check_simd");
    address start = __ pc();

    // NEON instruction; raises SIGILL if SIMD is not available
    __ vcnt(Stemp, Stemp);
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };
};

#undef __


extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_vfp_fault_instr;
extern "C" address check_simd_fault_instr;

void VM_Version::initialize() {
  ResourceMark rm;

  // Making this stub must be the FIRST use of the assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  address get_cpu_info_pc = g.generate_get_cpu_info();
  get_cpu_info_t get_cpu_info = CAST_TO_FN_PTR(get_cpu_info_t, get_cpu_info_pc);

  int pc_adjustment = get_cpu_info();

  VM_Version::_stored_pc_adjustment = pc_adjustment;
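
  // Each check_* probe below executes an instruction that is only legal when
  // the corresponding feature is present. The probe's address is recorded in
  // the matching *_fault_instr variable so that the platform's SIGILL
  // handling can recognize a fault raised by the probe and let the call
  // return false instead of aborting the VM.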
#ifndef __SOFTFP__
  address check_vfp_pc = g.generate_check_vfp();
  check_vfp_t check_vfp = CAST_TO_FN_PTR(check_vfp_t, check_vfp_pc);

  check_vfp_fault_instr = (address)check_vfp;
  double dummy;
  if (check_vfp(&dummy)) {
    _features |= vfp_m;
  }

#ifdef COMPILER2
  if (has_vfp()) {
    address check_vfp3_32_pc = g.generate_check_vfp3_32();
    check_vfp_t check_vfp3_32 = CAST_TO_FN_PTR(check_vfp_t, check_vfp3_32_pc);
    check_vfp3_32_fault_instr = (address)check_vfp3_32;
    double dummy;
    if (check_vfp3_32(&dummy)) {
      _features |= vfp3_32_m;
    }

    address check_simd_pc = g.generate_check_simd();
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    if (check_simd()) {
      _features |= simd_m;
    }
  }
#endif
#endif


  if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
    warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }
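
  // Read OS-reported CPU properties (architecture version, ldrex/ldrexd
  // availability) and the kernel user-helper (kuser) interface version.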
  get_os_cpu_info();

  _kuser_helper_version = *(int*)KUSER_HELPER_VERSION_ADDR;

#ifdef COMPILER2
  // C2 is only supported on v7+ VFP at this time
  if (_arm_arch < 7 || !has_vfp()) {
    vm_exit_during_initialization("Server VM is only supported on ARMv7+ VFP");
  }
#endif

  // armv7 has the ldrexd instruction that can be used to implement cx8
  // armv5 with linux >= 3.1 can use the kernel helper routine
  _supports_cx8 = (supports_ldrexd() || supports_kuser_cmpxchg64());
  // ARM doesn't have special instructions for these, but ldrex/ldrexd
  // enable shorter instruction sequences than the ones based on cas.
  _supports_atomic_getset4 = supports_ldrex();
  _supports_atomic_getadd4 = supports_ldrex();
  _supports_atomic_getset8 = supports_ldrexd();
  _supports_atomic_getadd8 = supports_ldrexd();

#ifdef COMPILER2
  assert(_supports_cx8 && _supports_atomic_getset4 && _supports_atomic_getadd4
         && _supports_atomic_getset8 && _supports_atomic_getadd8, "C2: atomic operations must be supported");
#endif
  char buf[512];
  jio_snprintf(buf, sizeof(buf), "(ARMv%d)%s%s%s",
               _arm_arch,
               (has_vfp() ? ", vfp" : ""),
               (has_vfp3_32() ? ", vfp3-32" : ""),
               (has_simd() ? ", simd" : ""));

  // buf starts with the "(ARMv%d)" prefix; detected features are appended after it
  _features_string = os::strdup(buf);

  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  }

  AllocatePrefetchDistance = 128;

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but the default is 8 because values larger
    // than 8 currently disable instruction scheduling.
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }

  if (MaxVectorSize > 16) {
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }
#endif

  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  _is_initialized = true;
}

bool VM_Version::use_biased_locking() {
  get_os_cpu_info();
  // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
  // overhead related to the slightly longer Biased Locking execution path.
  // Testing shows no improvement when running with Biased Locking enabled
  // on ARMv6 and higher uniprocessor systems. The situation is different on
  // ARMv5 and MP systems.
  //
  // Therefore Biased Locking is enabled only on ARMv5 and on ARM MP systems.
  //
  return (!os::is_MP() && (arm_arch() > 5)) ? false : true;
}

#define EXP

// Temporary override for experimental features
// Copied from Abstract_VM_Version
const char* VM_Version::vm_info_string() {
  switch (Arguments::mode()) {
    case Arguments::_int:
      return UseSharedSpaces ? "interpreted mode, sharing" EXP : "interpreted mode" EXP;
    case Arguments::_mixed:
      return UseSharedSpaces ? "mixed mode, sharing" EXP : "mixed mode" EXP;
    case Arguments::_comp:
      return UseSharedSpaces ? "compiled mode, sharing" EXP : "compiled mode" EXP;
  };
  ShouldNotReachHere();
  return "";
}