1 /* 2 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "vm_version_arm.hpp"

// CPU feature detection for 32-bit ARM. Optional features (VFP, VFPv3-D32,
// SIMD, multiprocessing extensions) are probed at VM startup by generating
// tiny code stubs with the assembler and executing them: a stub that uses an
// unsupported instruction raises SIGILL, and the platform signal handler is
// expected to recover by moving the PC past the faulting instruction (see the
// check_*_fault_instr globals and the comment in generate_check_mp_ext()).

int VM_Version::_stored_pc_adjustment = 4;      // delta between PC as read and PC as stored; measured in initialize()
int VM_Version::_arm_arch = 5;                  // ARM architecture version; refined by get_os_cpu_info()
bool VM_Version::_is_initialized = false;       // set at the end of initialize()
int VM_Version::_kuser_helper_version = 0;      // Linux kernel user-helper version, read in early_initialize()

extern "C" {
  // Call signatures of the generated probe stubs below.
  typedef int (*get_cpu_info_t)();
  typedef bool (*check_vfp_t)(double *d);
  typedef bool (*check_simd_t)();
  typedef bool (*check_mp_ext_t)(int *addr);
}

#define __ _masm->

// Generates the small feature-probe stubs executed by initialize().
class VM_Version_StubGenerator: public StubCodeGenerator {
public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  // Stub returning the difference between the PC value stored to memory by
  // "push" and the PC value read via "mov" at the preceding instruction.
  // This stored-PC adjustment varies between ARM implementations and is kept
  // in _stored_pc_adjustment.
  address generate_get_cpu_info() {
    StubCodeMark mark(this, "VM_Version", "get_cpu_info");
    address start = __ pc();

    __ mov(R0, PC);
    __ push(PC);
    __ pop(R1);
    __ sub(R0, R1, R0);
    // return the result in R0
    __ bx(LR);

    return start;
  };

  // Stub executing a VFP store of D0. Falls through and returns true when
  // VFP is present; without VFP the store is expected to fault (recovery via
  // check_vfp_fault_instr -- see the caller).
  address generate_check_vfp() {
    StubCodeMark mark(this, "VM_Version", "check_vfp");
    address start = __ pc();

    __ fstd(D0, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Stub touching D16, a register that only exists when 32 double-precision
  // VFP registers are available (probes the vfp3_32 feature).
  address generate_check_vfp3_32() {
    StubCodeMark mark(this, "VM_Version", "check_vfp3_32");
    address start = __ pc();

    __ fstd(D16, Address(R0));
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Stub executing a NEON/SIMD instruction (vcnt) to probe SIMD support.
  address generate_check_simd() {
    StubCodeMark mark(this, "VM_Version", "check_simd");
    address start = __ pc();

    __ vcnt(Stemp, Stemp);
    __ mov(R0, 1);
    __ bx(LR);

    return start;
  };

  // Stub probing the ARM Multiprocessing Extensions.
  address generate_check_mp_ext() {
    StubCodeMark mark(this, "VM_Version", "check_mp_ext");
    address start = __ pc();

    // PLDW is available with Multiprocessing Extensions only
    __ pldw(Address(R0));
    // Return true if instruction caused no signals
    __ mov(R0, 1);
    // JVM_handle_linux_signal moves PC here if SIGILL happens
    __ bx(LR);

    return start;
  };
};

#undef __


// Addresses of the potentially-faulting instructions in the probe stubs.
// The signal handler consults these to recognize (and recover from) an
// expected SIGILL raised while probing for a missing feature.
extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_vfp_fault_instr;
extern "C" address check_simd_fault_instr;
extern "C" address check_mp_ext_fault_instr;

// Early (pre-stub-generation) initialization: establishes the architecture
// level and whether 64-bit compare-and-exchange is available.
void VM_Version::early_initialize() {

  // Make sure that _arm_arch is initialized so that any calls to OrderAccess will
  // use proper dmb instruction
  get_os_cpu_info();

  _kuser_helper_version = *(int*)KUSER_HELPER_VERSION_ADDR;
  // armv7 has the ldrexd instruction that can be used to implement cx8
  // armv5 with linux >= 3.1 can use kernel helper routine
  _supports_cx8 = (supports_ldrexd() || supports_kuser_cmpxchg64());
}

// Full initialization: generates and runs the probe stubs, records detected
// features in _features, and validates/adjusts the feature-dependent VM flags.
void VM_Version::initialize() {
  ResourceMark rm;

  // Making this stub must be FIRST use of assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  address get_cpu_info_pc = g.generate_get_cpu_info();
  get_cpu_info_t get_cpu_info = CAST_TO_FN_PTR(get_cpu_info_t, get_cpu_info_pc);

  // Measure the implementation-specific stored-PC adjustment (see the stub).
  int pc_adjustment = get_cpu_info();

  VM_Version::_stored_pc_adjustment = pc_adjustment;

#ifndef __SOFTFP__
  // Probe for VFP. Each probe publishes its faulting-instruction address
  // before running so the signal handler can recover from SIGILL.
  address check_vfp_pc = g.generate_check_vfp();
  check_vfp_t check_vfp = CAST_TO_FN_PTR(check_vfp_t, check_vfp_pc);

  check_vfp_fault_instr = (address)check_vfp;
  double dummy;
  if (check_vfp(&dummy)) {
    _features |= vfp_m;
  }

#ifdef COMPILER2
  if (has_vfp()) {
    // VFPv3 with 32 double registers -- only probed when base VFP exists.
    address check_vfp3_32_pc = g.generate_check_vfp3_32();
    check_vfp_t check_vfp3_32 = CAST_TO_FN_PTR(check_vfp_t, check_vfp3_32_pc);
    check_vfp3_32_fault_instr = (address)check_vfp3_32;
    double dummy;
    if (check_vfp3_32(&dummy)) {
      _features |= vfp3_32_m;
    }

    // NEON/SIMD probe.
    address check_simd_pc = g.generate_check_simd();
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    if (check_simd()) {
      _features |= simd_m;
    }
  }
#endif
#endif

  // Multiprocessing Extensions probe (independent of VFP / __SOFTFP__).
  address check_mp_ext_pc = g.generate_check_mp_ext();
  check_mp_ext_t check_mp_ext = CAST_TO_FN_PTR(check_mp_ext_t, check_mp_ext_pc);
  check_mp_ext_fault_instr = (address)check_mp_ext;
  int dummy_local_variable;
  if (check_mp_ext(&dummy_local_variable)) {
    _features |= mp_ext_m;
  }

  // None of the AES/FMA/SHA/CRC32/Adler32/vectorizedMismatch intrinsics are
  // implemented on this port: force-disable them, warning when the user
  // requested them explicitly on the command line.
  if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
    warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
    warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

#ifdef COMPILER2
  // C2 is only supported on v7+ VFP at this time
  if (_arm_arch < 7 || !has_vfp()) {
    vm_exit_during_initialization("Server VM is only supported on ARMv7+ VFP");
  }
#endif

  // ARM doesn't have special instructions for these but ldrex/ldrexd
  // enable shorter instruction sequences that the ones based on cas.
  _supports_atomic_getset4 = supports_ldrex();
  _supports_atomic_getadd4 = supports_ldrex();
  _supports_atomic_getset8 = supports_ldrexd();
  _supports_atomic_getadd8 = supports_ldrexd();

#ifdef COMPILER2
  assert(_supports_cx8 && _supports_atomic_getset4 && _supports_atomic_getadd4
         && _supports_atomic_getset8 && _supports_atomic_getadd8, "C2: atomic operations must be supported");
#endif
  char buf[512];
  jio_snprintf(buf, sizeof(buf), "(ARMv%d)%s%s%s%s",
               _arm_arch,
               (has_vfp() ? ", vfp" : ""),
               (has_vfp3_32() ? ", vfp3-32" : ""),
               (has_simd() ? ", simd" : ""),
               (has_multiprocessing_extensions() ? ", mp_ext" : ""));

  // buf always starts with "(ARMv%d)"; each detected feature is appended as ", <name>"
  _features_string = os::strdup(buf);

  // vcnt-based population count is only usable with SIMD.
  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  } else {
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 128);
  }

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but default is 8 because currently
    // larger than 8 will disable instruction scheduling
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  } else {
    // User-supplied value: clamp to the hardware's capability.
    int max_vector_size = has_simd() ? 16 : 8;
    if (MaxVectorSize > max_vector_size) {
      warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
  }
#endif

  // Platform-specific tiered-compilation thresholds (applied only when the
  // user did not set them explicitly).
  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  UNSUPPORTED_OPTION(TypeProfileLevel);
  UNSUPPORTED_OPTION(CriticalJNINatives);

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  _is_initialized = true;
}

// Decides whether biased locking should be enabled on this hardware.
// May be called before initialize(), hence the explicit get_os_cpu_info().
bool VM_Version::use_biased_locking() {
  get_os_cpu_info();
  // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
  // overhead related to slightly longer Biased Locking execution path.
  // Testing shows no improvement when running with Biased Locking enabled
  // on an ARMv6 and higher uniprocessor systems. The situation is different on
  // ARMv5 and MP systems.
  //
  // Therefore the Biased Locking is enabled on ARMv5 and ARM MP only.
  //
  return (!os::is_MP() && (arm_arch() > 5)) ? false : true;
}