1 /*
   2  * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.inline.hpp"
  27 #include "code/codeCacheExtensions.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "runtime/java.hpp"
  30 #include "runtime/os.inline.hpp"
  31 #include "runtime/stubCodeGenerator.hpp"
  32 #include "vm_version_arm.hpp"
  33 #include <sys/auxv.h>
  34 #include <asm/hwcap.h>
  35 
  36 #ifndef HWCAP_AES
  37 #define HWCAP_AES 1 << 3
  38 #endif
  39 
bool VM_Version::_is_initialized = false;  // set at the end of initialize()
bool VM_Version::_has_simd = false;        // set by the generated SIMD probe stub (COMPILER2 only)

extern "C" {
  // Signature of the generated SIMD-detection stub; see generate_check_simd().
  typedef bool (*check_simd_t)();
}
  46 
  47 
  48 #ifdef COMPILER2
  49 
  50 #define __ _masm->
  51 
  52 class VM_Version_StubGenerator: public StubCodeGenerator {
  53  public:
  54 
  55   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  56 
  57   address generate_check_simd() {
  58     StubCodeMark mark(this, "VM_Version", "check_simd");
  59     address start = __ pc();
  60 
  61     __ vcnt(Stemp, Stemp);
  62     __ mov(R0, 1);
  63     __ ret(LR);
  64 
  65     return start;
  66   };
  67 };
  68 
  69 #undef __
  70 
  71 #endif
  72 
  73 
  74 
// Entry address of the check_simd probe stub; initialize() stores it here
// before calling the stub. NOTE(review): the definition is not visible in
// this file — presumably the OS-specific signal handler consults it to
// recover from the fault raised on CPUs without SIMD; confirm in the
// platform signal-handling code.
extern "C" address check_simd_fault_instr;
  76 
  77 
// One-time CPU feature detection and VM-flag setup for this port.
//
// Allocates a code buffer for probe stubs (this must be the very first use
// of the assembler), force-disables every intrinsic this port does not
// implement (warning when the user requested one explicitly), probes for
// SIMD via a generated stub (COMPILER2 only), reads the kernel's HWCAP
// bits to decide AES support, and finally applies port-specific defaults
// for vector size and tiered-compilation thresholds.
void VM_Version::initialize() {
  ResourceMark rm;

  // TODO-AARCH64 List optional AArch64 features
  // For now just write the architecture name
  _features_string = "AArch64";

  // Making this stub must be FIRST use of assembler
  const int stub_size = 128;
  BufferBlob* stub_blob = BufferBlob::create("get_cpu_info", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info stub");
  }

  // SHA instructions/intrinsics are not implemented by this port: turn
  // each related flag off, warning when it was set by the user.
  if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  // CRC32/CRC32C/Adler32 intrinsics: not implemented here either; only
  // warn when the flag was changed from its default.
  if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics))
      warning("CRC32C intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("vectorizedMismatch intrinsic is not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  CodeBuffer c(stub_blob);

#ifdef COMPILER2
  VM_Version_StubGenerator g(&c);

  // Run the generated SIMD probe. check_simd_fault_instr is published
  // first so that, on a CPU without SIMD, the signal handler can recover
  // from the faulting instruction and _has_simd stays false.
  address check_simd_pc = g.generate_check_simd();
  if (check_simd_pc != NULL) {
    check_simd_t check_simd = CAST_TO_FN_PTR(check_simd_t, check_simd_pc);
    check_simd_fault_instr = (address)check_simd;
    _has_simd = check_simd();
  } else {
    assert(! _has_simd, "default _has_simd value must be 'false'");
  }
#endif

  // Query the kernel's hardware-capability bits (AT_HWCAP auxv entry).
  unsigned long auxv = getauxval(AT_HWCAP);

#ifdef COMPILER2
  if (auxv & HWCAP_AES) {
    // Hardware AES is present: default UseAES/UseAESIntrinsics to true,
    // but honor an explicit -XX:-UseAES (warning when intrinsics were
    // requested while UseAES is off).
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
      }
    }
  } else
#endif
  // Reached when hardware AES is absent — or always without COMPILER2,
  // since the dangling "else" above is compiled out in that case.
  if (UseAES || UseAESIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  // Atomic-operation support advertised to the rest of the VM:
  // 8-byte CAS plus 4- and 8-byte atomic swap/add.
  _supports_cx8 = true;
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  // TODO-AARCH64 revise C2 flags

  if (has_simd()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      FLAG_SET_DEFAULT(UsePopCountInstruction, true);
    }
  }

  // NOTE(review): set unconditionally, overriding even an explicit
  // command-line value — confirm that is intended (FLAG_SET_DEFAULT is
  // used everywhere else in this function).
  AllocatePrefetchDistance = 128;

#ifdef COMPILER2
  FLAG_SET_DEFAULT(UseFPUForSpilling, true);

  if (FLAG_IS_DEFAULT(MaxVectorSize)) {
    // FLAG_SET_DEFAULT(MaxVectorSize, has_simd() ? 16 : 8);
    // SIMD/NEON can use 16, but default is 8 because currently
    // larger than 8 will disable instruction scheduling
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }

  if (MaxVectorSize > 16) {
    // NOTE(review): oversized values are reset to 8 rather than clamped
    // to 16 — presumably to keep instruction scheduling enabled (see the
    // comment above); confirm intent.
    FLAG_SET_DEFAULT(MaxVectorSize, 8);
  }
#endif

  // Port-specific tiered-compilation thresholds (applied only when the
  // user left the corresponding flag at its default).
  if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
    Tier4CompileThreshold = 10000;
  }
  if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
    Tier3InvocationThreshold = 1000;
  }
  if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
    Tier3CompileThreshold = 5000;
  }
  if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
    Tier3MinInvocationThreshold = 500;
  }

  FLAG_SET_DEFAULT(TypeProfileLevel, 0); // unsupported

  // This machine does not allow unaligned memory accesses
  if (UseUnalignedAccesses) {
    if (!FLAG_IS_DEFAULT(UseUnalignedAccesses))
      warning("Unaligned memory access is not available on this CPU");
    FLAG_SET_DEFAULT(UseUnalignedAccesses, false);
  }

  // Feature detection complete; accessors such as has_simd() are now valid.
  _is_initialized = true;
}
 239 
 240 bool VM_Version::use_biased_locking() {
 241   // TODO-AARCH64 measure performance and revise
 242 
 243   // The cost of CAS on uniprocessor ARM v6 and later is low compared to the
 244   // overhead related to slightly longer Biased Locking execution path.
 245   // Testing shows no improvement when running with Biased Locking enabled
 246   // on an ARMv6 and higher uniprocessor systems.  The situation is different on
 247   // ARMv5 and MP systems.
 248   //
 249   // Therefore the Biased Locking is enabled on ARMv5 and ARM MP only.
 250   //
 251   return os::is_MP();
 252 }