1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2015, Red Hat Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "runtime/java.hpp"
  31 #include "runtime/stubCodeGenerator.hpp"
  32 #include "vm_version_aarch64.hpp"
  33 #ifdef TARGET_OS_FAMILY_linux
  34 # include "os_linux.inline.hpp"
  35 #endif
  36 
  37 #ifndef BUILTIN_SIM
  38 #include <sys/auxv.h>
  39 #include <asm/hwcap.h>
  40 #else
  41 #define getauxval(hwcap) 0
  42 #endif
  43 
  44 #ifndef HWCAP_AES
  45 #define HWCAP_AES   (1<<3)
  46 #endif
  47 
  48 #ifndef HWCAP_PMULL
  49 #define HWCAP_PMULL (1<<4)
  50 #endif
  51 
  52 #ifndef HWCAP_SHA1
  53 #define HWCAP_SHA1  (1<<5)
  54 #endif
  55 
  56 #ifndef HWCAP_SHA2
  57 #define HWCAP_SHA2  (1<<6)
  58 #endif
  59 
  60 #ifndef HWCAP_CRC32
  61 #define HWCAP_CRC32 (1<<7)
  62 #endif
  63 
  64 #ifndef HWCAP_ATOMICS
  65 #define HWCAP_ATOMICS (1<<8)
  66 #endif
  67 
// Definitions of the CPU-identity fields declared in vm_version_aarch64.hpp.
// They are filled in by get_processor_features() from /proc/cpuinfo.
int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_model2;     // second distinct "CPU part" seen (big.LITTLE), else 0
int VM_Version::_variant;
int VM_Version::_revision;
int VM_Version::_stepping;

// Code blob holding the generated getPsrInfo stub.
static BufferBlob* stub_blob;
static const int stub_size = 550;

extern "C" {
  // Signature of the generated stub: fills in a VM_Version::CpuidInfo*.
  typedef void (*getPsrInfo_stub_t)(void*);
}
static getPsrInfo_stub_t getPsrInfo_stub = NULL;
  82 
  83 
// Generates the getPsrInfo stub: a small piece of machine code used to
// query processor state registers.  Currently the stub body is empty (it
// only returns); CPU features are instead read from HWCAP/auxv and
// /proc/cpuinfo in get_processor_features().
class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  // Emits the stub into the current code buffer and returns its entry address.
  address generate_getPsrInfo() {
    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
#   define __ _masm->
    address start = __ pc();

#ifdef BUILTIN_SIM
    // On the simulator, emit the C-stub prologue (1 GP arg, 0 FP args, void return).
    __ c_stub_prolog(1, 0, MacroAssembler::ret_type_void);
#endif

    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);

    address entry = __ pc();

    // TODO : redefine fields in CpuidInfo and generate
    // code to fill them in

    __ ret(lr);

#   undef __

    return start;
  }
};
 112 
 113 
 114 void VM_Version::get_processor_features() {
 115   _supports_cx8 = true;
 116   _supports_atomic_getset4 = true;
 117   _supports_atomic_getadd4 = true;
 118   _supports_atomic_getset8 = true;
 119   _supports_atomic_getadd8 = true;
 120 
 121   if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
 122     FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
 123   if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
 124     FLAG_SET_DEFAULT(AllocatePrefetchStepSize, 64);
 125   FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 256);
 126   FLAG_SET_DEFAULT(PrefetchFieldsAhead, 256);
 127   if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes))
 128     FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 256);
 129   if ((PrefetchCopyIntervalInBytes & 7) || (PrefetchCopyIntervalInBytes >= 32768)) {
 130     warning("PrefetchCopyIntervalInBytes must be a multiple of 8 and < 32768");
 131     PrefetchCopyIntervalInBytes &= ~7;
 132     if (PrefetchCopyIntervalInBytes >= 32768)
 133       PrefetchCopyIntervalInBytes = 32760;
 134   }
 135 
 136   unsigned long auxv = getauxval(AT_HWCAP);
 137 
 138   char buf[512];
 139 
 140   _features = auxv;
 141 
 142   int cpu_lines = 0;
 143   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
 144     char buf[128], *p;
 145     while (fgets(buf, sizeof (buf), f) != NULL) {
 146       if (p = strchr(buf, ':')) {
 147         long v = strtol(p+1, NULL, 0);
 148         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
 149           _cpu = v;
 150           cpu_lines++;
 151         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
 152           _variant = v;
 153         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
 154           if (_model != v)  _model2 = _model;
 155           _model = v;
 156         } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
 157           _revision = v;
 158         }
 159       }
 160     }
 161     fclose(f);
 162   }
 163 
 164   // Enable vendor specific features
 165   if (_cpu == CPU_CAVIUM && _variant == 0) _features |= CPU_DMB_ATOMICS;
 166   if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC;
 167   // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
 168   // we assume the worst and assume we could be on a big little system and have
 169   // undisclosed A53 cores which we could be swapped to at any stage
 170   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
 171 
 172   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
 173   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
 174   if (auxv & HWCAP_ASIMD) strcat(buf, ", simd");
 175   if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
 176   if (auxv & HWCAP_AES)   strcat(buf, ", aes");
 177   if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
 178   if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
 179   if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
 180 
 181   _features_string = os::strdup(buf);
 182 
 183   if (FLAG_IS_DEFAULT(UseCRC32)) {
 184     UseCRC32 = (auxv & HWCAP_CRC32) != 0;
 185   }
 186   if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
 187     warning("UseCRC32 specified, but not supported on this CPU");
 188   }
 189 
 190   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
 191     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
 192   }
 193 
 194   if (UseVectorizedMismatchIntrinsic) {
 195     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
 196     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
 197   }
 198 
 199   if (auxv & HWCAP_ATOMICS) {
 200     if (FLAG_IS_DEFAULT(UseLSE))
 201       FLAG_SET_DEFAULT(UseLSE, true);
 202   } else {
 203     if (UseLSE) {
 204       warning("UseLSE specified, but not supported on this CPU");
 205     }
 206   }
 207 
 208   if (auxv & HWCAP_AES) {
 209     UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
 210     UseAESIntrinsics =
 211         UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
 212     if (UseAESIntrinsics && !UseAES) {
 213       warning("UseAESIntrinsics enabled, but UseAES not, enabling");
 214       UseAES = true;
 215     }
 216   } else {
 217     if (UseAES) {
 218       warning("UseAES specified, but not supported on this CPU");
 219     }
 220     if (UseAESIntrinsics) {
 221       warning("UseAESIntrinsics specified, but not supported on this CPU");
 222     }
 223   }
 224 
 225   if (UseAESCTRIntrinsics) {
 226     warning("AES/CTR intrinsics are not available on this CPU");
 227     FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
 228   }
 229 
 230   if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
 231     UseCRC32Intrinsics = true;
 232   }
 233 
 234   if (auxv & HWCAP_CRC32) {
 235     if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
 236       FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
 237     }
 238   } else if (UseCRC32CIntrinsics) {
 239     warning("CRC32C is not available on the CPU");
 240     FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
 241   }
 242 
 243   if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
 244     if (FLAG_IS_DEFAULT(UseSHA)) {
 245       FLAG_SET_DEFAULT(UseSHA, true);
 246     }
 247   } else if (UseSHA) {
 248     warning("SHA instructions are not available on this CPU");
 249     FLAG_SET_DEFAULT(UseSHA, false);
 250   }
 251 
 252   if (UseSHA && (auxv & HWCAP_SHA1)) {
 253     if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
 254       FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
 255     }
 256   } else if (UseSHA1Intrinsics) {
 257     warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
 258     FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
 259   }
 260 
 261   if (UseSHA && (auxv & HWCAP_SHA2)) {
 262     if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
 263       FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
 264     }
 265   } else if (UseSHA256Intrinsics) {
 266     warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
 267     FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
 268   }
 269 
 270   if (UseSHA512Intrinsics) {
 271     warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
 272     FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
 273   }
 274 
 275   if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
 276     FLAG_SET_DEFAULT(UseSHA, false);
 277   }
 278 
 279   if (auxv & HWCAP_PMULL) {
 280     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
 281       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
 282     }
 283   } else if (UseGHASHIntrinsics) {
 284     warning("GHASH intrinsics are not available on this CPU");
 285     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
 286   }
 287 
 288   // This machine allows unaligned memory accesses
 289   if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
 290     FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
 291   }
 292 
 293   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
 294     UseMultiplyToLenIntrinsic = true;
 295   }
 296 
 297   if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
 298     UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
 299   }
 300 
 301   if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
 302     UsePopCountInstruction = true;
 303   }
 304 
 305   if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
 306     UseMontgomeryMultiplyIntrinsic = true;
 307   }
 308   if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
 309     UseMontgomerySquareIntrinsic = true;
 310   }
 311 
 312 #ifdef COMPILER2
 313   if (FLAG_IS_DEFAULT(OptoScheduling)) {
 314     OptoScheduling = true;
 315   }
 316 #endif
 317 }
 318 
 319 void VM_Version::initialize() {
 320   ResourceMark rm;
 321 
 322   stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
 323   if (stub_blob == NULL) {
 324     vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
 325   }
 326 
 327   CodeBuffer c(stub_blob);
 328   VM_Version_StubGenerator g(&c);
 329   getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
 330                                    g.generate_getPsrInfo());
 331 
 332   get_processor_features();
 333 }