< prev index next >

src/hotspot/cpu/aarch64/vm_version_aarch64.cpp

Print this page
rev 60615 : 8231441: Initial SVE backend support
Reviewed-by: adinn, pli
Contributed-by: joshua.zhu@arm.com, yang.zhang@arm.com, ningsheng.jian@arm.com


  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "runtime/java.hpp"
  31 #include "runtime/os.hpp"
  32 #include "runtime/stubCodeGenerator.hpp"
  33 #include "runtime/vm_version.hpp"

  34 #include "utilities/macros.hpp"
  35 
  36 #include OS_HEADER_INLINE(os)
  37 
  38 #include <sys/auxv.h>
  39 #include <asm/hwcap.h>


  40 
  41 #ifndef HWCAP_AES
  42 #define HWCAP_AES   (1<<3)
  43 #endif
  44 
  45 #ifndef HWCAP_PMULL
  46 #define HWCAP_PMULL (1<<4)
  47 #endif
  48 
  49 #ifndef HWCAP_SHA1
  50 #define HWCAP_SHA1  (1<<5)
  51 #endif
  52 
  53 #ifndef HWCAP_SHA2
  54 #define HWCAP_SHA2  (1<<6)
  55 #endif
  56 
  57 #ifndef HWCAP_CRC32
  58 #define HWCAP_CRC32 (1<<7)
  59 #endif
  60 
  61 #ifndef HWCAP_ATOMICS
  62 #define HWCAP_ATOMICS (1<<8)
  63 #endif
  64 
  65 #ifndef HWCAP_SHA512
  66 #define HWCAP_SHA512 (1 << 21)
  67 #endif
  68 














       // CPU identification fields, filled in by get_processor_features()
       // (implementer / part / variant / revision parsed from /proc/cpuinfo).
  69 int VM_Version::_cpu;
  70 int VM_Version::_model;
  71 int VM_Version::_model2;
  72 int VM_Version::_variant;
  73 int VM_Version::_revision;
  74 int VM_Version::_stepping;
  75 bool VM_Version::_dcpop;

       // Raw system-register values (DCZID_EL0, CTR_EL0) captured by the
       // generated getPsrInfo stub.
  76 VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };
  77 
       // Code blob holding the generated getPsrInfo stub.
  78 static BufferBlob* stub_blob;
  79 static const int stub_size = 550;
  80 
  81 extern "C" {
  82   typedef void (*getPsrInfo_stub_t)(void*);
  83 }
  84 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
  85 
  86 
       // Generates the getPsrInfo stub: a tiny leaf routine that copies the
       // DCZID_EL0 and CTR_EL0 system registers into the VM_Version::PsrInfo
       // struct whose address is passed in c_rarg0.
  87 class VM_Version_StubGenerator: public StubCodeGenerator {
  88  public:
  89 
  90   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  91 
  92   address generate_getPsrInfo() {
  93     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
  94 #   define __ _masm->
  95     address start = __ pc();


  98 
  99     address entry = __ pc();
 100 
 101     __ enter();
 102 
         // Read DCZID_EL0 and store it at dczid_el0_offset() in *c_rarg0.
  103     __ get_dczid_el0(rscratch1);
  104     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
  105 
         // Read CTR_EL0 and store it at ctr_el0_offset() in *c_rarg0.
  106     __ get_ctr_el0(rscratch1);
  107     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));
  108 
  109     __ leave();
  110     __ ret(lr);
  111 
  112 #   undef __
  113 
  114     return start;
  115   }
  116 };
 117 
 118 
       // Detect CPU capabilities and derive default values for JVM flags.
       // Sources: the generated getPsrInfo stub (system registers), the kernel
       // auxiliary vector (HWCAP bits), and /proc/cpuinfo (CPU identification).
  119 void VM_Version::get_processor_features() {
  120   _supports_cx8 = true;
  121   _supports_atomic_getset4 = true;
  122   _supports_atomic_getadd4 = true;
  123   _supports_atomic_getset8 = true;
  124   _supports_atomic_getadd8 = true;
  125 
        // Capture DCZID_EL0 / CTR_EL0 into _psr_info (see generate_getPsrInfo()).
  126   getPsrInfo_stub(&_psr_info);
  127 
  128   int dcache_line = VM_Version::dcache_line_size();
  129 
  130   // Limit AllocatePrefetchDistance so that it does not exceed the
  131   // constraint in AllocatePrefetchDistanceConstraintFunc.
  132   if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
  133     FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line));
  134 
  135   if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
  136     FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line);
  137   if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes))
  138     FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line);


  149       PrefetchCopyIntervalInBytes = 32760;
  150   }
  151 
        // Prefetch distances/steps must be 8-byte aligned; round down with a
        // warning instead of rejecting the user's setting.
  152   if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
  153     warning("AllocatePrefetchDistance must be multiple of 8");
  154     AllocatePrefetchDistance &= ~7;
  155   }
  156 
  157   if (AllocatePrefetchStepSize & 7) {
  158     warning("AllocatePrefetchStepSize must be multiple of 8");
  159     AllocatePrefetchStepSize &= ~7;
  160   }
  161 
  162   if (SoftwarePrefetchHintDistance != -1 &&
  163        (SoftwarePrefetchHintDistance & 7)) {
  164     warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
  165     SoftwarePrefetchHintDistance &= ~7;
  166   }
  167 
        // HWCAP bits from the kernel auxiliary vector describe which ISA
        // extensions the kernel exposes to user space.
  168   uint64_t auxv = getauxval(AT_HWCAP);

  169 
  170   char buf[512];
  171 
  172   _features = auxv;
  173 
  174   int cpu_lines = 0;
        // Parse CPU identification out of /proc/cpuinfo.
        // NOTE(review): the inner 'buf' intentionally shadows the outer 512-byte
        // 'buf' so long "flags" lines fit; the outer buffer is reused below.
  175   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
  176     // need a large buffer as the flags line may include lots of text
  177     char buf[1024], *p;
  178     while (fgets(buf, sizeof (buf), f) != NULL) {
  179       if ((p = strchr(buf, ':')) != NULL) {
  180         long v = strtol(p+1, NULL, 0);
  181         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
  182           _cpu = v;
  183           cpu_lines++;
  184         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
  185           _variant = v;
  186         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
                // A second, different "CPU part" value indicates a heterogeneous
                // (big.LITTLE) system; remember the previous part in _model2.
  187           if (_model != v)  _model2 = _model;
  188           _model = v;


  274     if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
  275       FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
  276     }
  277   }
  278 
  279   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
  280   // If an old style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
  281   // we assume the worst and assume we could be on a big little system and have
  282   // undisclosed A53 cores which we could be swapped to at any stage
  283   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
  284 
        // Build the human-readable features string (copied below with
        // os::strdup). The fixed set of short tokens is assumed to fit in the
        // 512-byte buffer — TODO confirm if more tokens are ever added.
  285   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
  286   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
  287   if (auxv & HWCAP_ASIMD) strcat(buf, ", simd");
  288   if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
  289   if (auxv & HWCAP_AES)   strcat(buf, ", aes");
  290   if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
  291   if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
  292   if (auxv & HWCAP_SHA512) strcat(buf, ", sha512");
  293   if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");


  294 
  295   _features_string = os::strdup(buf);
  296 
  297   if (FLAG_IS_DEFAULT(UseCRC32)) {
  298     UseCRC32 = (auxv & HWCAP_CRC32) != 0;
  299   }
  300 
  301   if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
  302     warning("UseCRC32 specified, but not supported on this CPU");
  303     FLAG_SET_DEFAULT(UseCRC32, false);
  304   }
  305 
  306   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
  307     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
  308   }
  309 
        // No vectorized-mismatch stub is implemented for this port.
  310   if (UseVectorizedMismatchIntrinsic) {
  311     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
  312     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  313   }


  413     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
  414       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
  415     }
  416   } else if (UseGHASHIntrinsics) {
  417     warning("GHASH intrinsics are not available on this CPU");
  418     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  419   }
  420 
        // Block zeroing requires DC ZVA to be enabled (per DCZID_EL0 captured
        // in _psr_info).
  421   if (is_zva_enabled()) {
  422     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
  423       FLAG_SET_DEFAULT(UseBlockZeroing, true);
  424     }
  425     if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
  426       FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
  427     }
  428   } else if (UseBlockZeroing) {
  429     warning("DC ZVA is not available on this CPU");
  430     FLAG_SET_DEFAULT(UseBlockZeroing, false);
  431   }
  432 












  433   // This machine allows unaligned memory accesses
  434   if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
  435     FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  436   }
  437 
  438   if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
  439     FLAG_SET_DEFAULT(UsePopCountInstruction, true);
  440   }
  441 
  442   if (!UsePopCountInstruction) {
  443     warning("UsePopCountInstruction is always enabled on this CPU");
  444     UsePopCountInstruction = true;
  445   }
  446 
  447 #ifdef COMPILER2
  448   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
  449     UseMultiplyToLenIntrinsic = true;
  450   }
  451 
  452   if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
  453     UseSquareToLenIntrinsic = true;
  454   }
  455 
  456   if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
  457     UseMulAddIntrinsic = true;
  458   }
  459 
  460   if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
  461     UseMontgomeryMultiplyIntrinsic = true;
  462   }
  463   if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
  464     UseMontgomerySquareIntrinsic = true;










































  465   }
  466 
  467   if (FLAG_IS_DEFAULT(OptoScheduling)) {
  468     OptoScheduling = true;
  469   }
  470 
  471   if (FLAG_IS_DEFAULT(AlignVector)) {
  472     AlignVector = AvoidUnalignedAccesses;
  473   }
  474 #endif
  475 }
 476 
 477 void VM_Version::initialize() {
 478   ResourceMark rm;
 479 
 480   stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
 481   if (stub_blob == NULL) {
 482     vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
 483   }
 484 


  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "memory/resourceArea.hpp"
  30 #include "runtime/java.hpp"
  31 #include "runtime/os.hpp"
  32 #include "runtime/stubCodeGenerator.hpp"
  33 #include "runtime/vm_version.hpp"
  34 #include "utilities/formatBuffer.hpp"
  35 #include "utilities/macros.hpp"
  36 
  37 #include OS_HEADER_INLINE(os)
  38 

  39 #include <asm/hwcap.h>
  40 #include <sys/auxv.h>
  41 #include <sys/prctl.h>
  42 
  43 #ifndef HWCAP_AES
  44 #define HWCAP_AES   (1<<3)
  45 #endif
  46 
  47 #ifndef HWCAP_PMULL
  48 #define HWCAP_PMULL (1<<4)
  49 #endif
  50 
  51 #ifndef HWCAP_SHA1
  52 #define HWCAP_SHA1  (1<<5)
  53 #endif
  54 
  55 #ifndef HWCAP_SHA2
  56 #define HWCAP_SHA2  (1<<6)
  57 #endif
  58 
  59 #ifndef HWCAP_CRC32
  60 #define HWCAP_CRC32 (1<<7)
  61 #endif
  62 
  63 #ifndef HWCAP_ATOMICS
  64 #define HWCAP_ATOMICS (1<<8)
  65 #endif
  66 
  67 #ifndef HWCAP_SHA512
  68 #define HWCAP_SHA512 (1 << 21)
  69 #endif
  70 
  71 #ifndef HWCAP_SVE
  72 #define HWCAP_SVE (1 << 22)
  73 #endif
  74 
  75 #ifndef HWCAP2_SVE2
  76 #define HWCAP2_SVE2 (1 << 1)
  77 #endif
  78 
  79 #ifndef PR_SVE_GET_VL
  80 // For old toolchains which do not have SVE related macros defined.
  81 #define PR_SVE_SET_VL   50
  82 #define PR_SVE_GET_VL   51
  83 #endif
  84 
       // CPU identification fields, filled in by get_processor_features()
       // (implementer / part / variant / revision parsed from /proc/cpuinfo).
  85 int VM_Version::_cpu;
  86 int VM_Version::_model;
  87 int VM_Version::_model2;
  88 int VM_Version::_variant;
  89 int VM_Version::_revision;
  90 int VM_Version::_stepping;
  91 bool VM_Version::_dcpop;
       // SVE vector length (from prctl(PR_SVE_GET_VL) / prctl(PR_SVE_SET_VL))
       // recorded at startup; 0 until SVE detection runs.
  92 int VM_Version::_initial_sve_vector_length;
       // Raw system-register values (DCZID_EL0, CTR_EL0) captured by the
       // generated getPsrInfo stub.
  93 VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };
  94 
       // Code blob holding the generated getPsrInfo stub.
  95 static BufferBlob* stub_blob;
  96 static const int stub_size = 550;
  97 
  98 extern "C" {
  99   typedef void (*getPsrInfo_stub_t)(void*);
 100 }
 101 static getPsrInfo_stub_t getPsrInfo_stub = NULL;
 102 
 103 
       // Generates the getPsrInfo stub: a tiny leaf routine that copies the
       // DCZID_EL0 and CTR_EL0 system registers into the VM_Version::PsrInfo
       // struct whose address is passed in c_rarg0.
  104 class VM_Version_StubGenerator: public StubCodeGenerator {
  105  public:
  106 
  107   VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
  108 
  109   address generate_getPsrInfo() {
  110     StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
  111 #   define __ _masm->
  112     address start = __ pc();


  115 
  116     address entry = __ pc();
  117 
  118     __ enter();
  119 
         // Read DCZID_EL0 and store it at dczid_el0_offset() in *c_rarg0.
  120     __ get_dczid_el0(rscratch1);
  121     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
  122 
         // Read CTR_EL0 and store it at ctr_el0_offset() in *c_rarg0.
  123     __ get_ctr_el0(rscratch1);
  124     __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));
  125 
  126     __ leave();
  127     __ ret(lr);
  128 
  129 #   undef __
  130 
  131     return start;
  132   }
  133 };
 134 

       // Detect CPU capabilities and derive default values for JVM flags.
       // Sources: the generated getPsrInfo stub (system registers), the kernel
       // auxiliary vector (HWCAP/HWCAP2 bits, including SVE/SVE2), and
       // /proc/cpuinfo (CPU identification).
  135 void VM_Version::get_processor_features() {
  136   _supports_cx8 = true;
  137   _supports_atomic_getset4 = true;
  138   _supports_atomic_getadd4 = true;
  139   _supports_atomic_getset8 = true;
  140   _supports_atomic_getadd8 = true;
  141 
        // Capture DCZID_EL0 / CTR_EL0 into _psr_info (see generate_getPsrInfo()).
  142   getPsrInfo_stub(&_psr_info);
  143 
  144   int dcache_line = VM_Version::dcache_line_size();
  145 
  146   // Limit AllocatePrefetchDistance so that it does not exceed the
  147   // constraint in AllocatePrefetchDistanceConstraintFunc.
  148   if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
  149     FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line));
  150 
  151   if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
  152     FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line);
  153   if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes))
  154     FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line);


  165       PrefetchCopyIntervalInBytes = 32760;
  166   }
  167 
        // Prefetch distances/steps must be 8-byte aligned; round down with a
        // warning instead of rejecting the user's setting.
  168   if (AllocatePrefetchDistance !=-1 && (AllocatePrefetchDistance & 7)) {
  169     warning("AllocatePrefetchDistance must be multiple of 8");
  170     AllocatePrefetchDistance &= ~7;
  171   }
  172 
  173   if (AllocatePrefetchStepSize & 7) {
  174     warning("AllocatePrefetchStepSize must be multiple of 8");
  175     AllocatePrefetchStepSize &= ~7;
  176   }
  177 
  178   if (SoftwarePrefetchHintDistance != -1 &&
  179        (SoftwarePrefetchHintDistance & 7)) {
  180     warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
  181     SoftwarePrefetchHintDistance &= ~7;
  182   }
  183 
        // HWCAP/HWCAP2 bits from the kernel auxiliary vector describe which ISA
        // extensions the kernel exposes to user space (SVE2 lives in HWCAP2).
  184   uint64_t auxv = getauxval(AT_HWCAP);
  185   uint64_t auxv2 = getauxval(AT_HWCAP2);
  186 
  187   char buf[512];
  188 
  189   _features = auxv;
  190 
  191   int cpu_lines = 0;
        // Parse CPU identification out of /proc/cpuinfo.
        // NOTE(review): the inner 'buf' intentionally shadows the outer 512-byte
        // 'buf' so long "flags" lines fit; the outer buffer is reused below.
  192   if (FILE *f = fopen("/proc/cpuinfo", "r")) {
  193     // need a large buffer as the flags line may include lots of text
  194     char buf[1024], *p;
  195     while (fgets(buf, sizeof (buf), f) != NULL) {
  196       if ((p = strchr(buf, ':')) != NULL) {
  197         long v = strtol(p+1, NULL, 0);
  198         if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
  199           _cpu = v;
  200           cpu_lines++;
  201         } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
  202           _variant = v;
  203         } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
                // A second, different "CPU part" value indicates a heterogeneous
                // (big.LITTLE) system; remember the previous part in _model2.
  204           if (_model != v)  _model2 = _model;
  205           _model = v;


  291     if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
  292       FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
  293     }
  294   }
  295 
  296   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
  297   // If an old style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
  298   // we assume the worst and assume we could be on a big little system and have
  299   // undisclosed A53 cores which we could be swapped to at any stage
  300   if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;
  301 
        // Build the human-readable features string (copied below with
        // os::strdup). The fixed set of short tokens is assumed to fit in the
        // 512-byte buffer — TODO confirm if more tokens are ever added.
  302   sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
  303   if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
  304   if (auxv & HWCAP_ASIMD) strcat(buf, ", simd");
  305   if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
  306   if (auxv & HWCAP_AES)   strcat(buf, ", aes");
  307   if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
  308   if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
  309   if (auxv & HWCAP_SHA512) strcat(buf, ", sha512");
  310   if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
  311   if (auxv & HWCAP_SVE) strcat(buf, ", sve");
  312   if (auxv2 & HWCAP2_SVE2) strcat(buf, ", sve2");
  313 
  314   _features_string = os::strdup(buf);
  315 
  316   if (FLAG_IS_DEFAULT(UseCRC32)) {
  317     UseCRC32 = (auxv & HWCAP_CRC32) != 0;
  318   }
  319 
  320   if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
  321     warning("UseCRC32 specified, but not supported on this CPU");
  322     FLAG_SET_DEFAULT(UseCRC32, false);
  323   }
  324 
  325   if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
  326     FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
  327   }
  328 
        // No vectorized-mismatch stub is implemented for this port.
  329   if (UseVectorizedMismatchIntrinsic) {
  330     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
  331     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  332   }


  432     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
  433       FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
  434     }
  435   } else if (UseGHASHIntrinsics) {
  436     warning("GHASH intrinsics are not available on this CPU");
  437     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  438   }
  439 
        // Block zeroing requires DC ZVA to be enabled (per DCZID_EL0 captured
        // in _psr_info).
  440   if (is_zva_enabled()) {
  441     if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
  442       FLAG_SET_DEFAULT(UseBlockZeroing, true);
  443     }
  444     if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
  445       FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
  446     }
  447   } else if (UseBlockZeroing) {
  448     warning("DC ZVA is not available on this CPU");
  449     FLAG_SET_DEFAULT(UseBlockZeroing, false);
  450   }
  451 
        // SVE detection: default UseSVE to 2 when SVE2 is available, else 1.
  452   if (auxv & HWCAP_SVE) {
  453     if (FLAG_IS_DEFAULT(UseSVE)) {
  454       FLAG_SET_DEFAULT(UseSVE, (auxv2 & HWCAP2_SVE2) ? 2 : 1);
  455     }
  456     if (UseSVE > 0) {
          // NOTE(review): prctl(PR_SVE_GET_VL) returns the vector length in the
          // low bits but may also carry flag bits (e.g. PR_SVE_VL_INHERIT) in
          // the upper bits — confirm whether masking with PR_SVE_VL_LEN_MASK is
          // needed before storing the value.
  457       _initial_sve_vector_length = prctl(PR_SVE_GET_VL);
  458     }
  459   } else if (UseSVE > 0) {
  460     warning("UseSVE specified, but not supported on current CPU. Disabling SVE.");
  461     FLAG_SET_DEFAULT(UseSVE, 0);
  462   }
  463 
  464   // This machine allows unaligned memory accesses
  465   if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
  466     FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  467   }
  468 
  469   if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
  470     FLAG_SET_DEFAULT(UsePopCountInstruction, true);
  471   }
  472 
  473   if (!UsePopCountInstruction) {
  474     warning("UsePopCountInstruction is always enabled on this CPU");
  475     UsePopCountInstruction = true;
  476   }
  477 
  478 #ifdef COMPILER2
  479   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
  480     UseMultiplyToLenIntrinsic = true;
  481   }
  482 
  483   if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
  484     UseSquareToLenIntrinsic = true;
  485   }
  486 
  487   if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
  488     UseMulAddIntrinsic = true;
  489   }
  490 
  491   if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
  492     UseMontgomeryMultiplyIntrinsic = true;
  493   }
  494   if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
  495     UseMontgomerySquareIntrinsic = true;
  496   }
  497 
        // With SVE enabled, MaxVectorSize is the SVE vector length in bytes;
        // it must be a power of two, >= 16, and supported by the kernel/CPU.
  498   if (UseSVE > 0) {
  499     if (FLAG_IS_DEFAULT(MaxVectorSize)) {
  500       MaxVectorSize = _initial_sve_vector_length;
  501     } else if (MaxVectorSize < 16) {
  502       warning("SVE does not support vector length less than 16 bytes. Disabling SVE.");
  503       UseSVE = 0;
  504     } else if ((MaxVectorSize % 16) == 0 && is_power_of_2(MaxVectorSize)) {
          // Ask the kernel for the requested vector length; it grants at most
          // the largest length the system supports.
  505       int new_vl = prctl(PR_SVE_SET_VL, MaxVectorSize);
  506       _initial_sve_vector_length = new_vl;
  507       // If MaxVectorSize is larger than system largest supported SVE vector length, above prctl()
  508       // call will set task vector length to the system largest supported value. So, we also update
  509       // MaxVectorSize to that largest supported value.
  510       if (new_vl < 0) {
  511         vm_exit_during_initialization(
  512           err_msg("Current system does not support SVE vector length for MaxVectorSize: %d",
  513                   (int)MaxVectorSize));
  514       } else if (new_vl != MaxVectorSize) {
  515         warning("Current system only supports max SVE vector length %d. Set MaxVectorSize to %d",
  516                 new_vl, new_vl);
  517       }
  518       MaxVectorSize = new_vl;
  519     } else {
  520       vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize));
  521     }
  522   }
  523 
        // Without SVE, NEON constrains vectors to 8..16 bytes.
  524   if (UseSVE == 0) {  // NEON
  525     int min_vector_size = 8;
  526     int max_vector_size = 16;
  527     if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
  528       if (!is_power_of_2(MaxVectorSize)) {
  529         vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize));
  530       } else if (MaxVectorSize < min_vector_size) {
  531         warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
  532         FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
  533       } else if (MaxVectorSize > max_vector_size) {
  534         warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
  535         FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
  536       }
  537     } else {
  538       FLAG_SET_DEFAULT(MaxVectorSize, 16);
  539     }
  540   }
  541 
  542   if (FLAG_IS_DEFAULT(OptoScheduling)) {
  543     OptoScheduling = true;
  544   }
  545 
  546   if (FLAG_IS_DEFAULT(AlignVector)) {
  547     AlignVector = AvoidUnalignedAccesses;
  548   }
  549 #endif
  550 }
 551 
 552 void VM_Version::initialize() {
 553   ResourceMark rm;
 554 
 555   stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
 556   if (stub_blob == NULL) {
 557     vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
 558   }
 559 
< prev index next >