/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"

#include OS_HEADER_INLINE(os)

#include <asm/hwcap.h>
#include <sys/auxv.h>
#include <sys/prctl.h>

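// Fallback definitions for HWCAP bits that older <asm/hwcap.h> headers do
// not provide. The values mirror the Linux kernel's uapi bit assignments.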
#ifndef HWCAP_AES
#define HWCAP_AES   (1<<3)
#endif

#ifndef HWCAP_PMULL
#define HWCAP_PMULL (1<<4)
#endif

#ifndef HWCAP_SHA1
#define HWCAP_SHA1  (1<<5)
#endif

#ifndef HWCAP_SHA2
#define HWCAP_SHA2  (1<<6)
#endif

#ifndef HWCAP_CRC32
#define HWCAP_CRC32 (1<<7)
#endif

#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1<<8)
#endif

#ifndef HWCAP_SHA512
#define HWCAP_SHA512 (1 << 21)
#endif

#ifndef HWCAP_SVE
#define HWCAP_SVE (1 << 22)
#endif

#ifndef HWCAP2_SVE2
#define HWCAP2_SVE2 (1 << 1)
#endif

#ifndef PR_SVE_GET_VL
// For old toolchains which do not have SVE related macros defined.
#define PR_SVE_SET_VL   50
#define PR_SVE_GET_VL   51
#endif

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_model2;
int VM_Version::_variant;
int VM_Version::_revision;
int VM_Version::_stepping;
bool VM_Version::_dcpop;
int VM_Version::_initial_sve_vector_length;
VM_Version::PsrInfo VM_Version::_psr_info   = { 0, };

static BufferBlob* stub_blob;
static const int stub_size = 550;

extern "C" {
  typedef void (*getPsrInfo_stub_t)(void*);
}
static getPsrInfo_stub_t getPsrInfo_stub = NULL;


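// Generates a stub that reads the CPU identification registers (DCZID_EL0 and
// CTR_EL0) into a PsrInfo struct. These system registers can only be read via
// MRS instructions, so a small assembly stub is emitted at startup.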
class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_getPsrInfo() {
    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
#   define __ _masm->
    address start = __ pc();

    // void getPsrInfo(VM_Version::PsrInfo* psr_info);

    address entry = __ pc();

    __ enter();

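    // Read DCZID_EL0 (DC ZVA block size and prohibit bit) and CTR_EL0 (cache
    // type register) and store both words into the caller's PsrInfo struct.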
    __ get_dczid_el0(rscratch1);
    __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));

    __ get_ctr_el0(rscratch1);
    __ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::ctr_el0_offset())));

    __ leave();
    __ ret(lr);

#   undef __

    return start;
  }
};

void VM_Version::get_processor_features() {
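  // All AArch64 implementations support 8-byte compare-and-exchange and
  // 4/8-byte atomic get-and-set/get-and-add (via LDXR/STXR or LSE).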
  _supports_cx8 = true;
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  getPsrInfo_stub(&_psr_info);

  int dcache_line = VM_Version::dcache_line_size();

  // Limit AllocatePrefetchDistance so that it does not exceed the
  // constraint in AllocatePrefetchDistanceConstraintFunc.
  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line));

  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line);
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes))
    FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 3*dcache_line);
  if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes))
    FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 3*dcache_line);
  if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance))
    FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, 3*dcache_line);

  if (PrefetchCopyIntervalInBytes != -1 &&
       ((PrefetchCopyIntervalInBytes & 7) || (PrefetchCopyIntervalInBytes >= 32768))) {
    warning("PrefetchCopyIntervalInBytes must be -1, or a multiple of 8 and < 32768");
    PrefetchCopyIntervalInBytes &= ~7;
    if (PrefetchCopyIntervalInBytes >= 32768)
      PrefetchCopyIntervalInBytes = 32760;
  }

  if (AllocatePrefetchDistance != -1 && (AllocatePrefetchDistance & 7)) {
    warning("AllocatePrefetchDistance must be multiple of 8");
    AllocatePrefetchDistance &= ~7;
  }

  if (AllocatePrefetchStepSize & 7) {
    warning("AllocatePrefetchStepSize must be multiple of 8");
    AllocatePrefetchStepSize &= ~7;
  }

  if (SoftwarePrefetchHintDistance != -1 &&
       (SoftwarePrefetchHintDistance & 7)) {
    warning("SoftwarePrefetchHintDistance must be -1, or a multiple of 8");
    SoftwarePrefetchHintDistance &= ~7;
  }

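  // Query the kernel's hardware capability masks: AT_HWCAP carries the
  // baseline AArch64 feature bits, AT_HWCAP2 newer additions such as SVE2.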
  uint64_t auxv = getauxval(AT_HWCAP);
  uint64_t auxv2 = getauxval(AT_HWCAP2);

  char buf[512];

  _features = auxv;

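  // Parse /proc/cpuinfo for the MIDR_EL1 fields (implementer, variant, part,
  // revision) and for the "dcpop" flag, which indicates DC CVAP support.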
  int cpu_lines = 0;
  if (FILE *f = fopen("/proc/cpuinfo", "r")) {
    // need a large buffer as the flags line may include lots of text
    char buf[1024], *p;
    while (fgets(buf, sizeof (buf), f) != NULL) {
      if ((p = strchr(buf, ':')) != NULL) {
        long v = strtol(p+1, NULL, 0);
        if (strncmp(buf, "CPU implementer", sizeof "CPU implementer" - 1) == 0) {
          _cpu = v;
          cpu_lines++;
        } else if (strncmp(buf, "CPU variant", sizeof "CPU variant" - 1) == 0) {
          _variant = v;
        } else if (strncmp(buf, "CPU part", sizeof "CPU part" - 1) == 0) {
          if (_model != v)  _model2 = _model;
          _model = v;
        } else if (strncmp(buf, "CPU revision", sizeof "CPU revision" - 1) == 0) {
          _revision = v;
        } else if (strncmp(buf, "flags", sizeof("flags") - 1) == 0) {
          if (strstr(p+1, "dcpop")) {
            _dcpop = true;
          }
        }
      }
    }
    fclose(f);
  }

  if (os::supports_map_sync()) {
    // If dcpop is available, publish the data cache line flush size via the
    // generic field; otherwise let it default to zero, thereby disabling
    // writeback.
    if (_dcpop) {
      _data_cache_line_flush_size = dcache_line;
    }
  }

  // Enable vendor specific features

  // Ampere eMAG
  if (_cpu == CPU_AMCC && (_model == 0) && (_variant == 0x3)) {
    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
    }
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
    }
    if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
      FLAG_SET_DEFAULT(UseSIMDForArrayEquals, !(_revision == 1 || _revision == 2));
    }
  }

  // ThunderX
  if (_cpu == CPU_CAVIUM && (_model == 0xA1)) {
    guarantee(_variant != 0, "Pre-release hardware no longer supported.");
    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
    }
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0));
    }
    if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
      FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
    }
  }

  // ThunderX2
  if ((_cpu == CPU_CAVIUM && (_model == 0xAF)) ||
      (_cpu == CPU_BROADCOM && (_model == 0x516))) {
    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
    }
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
    }
  }

  // HiSilicon TSV110
  if (_cpu == CPU_HISILICON && _model == 0xd01) {
    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
    }
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
    }
  }

  // Cortex A53
  if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) {
    _features |= CPU_A53MAC;
    if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
      FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
    }
  }

  // Cortex A73
  if (_cpu == CPU_ARM && (_model == 0xd09 || _model2 == 0xd09)) {
    if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
      FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
    }
    // A73 is faster with short loops that are easy for speculative execution
    if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
      FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
    }
  }

  if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
  // If we have an old-style /proc/cpuinfo (cpu_lines == 1) and _model is an
  // A57 (0xd07), assume the worst: we could be on a big.LITTLE system with
  // undisclosed A53 cores that we could be migrated to at any time.
  if (_cpu == CPU_ARM && cpu_lines == 1 && _model == 0xd07) _features |= CPU_A53MAC;

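  // Build the human-readable features string: MIDR fields first, then one
  // entry per detected capability.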
  sprintf(buf, "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
  if (_model2) sprintf(buf+strlen(buf), "(0x%03x)", _model2);
  if (auxv & HWCAP_ASIMD) strcat(buf, ", simd");
  if (auxv & HWCAP_CRC32) strcat(buf, ", crc");
  if (auxv & HWCAP_AES)   strcat(buf, ", aes");
  if (auxv & HWCAP_SHA1)  strcat(buf, ", sha1");
  if (auxv & HWCAP_SHA2)  strcat(buf, ", sha256");
  if (auxv & HWCAP_SHA512) strcat(buf, ", sha512");
  if (auxv & HWCAP_ATOMICS) strcat(buf, ", lse");
  if (auxv & HWCAP_SVE) strcat(buf, ", sve");
  if (auxv2 & HWCAP2_SVE2) strcat(buf, ", sve2");

  _features_string = os::strdup(buf);

  if (FLAG_IS_DEFAULT(UseCRC32)) {
    UseCRC32 = (auxv & HWCAP_CRC32) != 0;
  }

  if (UseCRC32 && (auxv & HWCAP_CRC32) == 0) {
    warning("UseCRC32 specified, but not supported on this CPU");
    FLAG_SET_DEFAULT(UseCRC32, false);
  }

  if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }

  if (auxv & HWCAP_ATOMICS) {
    if (FLAG_IS_DEFAULT(UseLSE))
      FLAG_SET_DEFAULT(UseLSE, true);
  } else {
    if (UseLSE) {
      warning("UseLSE specified, but not supported on this CPU");
      FLAG_SET_DEFAULT(UseLSE, false);
    }
  }

  if (auxv & HWCAP_AES) {
    UseAES = UseAES || FLAG_IS_DEFAULT(UseAES);
    UseAESIntrinsics =
        UseAESIntrinsics || (UseAES && FLAG_IS_DEFAULT(UseAESIntrinsics));
    if (UseAESIntrinsics && !UseAES) {
      warning("UseAESIntrinsics enabled, but UseAES not, enabling");
      UseAES = true;
    }
  } else {
    if (UseAES) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
  }

  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
    UseCRC32Intrinsics = true;
  }

  if (auxv & HWCAP_CRC32) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
    }
  } else if (UseCRC32CIntrinsics) {
    warning("CRC32C is not available on the CPU");
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseFMA)) {
    FLAG_SET_DEFAULT(UseFMA, true);
  }

  if (UseMD5Intrinsics) {
    warning("MD5 intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseMD5Intrinsics, false);
  }

  if (auxv & (HWCAP_SHA1 | HWCAP_SHA2)) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      FLAG_SET_DEFAULT(UseSHA, true);
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseSHA && (auxv & HWCAP_SHA1)) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA && (auxv & HWCAP_SHA2)) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA && (auxv & HWCAP_SHA512)) {
    // Do not auto-enable UseSHA512Intrinsics until it has been fully tested on hardware
    // if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      // FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    // }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

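  // GHASH acceleration relies on the polynomial multiply instructions
  // (PMULL/PMULL2) advertised via HWCAP_PMULL.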
  if (auxv & HWCAP_PMULL) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
    }
  } else if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

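  // DC ZVA zeroes an entire block of zva_length() bytes at once; use it for
  // bulk zeroing when DCZID_EL0 reports the instruction as enabled.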
  if (is_zva_enabled()) {
    if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
      FLAG_SET_DEFAULT(UseBlockZeroing, true);
    }
    if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
      FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
    }
  } else if (UseBlockZeroing) {
    warning("DC ZVA is not available on this CPU");
    FLAG_SET_DEFAULT(UseBlockZeroing, false);
  }

  if (auxv & HWCAP_SVE) {
    if (FLAG_IS_DEFAULT(UseSVE)) {
      FLAG_SET_DEFAULT(UseSVE, (auxv2 & HWCAP2_SVE2) ? 2 : 1);
    }
    if (UseSVE > 0) {
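      // prctl(PR_SVE_GET_VL) reports the calling thread's current SVE vector
      // length in bytes (in the low bits of the return value).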
      _initial_sve_vector_length = prctl(PR_SVE_GET_VL);
    }
  } else if (UseSVE > 0) {
    warning("UseSVE specified, but not supported on current CPU. Disabling SVE.");
    FLAG_SET_DEFAULT(UseSVE, 0);
  }

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

  if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
    FLAG_SET_DEFAULT(UsePopCountInstruction, true);
  }

  if (!UsePopCountInstruction) {
    warning("UsePopCountInstruction is always enabled on this CPU");
    UsePopCountInstruction = true;
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }

  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }

  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }

  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }

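  // With SVE enabled, MaxVectorSize tracks the hardware vector length and may
  // be re-negotiated with the kernel via prctl(PR_SVE_SET_VL) below.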
  if (UseSVE > 0) {
    if (FLAG_IS_DEFAULT(MaxVectorSize)) {
      MaxVectorSize = _initial_sve_vector_length;
    } else if (MaxVectorSize < 16) {
      warning("SVE does not support vector length less than 16 bytes. Disabling SVE.");
      UseSVE = 0;
    } else if ((MaxVectorSize % 16) == 0 && is_power_of_2(MaxVectorSize)) {
      int new_vl = prctl(PR_SVE_SET_VL, MaxVectorSize);
      _initial_sve_vector_length = new_vl;
      // If MaxVectorSize is larger than the largest SVE vector length the
      // system supports, the prctl() call above sets the task vector length
      // to that largest supported value, so update MaxVectorSize to match it.
      if (new_vl < 0) {
        vm_exit_during_initialization(
          err_msg("Current system does not support SVE vector length for MaxVectorSize: %d",
                  (int)MaxVectorSize));
      } else if (new_vl != MaxVectorSize) {
        warning("Current system only supports max SVE vector length %d. Set MaxVectorSize to %d",
                new_vl, new_vl);
      }
      MaxVectorSize = new_vl;
    } else {
      vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize));
    }
  }

  if (UseSVE == 0) {  // NEON
    int min_vector_size = 8;
    int max_vector_size = 16;
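    // NEON registers are fixed at 128 bits, so MaxVectorSize is capped at 16
    // bytes on non-SVE hardware.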
    if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
      if (!is_power_of_2(MaxVectorSize)) {
        vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize));
      } else if (MaxVectorSize < min_vector_size) {
        warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
        FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
      } else if (MaxVectorSize > max_vector_size) {
        warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
        FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
      }
    } else {
      FLAG_SET_DEFAULT(MaxVectorSize, 16);
    }
  }

  if (FLAG_IS_DEFAULT(OptoScheduling)) {
    OptoScheduling = true;
  }

  if (FLAG_IS_DEFAULT(AlignVector)) {
    AlignVector = AvoidUnalignedAccesses;
  }
#endif
}

void VM_Version::initialize() {
  ResourceMark rm;

  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
  }

  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
                                   g.generate_getPsrInfo());

  get_processor_features();

  UNSUPPORTED_OPTION(CriticalJNINatives);
}