rev 56859 : 8233787: Break cycle in vm_version* includes
Reviewed-by:
1 /*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "asm/macroAssembler.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "logging/log.hpp"
30 #include "logging/logStream.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "runtime/java.hpp"
33 #include "runtime/os.hpp"
34 #include "runtime/stubCodeGenerator.hpp"
35 #include "utilities/virtualizationSupport.hpp"
36 #include "vm_version_x86.hpp"
37
38 #include OS_HEADER_INLINE(os)
39
40 int VM_Version::_cpu;
41 int VM_Version::_model;
42 int VM_Version::_stepping;
43 VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
44
45 // Address of instruction which causes SEGV
46 address VM_Version::_cpuinfo_segv_addr = 0;
47 // Address of instruction after the one which causes SEGV
48 address VM_Version::_cpuinfo_cont_addr = 0;
49
50 static BufferBlob* stub_blob;
51 static const int stub_size = 1100;
52
53 extern "C" {
54 typedef void (*get_cpu_info_stub_t)(void*);
55 }
56 static get_cpu_info_stub_t get_cpu_info_stub = NULL;
57
58
59 class VM_Version_StubGenerator: public StubCodeGenerator {
60 public:
61
62 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
63
64 address generate_get_cpu_info() {
65 // Flags to test CPU type.
66 const uint32_t HS_EFL_AC = 0x40000;
67 const uint32_t HS_EFL_ID = 0x200000;
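    // AC is EFLAGS bit 18 (only toggleable on a 486 or later); ID is EFLAGS bit 21
    // (only toggleable when the CPUID instruction is available).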
68 // Values for when we don't have a CPUID instruction.
69 const int CPU_FAMILY_SHIFT = 8;
70 const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
71 const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
72 bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
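    // Probe the AVX-512 (EVEX) state below unless the user explicitly limited UseAVX to 2 or less.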
73
74 Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
75 Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
76 Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
77
78 StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
79 # define __ _masm->
80
81 address start = __ pc();
82
83 //
84 // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
85 //
86 // LP64: rcx and rdx are first and second argument registers on windows
87
88 __ push(rbp);
89 #ifdef _LP64
90 __ mov(rbp, c_rarg0); // cpuid_info address
91 #else
92 __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
93 #endif
94 __ push(rbx);
95 __ push(rsi);
96 __ pushf(); // preserve flags (rbx and rsi were saved above)
97 __ pop(rax);
98 __ push(rax);
99 __ mov(rcx, rax);
100 //
101 // if we are unable to change the AC flag, we have a 386
102 //
103 __ xorl(rax, HS_EFL_AC);
104 __ push(rax);
105 __ popf();
106 __ pushf();
107 __ pop(rax);
108 __ cmpptr(rax, rcx);
109 __ jccb(Assembler::notEqual, detect_486);
110
111 __ movl(rax, CPU_FAMILY_386);
112 __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
113 __ jmp(done);
114
115 //
116 // If we are unable to change the ID flag, we have a 486 which does
117 // not support the "cpuid" instruction.
118 //
119 __ bind(detect_486);
120 __ mov(rax, rcx);
121 __ xorl(rax, HS_EFL_ID);
122 __ push(rax);
123 __ popf();
124 __ pushf();
125 __ pop(rax);
126 __ cmpptr(rcx, rax);
127 __ jccb(Assembler::notEqual, detect_586);
128
129 __ bind(cpu486);
130 __ movl(rax, CPU_FAMILY_486);
131 __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
132 __ jmp(done);
133
134 //
135 // At this point, we have a chip which supports the "cpuid" instruction
136 //
137 __ bind(detect_586);
138 __ xorl(rax, rax);
139 __ cpuid();
140 __ orl(rax, rax);
141 __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input
142 // value of at least 1, we give up and
143 // assume a 486
144 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
145 __ movl(Address(rsi, 0), rax);
146 __ movl(Address(rsi, 4), rbx);
147 __ movl(Address(rsi, 8), rcx);
148 __ movl(Address(rsi,12), rdx);
149
150 __ cmpl(rax, 0xa); // Is cpuid(0xB) supported?
151 __ jccb(Assembler::belowEqual, std_cpuid4);
152
153 //
154 // cpuid(0xB) Processor Topology
155 //
156 __ movl(rax, 0xb);
157 __ xorl(rcx, rcx); // Threads level
158 __ cpuid();
159
160 __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
161 __ movl(Address(rsi, 0), rax);
162 __ movl(Address(rsi, 4), rbx);
163 __ movl(Address(rsi, 8), rcx);
164 __ movl(Address(rsi,12), rdx);
165
166 __ movl(rax, 0xb);
167 __ movl(rcx, 1); // Cores level
168 __ cpuid();
169 __ push(rax);
170 __ andl(rax, 0x1f); // Determine if valid topology level
171 __ orl(rax, rbx); // eax[4:0] | ebx[15:0] == 0 indicates an invalid level
172 __ andl(rax, 0xffff);
173 __ pop(rax);
174 __ jccb(Assembler::equal, std_cpuid4);
175
176 __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
177 __ movl(Address(rsi, 0), rax);
178 __ movl(Address(rsi, 4), rbx);
179 __ movl(Address(rsi, 8), rcx);
180 __ movl(Address(rsi,12), rdx);
181
182 __ movl(rax, 0xb);
183 __ movl(rcx, 2); // Packages level
184 __ cpuid();
185 __ push(rax);
186 __ andl(rax, 0x1f); // Determine if valid topology level
187 __ orl(rax, rbx); // eax[4:0] | ebx[15:0] == 0 indicates an invalid level
188 __ andl(rax, 0xffff);
189 __ pop(rax);
190 __ jccb(Assembler::equal, std_cpuid4);
191
192 __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
193 __ movl(Address(rsi, 0), rax);
194 __ movl(Address(rsi, 4), rbx);
195 __ movl(Address(rsi, 8), rcx);
196 __ movl(Address(rsi,12), rdx);
197
198 //
199 // cpuid(0x4) Deterministic cache params
200 //
201 __ bind(std_cpuid4);
202 __ movl(rax, 4);
203 __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
204 __ jccb(Assembler::greater, std_cpuid1);
205
206 __ xorl(rcx, rcx); // L1 cache
207 __ cpuid();
208 __ push(rax);
209 __ andl(rax, 0x1f); // Determine if valid cache parameters used
210 __ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache
211 __ pop(rax);
212 __ jccb(Assembler::equal, std_cpuid1);
213
214 __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
215 __ movl(Address(rsi, 0), rax);
216 __ movl(Address(rsi, 4), rbx);
217 __ movl(Address(rsi, 8), rcx);
218 __ movl(Address(rsi,12), rdx);
219
220 //
221 // Standard cpuid(0x1)
222 //
223 __ bind(std_cpuid1);
224 __ movl(rax, 1);
225 __ cpuid();
226 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
227 __ movl(Address(rsi, 0), rax);
228 __ movl(Address(rsi, 4), rbx);
229 __ movl(Address(rsi, 8), rcx);
230 __ movl(Address(rsi,12), rdx);
231
232 //
233 // Check if OS has enabled XGETBV instruction to access XCR0
234 // (OSXSAVE feature flag) and CPU supports AVX
235 //
236 __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
237 __ cmpl(rcx, 0x18000000);
238 __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
239
240 //
241 // XCR0, XFEATURE_ENABLED_MASK register
242 //
243 __ xorl(rcx, rcx); // zero for XCR0 register
244 __ xgetbv();
245 __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
246 __ movl(Address(rsi, 0), rax);
247 __ movl(Address(rsi, 4), rdx);
248
249 //
250 // cpuid(0x7) Structured Extended Features
251 //
252 __ bind(sef_cpuid);
253 __ movl(rax, 7);
254 __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
255 __ jccb(Assembler::greater, ext_cpuid);
256
257 __ xorl(rcx, rcx);
258 __ cpuid();
259 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
260 __ movl(Address(rsi, 0), rax);
261 __ movl(Address(rsi, 4), rbx);
262 __ movl(Address(rsi, 8), rcx);
263 __ movl(Address(rsi, 12), rdx);
264
265 //
266 // Extended cpuid(0x80000000)
267 //
268 __ bind(ext_cpuid);
269 __ movl(rax, 0x80000000);
270 __ cpuid();
271 __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
272 __ jcc(Assembler::belowEqual, done);
273 __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
274 __ jcc(Assembler::belowEqual, ext_cpuid1);
275 __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
276 __ jccb(Assembler::belowEqual, ext_cpuid5);
277 __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
278 __ jccb(Assembler::belowEqual, ext_cpuid7);
279 __ cmpl(rax, 0x80000008); // Is cpuid(0x80000009 and above) supported?
280 __ jccb(Assembler::belowEqual, ext_cpuid8);
281 __ cmpl(rax, 0x8000001E); // Is cpuid(0x8000001E) supported?
282 __ jccb(Assembler::below, ext_cpuid8);
283 //
284 // Extended cpuid(0x8000001E)
285 //
286 __ movl(rax, 0x8000001E);
287 __ cpuid();
288 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
289 __ movl(Address(rsi, 0), rax);
290 __ movl(Address(rsi, 4), rbx);
291 __ movl(Address(rsi, 8), rcx);
292 __ movl(Address(rsi,12), rdx);
293
294 //
295 // Extended cpuid(0x80000008)
296 //
297 __ bind(ext_cpuid8);
298 __ movl(rax, 0x80000008);
299 __ cpuid();
300 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
301 __ movl(Address(rsi, 0), rax);
302 __ movl(Address(rsi, 4), rbx);
303 __ movl(Address(rsi, 8), rcx);
304 __ movl(Address(rsi,12), rdx);
305
306 //
307 // Extended cpuid(0x80000007)
308 //
309 __ bind(ext_cpuid7);
310 __ movl(rax, 0x80000007);
311 __ cpuid();
312 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
313 __ movl(Address(rsi, 0), rax);
314 __ movl(Address(rsi, 4), rbx);
315 __ movl(Address(rsi, 8), rcx);
316 __ movl(Address(rsi,12), rdx);
317
318 //
319 // Extended cpuid(0x80000005)
320 //
321 __ bind(ext_cpuid5);
322 __ movl(rax, 0x80000005);
323 __ cpuid();
324 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
325 __ movl(Address(rsi, 0), rax);
326 __ movl(Address(rsi, 4), rbx);
327 __ movl(Address(rsi, 8), rcx);
328 __ movl(Address(rsi,12), rdx);
329
330 //
331 // Extended cpuid(0x80000001)
332 //
333 __ bind(ext_cpuid1);
334 __ movl(rax, 0x80000001);
335 __ cpuid();
336 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
337 __ movl(Address(rsi, 0), rax);
338 __ movl(Address(rsi, 4), rbx);
339 __ movl(Address(rsi, 8), rcx);
340 __ movl(Address(rsi,12), rdx);
341
342 //
343 // Check if OS has enabled XGETBV instruction to access XCR0
344 // (OSXSAVE feature flag) and CPU supports AVX
345 //
346 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
347 __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
348 __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
349 __ cmpl(rcx, 0x18000000);
350 __ jccb(Assembler::notEqual, done); // jump if AVX is not supported
351
352 __ movl(rax, 0x6);
353 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
354 __ cmpl(rax, 0x6);
355 __ jccb(Assembler::equal, start_simd_check); // jump if the OS has enabled SSE and YMM state in XCR0
356
357 // we need to bridge farther than imm8, so we use this island as a thunk
358 __ bind(done);
359 __ jmp(wrapup);
360
361 __ bind(start_simd_check);
362 //
363 // Some OSs have a bug where the upper 128/256 bits of the YMM/ZMM
364 // registers are not restored after signal processing.
365 // Generate a SEGV here (reference through NULL)
366 // and check the upper YMM/ZMM bits after it.
367 //
368 intx saved_useavx = UseAVX;
369 intx saved_usesse = UseSSE;
370 // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
371 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
372 __ movl(rax, 0x10000);
373 __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx, bit 16 == avx512f
374 __ cmpl(rax, 0x10000);
375 __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
376 // check _cpuid_info.xem_xcr0_eax.bits.opmask
377 // check _cpuid_info.xem_xcr0_eax.bits.zmm512
378 // check _cpuid_info.xem_xcr0_eax.bits.zmm32
379 __ movl(rax, 0xE0);
380 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
381 __ cmpl(rax, 0xE0);
382 __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
383
384 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
385 __ movl(rax, Address(rsi, 0));
386 __ cmpl(rax, 0x50654); // If it is Skylake
387 __ jcc(Assembler::equal, legacy_setup);
388 // If UseAVX is uninitialized or is set by the user to include EVEX
389 if (use_evex) {
390 // EVEX setup: run in lowest evex mode
391 VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
392 UseAVX = 3;
393 UseSSE = 2;
394 #ifdef _WINDOWS
395 // xmm6-xmm15 are callee-saved (nonvolatile) in the Windows x64 ABI, so save the registers we clobber
396 // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
397 __ subptr(rsp, 64);
398 __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
399 #ifdef _LP64
400 __ subptr(rsp, 64);
401 __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
402 __ subptr(rsp, 64);
403 __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
404 #endif // _LP64
405 #endif // _WINDOWS
406
407 // load value into all 64 bytes of zmm7 register
408 __ movl(rcx, VM_Version::ymm_test_value());
409 __ movdl(xmm0, rcx);
410 __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
411 __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
412 #ifdef _LP64
413 __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
414 __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
415 #endif
416 VM_Version::clean_cpuFeatures();
417 __ jmp(save_restore_except);
418 }
419
420 __ bind(legacy_setup);
421 // AVX setup
422 VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
423 UseAVX = 1;
424 UseSSE = 2;
425 #ifdef _WINDOWS
426 __ subptr(rsp, 32);
427 __ vmovdqu(Address(rsp, 0), xmm7);
428 #ifdef _LP64
429 __ subptr(rsp, 32);
430 __ vmovdqu(Address(rsp, 0), xmm8);
431 __ subptr(rsp, 32);
432 __ vmovdqu(Address(rsp, 0), xmm15);
433 #endif // _LP64
434 #endif // _WINDOWS
435
436 // load value into all 32 bytes of ymm7 register
437 __ movl(rcx, VM_Version::ymm_test_value());
438
439 __ movdl(xmm0, rcx);
440 __ pshufd(xmm0, xmm0, 0x00);
441 __ vinsertf128_high(xmm0, xmm0);
442 __ vmovdqu(xmm7, xmm0);
443 #ifdef _LP64
444 __ vmovdqu(xmm8, xmm0);
445 __ vmovdqu(xmm15, xmm0);
446 #endif
447 VM_Version::clean_cpuFeatures();
448
449 __ bind(save_restore_except);
450 __ xorl(rsi, rsi);
451 VM_Version::set_cpuinfo_segv_addr(__ pc());
452 // Generate SEGV
453 __ movl(rax, Address(rsi, 0));
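    // The platform SEGV handler recognizes _cpuinfo_segv_addr and is expected to resume
    // execution at the continuation address recorded just below.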
454
455 VM_Version::set_cpuinfo_cont_addr(__ pc());
456 // Returns here after signal. Save xmm0 to check it later.
457
458 // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
459 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
460 __ movl(rax, 0x10000);
461 __ andl(rax, Address(rsi, 4));
462 __ cmpl(rax, 0x10000);
463 __ jcc(Assembler::notEqual, legacy_save_restore);
464 // check _cpuid_info.xem_xcr0_eax.bits.opmask
465 // check _cpuid_info.xem_xcr0_eax.bits.zmm512
466 // check _cpuid_info.xem_xcr0_eax.bits.zmm32
467 __ movl(rax, 0xE0);
468 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
469 __ cmpl(rax, 0xE0);
470 __ jcc(Assembler::notEqual, legacy_save_restore);
471
472 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
473 __ movl(rax, Address(rsi, 0));
474 __ cmpl(rax, 0x50654); // If it is Skylake
475 __ jcc(Assembler::equal, legacy_save_restore);
476
477 // If UseAVX is uninitialized or is set by the user to include EVEX
478 if (use_evex) {
479 // EVEX check: run in lowest evex mode
480 VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
481 UseAVX = 3;
482 UseSSE = 2;
483 __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
484 __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
485 __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
486 #ifdef _LP64
487 __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
488 __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
489 #endif
490
491 #ifdef _WINDOWS
492 #ifdef _LP64
493 __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
494 __ addptr(rsp, 64);
495 __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
496 __ addptr(rsp, 64);
497 #endif // _LP64
498 __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
499 __ addptr(rsp, 64);
500 #endif // _WINDOWS
501 generate_vzeroupper(wrapup);
502 VM_Version::clean_cpuFeatures();
503 UseAVX = saved_useavx;
504 UseSSE = saved_usesse;
505 __ jmp(wrapup);
506 }
507
508 __ bind(legacy_save_restore);
509 // AVX check
510 VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
511 UseAVX = 1;
512 UseSSE = 2;
513 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
514 __ vmovdqu(Address(rsi, 0), xmm0);
515 __ vmovdqu(Address(rsi, 32), xmm7);
516 #ifdef _LP64
517 __ vmovdqu(Address(rsi, 64), xmm8);
518 __ vmovdqu(Address(rsi, 96), xmm15);
519 #endif
520
521 #ifdef _WINDOWS
522 #ifdef _LP64
523 __ vmovdqu(xmm15, Address(rsp, 0));
524 __ addptr(rsp, 32);
525 __ vmovdqu(xmm8, Address(rsp, 0));
526 __ addptr(rsp, 32);
527 #endif // _LP64
528 __ vmovdqu(xmm7, Address(rsp, 0));
529 __ addptr(rsp, 32);
530 #endif // _WINDOWS
531 generate_vzeroupper(wrapup);
532 VM_Version::clean_cpuFeatures();
533 UseAVX = saved_useavx;
534 UseSSE = saved_usesse;
535
536 __ bind(wrapup);
537 __ popf();
538 __ pop(rsi);
539 __ pop(rbx);
540 __ pop(rbp);
541 __ ret(0);
542
543 # undef __
544
545 return start;
546 };
547 void generate_vzeroupper(Label& L_wrapup) {
548 # define __ _masm->
549 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
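    // 0x756e6547 is "Genu" (little-endian) from "GenuineIntel"; non-Intel CPUs branch straight to L_wrapup.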
550 __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
551 __ jcc(Assembler::notEqual, L_wrapup);
552 __ movl(rcx, 0x0FFF0FF0);
553 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
554 __ andl(rcx, Address(rsi, 0));
555 __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
556 __ jcc(Assembler::equal, L_wrapup);
557 __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
558 __ jcc(Assembler::equal, L_wrapup);
559 __ vzeroupper();
560 # undef __
561 }
562 };
563
564 void VM_Version::get_processor_features() {
565
566 _cpu = 4; // 486 by default
567 _model = 0;
568 _stepping = 0;
569 _features = 0;
570 _logical_processors_per_package = 1;
571 // i486 internal cache is both I&D and has a 16-byte line size
572 _L1_data_cache_line_size = 16;
573
574 // Get raw processor info
575
576 get_cpu_info_stub(&_cpuid_info);
577
578 assert_is_initialized();
579 _cpu = extended_cpu_family();
580 _model = extended_cpu_model();
581 _stepping = cpu_stepping();
582
583 if (cpu_family() > 4) { // it supports CPUID
584 _features = feature_flags();
585 // Logical processors are only available on P4s and above,
586 // and only if hyperthreading is available.
587 _logical_processors_per_package = logical_processor_count();
588 _L1_data_cache_line_size = L1_line_size();
589 }
590
591 _supports_cx8 = supports_cmpxchg8();
592 // xchg and xadd instructions
593 _supports_atomic_getset4 = true;
594 _supports_atomic_getadd4 = true;
595 LP64_ONLY(_supports_atomic_getset8 = true);
596 LP64_ONLY(_supports_atomic_getadd8 = true);
597
598 #ifdef _LP64
599 // OS should support SSE for x64 and hardware should support at least SSE2.
600 if (!VM_Version::supports_sse2()) {
601 vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
602 }
603 // In 64-bit mode, SSE2 is the minimum.
604 if (UseSSE < 2) UseSSE = 2;
605 #endif
606
607 #ifdef AMD64
608 // flush_icache_stub has to be generated first.
609 // That is why the ICache line size is hard-coded in the ICache class;
610 // see icache_x86.hpp. It is also the reason why we can't use the
611 // clflush instruction in the 32-bit VM, since it could be running
612 // on a CPU which does not support it.
613 //
614 // The only thing we can do is verify that the hard-coded
615 // ICache::line_size has the correct value.
616 guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
617 // clflush_size is size in quadwords (8 bytes).
618 guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
619 #endif
620
621 #ifdef _LP64
622 // Assigning this field effectively enables Unsafe.writebackMemory()
623 // by initializing UnsafeConstants.DATA_CACHE_LINE_FLUSH_SIZE to non-zero.
624 // It is only implemented on x86_64 and only if the OS plays ball.
625 if (os::supports_map_sync()) {
626 // publish the data cache line flush size to the generic field, otherwise
627 // let it default to zero, thereby disabling writeback
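    // clflush_size is reported in 8-byte quadwords; multiply by 8 to publish the flush size in bytes.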
628 _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
629 }
630 #endif
631 // If the OS doesn't support SSE, we can't use this feature even if the HW does
632 if (!os::supports_sse())
633 _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
634
635 if (UseSSE < 4) {
636 _features &= ~CPU_SSE4_1;
637 _features &= ~CPU_SSE4_2;
638 }
639
640 if (UseSSE < 3) {
641 _features &= ~CPU_SSE3;
642 _features &= ~CPU_SSSE3;
643 _features &= ~CPU_SSE4A;
644 }
645
646 if (UseSSE < 2)
647 _features &= ~CPU_SSE2;
648
649 if (UseSSE < 1)
650 _features &= ~CPU_SSE;
651
652 // Since AVX instructions are slower than SSE on some ZX CPUs, force UseAVX=0.
653 if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
654 UseAVX = 0;
655 }
656
657 // first try initial setting and detect what we can support
658 int use_avx_limit = 0;
659 if (UseAVX > 0) {
660 if (UseAVX > 2 && supports_evex()) {
661 use_avx_limit = 3;
662 } else if (UseAVX > 1 && supports_avx2()) {
663 use_avx_limit = 2;
664 } else if (UseAVX > 0 && supports_avx()) {
665 use_avx_limit = 1;
666 } else {
667 use_avx_limit = 0;
668 }
669 }
670 if (FLAG_IS_DEFAULT(UseAVX)) {
671 FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
672 if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
673 FLAG_SET_DEFAULT(UseAVX, 2); // Set UseAVX=2 for Skylake
674 }
675 } else if (UseAVX > use_avx_limit) {
676 warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
677 FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
678 } else if (UseAVX < 0) {
679 warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
680 FLAG_SET_DEFAULT(UseAVX, 0);
681 }
682
683 if (UseAVX < 3) {
684 _features &= ~CPU_AVX512F;
685 _features &= ~CPU_AVX512DQ;
686 _features &= ~CPU_AVX512CD;
687 _features &= ~CPU_AVX512BW;
688 _features &= ~CPU_AVX512VL;
689 _features &= ~CPU_AVX512_VPOPCNTDQ;
690 _features &= ~CPU_VPCLMULQDQ;
691 _features &= ~CPU_VAES;
692 }
693
694 if (UseAVX < 2)
695 _features &= ~CPU_AVX2;
696
697 if (UseAVX < 1) {
698 _features &= ~CPU_AVX;
699 _features &= ~CPU_VZEROUPPER;
700 }
701
702 if (logical_processors_per_package() == 1) {
703 // An HT processor could be installed on a system which doesn't support HT.
704 _features &= ~CPU_HT;
705 }
706
707 if (is_intel()) { // Intel cpus specific settings
708 if (is_knights_family()) {
709 _features &= ~CPU_VZEROUPPER;
710 }
711 }
712
713 char buf[256];
714 jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
715 cores_per_cpu(), threads_per_core(),
716 cpu_family(), _model, _stepping,
717 (supports_cmov() ? ", cmov" : ""),
718 (supports_cmpxchg8() ? ", cx8" : ""),
719 (supports_fxsr() ? ", fxsr" : ""),
720 (supports_mmx() ? ", mmx" : ""),
721 (supports_sse() ? ", sse" : ""),
722 (supports_sse2() ? ", sse2" : ""),
723 (supports_sse3() ? ", sse3" : ""),
724 (supports_ssse3()? ", ssse3": ""),
725 (supports_sse4_1() ? ", sse4.1" : ""),
726 (supports_sse4_2() ? ", sse4.2" : ""),
727 (supports_popcnt() ? ", popcnt" : ""),
728 (supports_avx() ? ", avx" : ""),
729 (supports_avx2() ? ", avx2" : ""),
730 (supports_aes() ? ", aes" : ""),
731 (supports_clmul() ? ", clmul" : ""),
732 (supports_erms() ? ", erms" : ""),
733 (supports_rtm() ? ", rtm" : ""),
734 (supports_mmx_ext() ? ", mmxext" : ""),
735 (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
736 (supports_lzcnt() ? ", lzcnt": ""),
737 (supports_sse4a() ? ", sse4a": ""),
738 (supports_ht() ? ", ht": ""),
739 (supports_tsc() ? ", tsc": ""),
740 (supports_tscinv_bit() ? ", tscinvbit": ""),
741 (supports_tscinv() ? ", tscinv": ""),
742 (supports_bmi1() ? ", bmi1" : ""),
743 (supports_bmi2() ? ", bmi2" : ""),
744 (supports_adx() ? ", adx" : ""),
745 (supports_evex() ? ", evex" : ""),
746 (supports_sha() ? ", sha" : ""),
747 (supports_fma() ? ", fma" : ""));
748 _features_string = os::strdup(buf);
749
750 // UseSSE is set to the smaller of what hardware supports and what
751 // the command line requires. I.e., you cannot set UseSSE to 2 on
752 // older Pentiums which do not support it.
753 int use_sse_limit = 0;
754 if (UseSSE > 0) {
755 if (UseSSE > 3 && supports_sse4_1()) {
756 use_sse_limit = 4;
757 } else if (UseSSE > 2 && supports_sse3()) {
758 use_sse_limit = 3;
759 } else if (UseSSE > 1 && supports_sse2()) {
760 use_sse_limit = 2;
761 } else if (UseSSE > 0 && supports_sse()) {
762 use_sse_limit = 1;
763 } else {
764 use_sse_limit = 0;
765 }
766 }
767 if (FLAG_IS_DEFAULT(UseSSE)) {
768 FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
769 } else if (UseSSE > use_sse_limit) {
770 warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
771 FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
772 } else if (UseSSE < 0) {
773 warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
774 FLAG_SET_DEFAULT(UseSSE, 0);
775 }
776
777 // Use AES instructions if available.
778 if (supports_aes()) {
779 if (FLAG_IS_DEFAULT(UseAES)) {
780 FLAG_SET_DEFAULT(UseAES, true);
781 }
782 if (!UseAES) {
783 if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
784 warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
785 }
786 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
787 } else {
788 if (UseSSE > 2) {
789 if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
790 FLAG_SET_DEFAULT(UseAESIntrinsics, true);
791 }
792 } else {
793 // The AES intrinsic stubs require AES instruction support (of course)
794 // but also require sse3 mode or higher for the instructions they use.
795 if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
796 warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
797 }
798 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
799 }
800
801 // --AES-CTR begins--
802 if (!UseAESIntrinsics) {
803 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
804 warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
805 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
806 }
807 } else {
808 if (supports_sse4_1()) {
809 if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
810 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
811 }
812 } else {
813 // The AES-CTR intrinsic stubs require AES instruction support (of course)
814 // but also require sse4.1 mode or higher for the instructions they use.
815 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
816 warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
817 }
818 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
819 }
820 }
821 // --AES-CTR ends--
822 }
823 } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
824 if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
825 warning("AES instructions are not available on this CPU");
826 FLAG_SET_DEFAULT(UseAES, false);
827 }
828 if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
829 warning("AES intrinsics are not available on this CPU");
830 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
831 }
832 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
833 warning("AES-CTR intrinsics are not available on this CPU");
834 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
835 }
836 }
837
838 // Use CLMUL instructions if available.
839 if (supports_clmul()) {
840 if (FLAG_IS_DEFAULT(UseCLMUL)) {
841 UseCLMUL = true;
842 }
843 } else if (UseCLMUL) {
844 if (!FLAG_IS_DEFAULT(UseCLMUL))
845 warning("CLMUL instructions not available on this CPU (AVX may also be required)");
846 FLAG_SET_DEFAULT(UseCLMUL, false);
847 }
848
849 if (UseCLMUL && (UseSSE > 2)) {
850 if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
851 UseCRC32Intrinsics = true;
852 }
853 } else if (UseCRC32Intrinsics) {
854 if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
855 warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
856 FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
857 }
858
859 if (supports_sse4_2() && supports_clmul()) {
860 if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
861 UseCRC32CIntrinsics = true;
862 }
863 } else if (UseCRC32CIntrinsics) {
864 if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
865 warning("CRC32C intrinsics are not available on this CPU");
866 }
867 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
868 }
869
870 // GHASH/GCM intrinsics
871 if (UseCLMUL && (UseSSE > 2)) {
872 if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
873 UseGHASHIntrinsics = true;
874 }
875 } else if (UseGHASHIntrinsics) {
876 if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
877 warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
878 FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
879 }
880
881 // Base64 Intrinsics (Check the condition for which the intrinsic will be active)
882 if ((UseAVX > 2) && supports_avx512vl() && supports_avx512bw()) {
883 if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
884 UseBASE64Intrinsics = true;
885 }
886 } else if (UseBASE64Intrinsics) {
887 if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
888 warning("Base64 intrinsic requires EVEX instructions on this CPU");
889 FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
890 }
891
892 if (supports_fma() && UseSSE >= 2) { // Check UseSSE since FMA code uses SSE instructions
893 if (FLAG_IS_DEFAULT(UseFMA)) {
894 UseFMA = true;
895 }
896 } else if (UseFMA) {
897 warning("FMA instructions are not available on this CPU");
898 FLAG_SET_DEFAULT(UseFMA, false);
899 }
900
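  // On 64-bit, UseSHA can also default to true without the SHA extensions when AVX2 and BMI2
  // are available, since the SHA-512 intrinsic below is implemented with those instructions.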
901 if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
902 if (FLAG_IS_DEFAULT(UseSHA)) {
903 UseSHA = true;
904 }
905 } else if (UseSHA) {
906 warning("SHA instructions are not available on this CPU");
907 FLAG_SET_DEFAULT(UseSHA, false);
908 }
909
910 if (supports_sha() && supports_sse4_1() && UseSHA) {
911 if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
912 FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
913 }
914 } else if (UseSHA1Intrinsics) {
915 warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
916 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
917 }
918
919 if (supports_sse4_1() && UseSHA) {
920 if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
921 FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
922 }
923 } else if (UseSHA256Intrinsics) {
924 warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
925 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
926 }
927
928 #ifdef _LP64
929 // These are only supported on 64-bit
930 if (UseSHA && supports_avx2() && supports_bmi2()) {
931 if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
932 FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
933 }
934 } else
935 #endif
936 if (UseSHA512Intrinsics) {
937 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
938 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
939 }
940
941 if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
942 FLAG_SET_DEFAULT(UseSHA, false);
943 }
944
945 if (UseAdler32Intrinsics) {
946 warning("Adler32Intrinsics not available on this CPU.");
947 FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
948 }
949
950 if (!supports_rtm() && UseRTMLocking) {
951 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
952 // setting during arguments processing. See use_biased_locking().
953 // VM_Version_init() is executed after UseBiasedLocking is used
954 // in Thread::allocate().
955 vm_exit_during_initialization("RTM instructions are not available on this CPU");
956 }
957
958 #if INCLUDE_RTM_OPT
959 if (UseRTMLocking) {
960 if (is_client_compilation_mode_vm()) {
961 // Only C2 does RTM locking optimization.
962 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
963 // setting during arguments processing. See use_biased_locking().
964 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
965 }
966 if (is_intel_family_core()) {
967 if ((_model == CPU_MODEL_HASWELL_E3) ||
968 (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
969 (_model == CPU_MODEL_BROADWELL && _stepping < 4)) {
970 // currently a collision between SKL and HSW_E3
971 if (!UnlockExperimentalVMOptions && UseAVX < 3) {
972 vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this "
973 "platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
974 } else {
975 warning("UseRTMLocking is only available as experimental option on this platform.");
976 }
977 }
978 }
979 if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
980 // RTM locking should be used only for applications with
981 // high lock contention. For now we do not use it by default.
982 vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
983 }
984 } else { // !UseRTMLocking
985 if (UseRTMForStackLocks) {
986 if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
987 warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
988 }
989 FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
990 }
991 if (UseRTMDeopt) {
992 FLAG_SET_DEFAULT(UseRTMDeopt, false);
993 }
994 if (PrintPreciseRTMLockingStatistics) {
995 FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
996 }
997 }
998 #else
999 if (UseRTMLocking) {
1000 // Only C2 does RTM locking optimization.
1001 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
1002 // setting during arguments processing. See use_biased_locking().
1003 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
1004 }
1005 #endif
1006
1007 #ifdef COMPILER2
1008 if (UseFPUForSpilling) {
1009 if (UseSSE < 2) {
1010 // Only supported with SSE2+
1011 FLAG_SET_DEFAULT(UseFPUForSpilling, false);
1012 }
1013 }
1014 #endif
1015
1016 #if COMPILER2_OR_JVMCI
1017 int max_vector_size = 0;
1018 if (UseSSE < 2) {
1019 // Vectors (in XMM) are only supported with SSE2+
1020 // SSE is always 2 on x64.
1021 max_vector_size = 0;
1022 } else if (UseAVX == 0 || !os_supports_avx_vectors()) {
1023 // 16-byte vectors (in XMM) are supported with SSE2+
1024 max_vector_size = 16;
1025 } else if (UseAVX == 1 || UseAVX == 2) {
1026 // 32-byte vectors (in YMM) are only supported with AVX+
1027 max_vector_size = 32;
1028 } else if (UseAVX > 2) {
1029 // 64-byte vectors (in ZMM) are only supported with AVX3 (AVX-512)
1030 max_vector_size = 64;
1031 }
1032
1033 #ifdef _LP64
1034 int min_vector_size = 4; // We require MaxVectorSize to be at least 4 on 64bit
1035 #else
1036 int min_vector_size = 0;
1037 #endif
1038
1039 if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
1040 if (MaxVectorSize < min_vector_size) {
1041 warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
1042 FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
1043 }
1044 if (MaxVectorSize > max_vector_size) {
1045 warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
1046 FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
1047 }
1048 if (!is_power_of_2(MaxVectorSize)) {
1049 warning("MaxVectorSize must be a power of 2, setting to default: %i", max_vector_size);
1050 FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
1051 }
1052 } else {
1053 // If default, use highest supported configuration
1054 FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
1055 }
1056
1057 #if defined(COMPILER2) && defined(ASSERT)
1058 if (MaxVectorSize > 0) {
1059 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
1060 tty->print_cr("State of YMM registers after signal handle:");
1061 int nreg = 2 LP64_ONLY(+2);
1062 const char* ymm_name[4] = {"0", "7", "8", "15"};
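      // ymm_save is filled by the legacy (AVX) save/restore path in the stub above:
      // xmm0 and xmm7 always, plus xmm8 and xmm15 on 64-bit (hence nreg is 2 or 4).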
1063 for (int i = 0; i < nreg; i++) {
1064 tty->print("YMM%s:", ymm_name[i]);
1065 for (int j = 7; j >=0; j--) {
1066 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
1067 }
1068 tty->cr();
1069 }
1070 }
1071 }
1072 #endif // COMPILER2 && ASSERT
1073
1074 if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
1075 if (!is_power_of_2(AVX3Threshold)) {
1076 warning("AVX3Threshold must be a power of 2");
1077 FLAG_SET_DEFAULT(AVX3Threshold, 4096);
1078 }
1079 }
1080
1081 #ifdef _LP64
1082 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
1083 UseMultiplyToLenIntrinsic = true;
1084 }
1085 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
1086 UseSquareToLenIntrinsic = true;
1087 }
1088 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
1089 UseMulAddIntrinsic = true;
1090 }
1091 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
1092 UseMontgomeryMultiplyIntrinsic = true;
1093 }
1094 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
1095 UseMontgomerySquareIntrinsic = true;
1096 }
1097 #else
1098 if (UseMultiplyToLenIntrinsic) {
1099 if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
1100 warning("multiplyToLen intrinsic is not available in 32-bit VM");
1101 }
1102 FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
1103 }
1104 if (UseMontgomeryMultiplyIntrinsic) {
1105 if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
1106 warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
1107 }
1108 FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
1109 }
1110 if (UseMontgomerySquareIntrinsic) {
1111 if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
1112 warning("montgomerySquare intrinsic is not available in 32-bit VM");
1113 }
1114 FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
1115 }
1116 if (UseSquareToLenIntrinsic) {
1117 if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
1118 warning("squareToLen intrinsic is not available in 32-bit VM");
1119 }
1120 FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
1121 }
1122 if (UseMulAddIntrinsic) {
1123 if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
1124 warning("mulAdd intrinsic is not available in 32-bit VM");
1125 }
1126 FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
1127 }
1128 #endif // _LP64
1129 #endif // COMPILER2_OR_JVMCI
1130
1131 // On new cpus instructions which update whole XMM register should be used
1132 // to prevent partial register stall due to dependencies on high half.
1133 //
1134 // UseXmmLoadAndClearUpper == true --> movsd(xmm, mem)
1135 // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
1136 // UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
1137 // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
1138
1139
1140 if (is_zx()) { // ZX cpus specific settings
1141 if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1142 UseStoreImmI16 = false; // don't use it on ZX cpus
1143 }
1144 if ((cpu_family() == 6) || (cpu_family() == 7)) {
1145 if (FLAG_IS_DEFAULT(UseAddressNop)) {
1146 // Use it on all ZX cpus
1147 UseAddressNop = true;
1148 }
1149 }
1150 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1151 UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
1152 }
1153 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1154 if (supports_sse3()) {
1155 UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
1156 } else {
1157 UseXmmRegToRegMoveAll = false;
1158 }
1159 }
1160 if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
1161 #ifdef COMPILER2
1162 if (FLAG_IS_DEFAULT(MaxLoopPad)) {
1163 // For new ZX cpus apply the following optimization:
1164 // don't align the beginning of a loop if there are enough instructions
1165 // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
1166 // in current fetch line (OptoLoopAlignment) or the padding
1167 // is big (> MaxLoopPad).
1168 // Set MaxLoopPad to 11 for new ZX cpus to reduce number of
1169 // generated NOP instructions. 11 is the largest size of one
1170 // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1171 MaxLoopPad = 11;
1172 }
1173 #endif // COMPILER2
1174 if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1175 UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
1176 }
1177 if (supports_sse4_2()) { // new ZX cpus
1178 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1179 UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
1180 }
1181 }
1182 if (supports_sse4_2()) {
1183 if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1184 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1185 }
1186 } else {
1187 if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1188 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1189 }
1190 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1191 }
1192 }
1193
1194 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1195 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1196 }
1197 }
1198
1199 if (is_amd_family()) { // AMD cpus specific settings
1200 if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
1201 // Use it on new AMD cpus starting from Opteron.
1202 UseAddressNop = true;
1203 }
1204 if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
1205 // Use it on new AMD cpus starting from Opteron.
1206 UseNewLongLShift = true;
1207 }
1208 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1209 if (supports_sse4a()) {
1210 UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
1211 } else {
1212 UseXmmLoadAndClearUpper = false;
1213 }
1214 }
1215 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1216 if (supports_sse4a()) {
1217 UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
1218 } else {
1219 UseXmmRegToRegMoveAll = false;
1220 }
1221 }
1222 if (FLAG_IS_DEFAULT(UseXmmI2F)) {
1223 if (supports_sse4a()) {
1224 UseXmmI2F = true;
1225 } else {
1226 UseXmmI2F = false;
1227 }
1228 }
1229 if (FLAG_IS_DEFAULT(UseXmmI2D)) {
1230 if (supports_sse4a()) {
1231 UseXmmI2D = true;
1232 } else {
1233 UseXmmI2D = false;
1234 }
1235 }
1236 if (supports_sse4_2()) {
1237 if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1238 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1239 }
1240 } else {
1241 if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1242 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1243 }
1244 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1245 }
1246
1247 // some defaults for AMD family 15h
1248 if (cpu_family() == 0x15) {
1249 // On family 15h processors default is no sw prefetch
1250 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1251 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1252 }
1253 // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
1254 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1255 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1256 }
1257 // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
1258 if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1259 FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1260 }
1261 if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1262 FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1263 }
1264 }
1265
1266 #ifdef COMPILER2
1267 if (cpu_family() < 0x17 && MaxVectorSize > 16) {
1268 // Limit vector size to 16 bytes on AMD cpus below family 17h.
1269 FLAG_SET_DEFAULT(MaxVectorSize, 16);
1270 }
1271 #endif // COMPILER2
1272
1273 // Some defaults for AMD family 17h || Hygon family 18h
1274 if (cpu_family() == 0x17 || cpu_family() == 0x18) {
1275 // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
1276 if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1277 FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1278 }
1279 if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1280 FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1281 }
1282 #ifdef COMPILER2
1283 if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1284 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1285 }
1286 #endif
1287 }
1288 }
1289
1290 if (is_intel()) { // Intel cpus specific settings
1291 if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1292 UseStoreImmI16 = false; // don't use it on Intel cpus
1293 }
1294 if (cpu_family() == 6 || cpu_family() == 15) {
1295 if (FLAG_IS_DEFAULT(UseAddressNop)) {
1296 // Use it on all Intel cpus starting from PentiumPro
1297 UseAddressNop = true;
1298 }
1299 }
1300 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1301 UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
1302 }
1303 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1304 if (supports_sse3()) {
1305 UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
1306 } else {
1307 UseXmmRegToRegMoveAll = false;
1308 }
1309 }
1310 if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
1311 #ifdef COMPILER2
1312 if (FLAG_IS_DEFAULT(MaxLoopPad)) {
1313 // For new Intel cpus apply the following optimization:
1314 // don't align the beginning of a loop if there are enough instructions
1315 // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
1316 // in current fetch line (OptoLoopAlignment) or the padding
1317 // is big (> MaxLoopPad).
1318 // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
1319 // generated NOP instructions. 11 is the largest size of one
1320 // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1321 MaxLoopPad = 11;
1322 }
1323 #endif // COMPILER2
1324 if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1325 UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
1326 }
1327 if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
1328 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1329 UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1330 }
1331 }
1332 if (supports_sse4_2()) {
1333 if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1334 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1335 }
1336 } else {
1337 if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1338 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1339 }
1340 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1341 }
1342 }
1343 if (is_atom_family() || is_knights_family()) {
1344 #ifdef COMPILER2
1345 if (FLAG_IS_DEFAULT(OptoScheduling)) {
1346 OptoScheduling = true;
1347 }
1348 #endif
1349 if (supports_sse4_2()) { // Silvermont
1350 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1351 UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1352 }
1353 }
1354 if (FLAG_IS_DEFAULT(UseIncDec)) {
1355 FLAG_SET_DEFAULT(UseIncDec, false);
1356 }
1357 }
1358 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1359 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1360 }
1361 }
1362
1363 #ifdef _LP64
1364 if (UseSSE42Intrinsics) {
1365 if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
1366 UseVectorizedMismatchIntrinsic = true;
1367 }
1368 } else if (UseVectorizedMismatchIntrinsic) {
1369 if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
1370 warning("vectorizedMismatch intrinsics are not available on this CPU");
1371 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
1372 }
1373 #else
1374 if (UseVectorizedMismatchIntrinsic) {
1375 if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
1376 warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
1377 }
1378 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
1379 }
1380 #endif // _LP64
1381
1382 // Use the count leading zeros instruction if available.
1383 if (supports_lzcnt()) {
1384 if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
1385 UseCountLeadingZerosInstruction = true;
1386 }
1387 } else if (UseCountLeadingZerosInstruction) {
1388 warning("lzcnt instruction is not available on this CPU");
1389 FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
1390 }
1391
1392 // Use count trailing zeros instruction if available
1393 if (supports_bmi1()) {
1394 // tzcnt does not require VEX prefix
1395 if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
1396 if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
1397 // Don't use tzcnt if BMI1 is switched off on command line.
1398 UseCountTrailingZerosInstruction = false;
1399 } else {
1400 UseCountTrailingZerosInstruction = true;
1401 }
1402 }
1403 } else if (UseCountTrailingZerosInstruction) {
1404 warning("tzcnt instruction is not available on this CPU");
1405 FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
1406 }
1407
1408 // BMI instructions (except tzcnt) use an encoding with VEX prefix.
1409 // VEX prefix is generated only when AVX > 0.
1410 if (supports_bmi1() && supports_avx()) {
1411 if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
1412 UseBMI1Instructions = true;
1413 }
1414 } else if (UseBMI1Instructions) {
1415 warning("BMI1 instructions are not available on this CPU (AVX is also required)");
1416 FLAG_SET_DEFAULT(UseBMI1Instructions, false);
1417 }
1418
1419 if (supports_bmi2() && supports_avx()) {
1420 if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
1421 UseBMI2Instructions = true;
1422 }
1423 } else if (UseBMI2Instructions) {
1424 warning("BMI2 instructions are not available on this CPU (AVX is also required)");
1425 FLAG_SET_DEFAULT(UseBMI2Instructions, false);
1426 }
1427
1428 // Use population count instruction if available.
1429 if (supports_popcnt()) {
1430 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
1431 UsePopCountInstruction = true;
1432 }
1433 } else if (UsePopCountInstruction) {
1434 warning("POPCNT instruction is not available on this CPU");
1435 FLAG_SET_DEFAULT(UsePopCountInstruction, false);
1436 }
1437
1438 // Use fast-string operations if available.
1439 if (supports_erms()) {
1440 if (FLAG_IS_DEFAULT(UseFastStosb)) {
1441 UseFastStosb = true;
1442 }
1443 } else if (UseFastStosb) {
1444 warning("fast-string operations are not available on this CPU");
1445 FLAG_SET_DEFAULT(UseFastStosb, false);
1446 }
1447
1448 // Use XMM/YMM MOVDQU instruction for Object Initialization
1449 if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
1450 if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
1451 UseXMMForObjInit = true;
1452 }
1453 } else if (UseXMMForObjInit) {
1454 warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
1455 FLAG_SET_DEFAULT(UseXMMForObjInit, false);
1456 }
1457
1458 #ifdef COMPILER2
1459 if (FLAG_IS_DEFAULT(AlignVector)) {
1460 // Modern processors allow misaligned memory operations for vectors.
1461 AlignVector = !UseUnalignedLoadStores;
1462 }
1463 #endif // COMPILER2
1464
1465 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1466 if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
1467 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
1468 } else if (!supports_sse() && supports_3dnow_prefetch()) {
1469 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1470 }
1471 }
1472
1473 // Allocation prefetch settings
1474 intx cache_line_size = prefetch_data_size();
1475 if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
1476 (cache_line_size > AllocatePrefetchStepSize)) {
1477 FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
1478 }
1479
1480 if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
1481 assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
1482 if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1483 warning("AllocatePrefetchDistance is set to 0 which disable prefetching. Ignoring AllocatePrefetchStyle flag.");
1484 }
1485 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1486 }
1487
1488 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
1489 bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
1490 FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
1491 }
1492
1493 if (is_intel() && cpu_family() == 6 && supports_sse3()) {
1494 if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
1495 supports_sse4_2() && supports_ht()) { // Nehalem based cpus
1496 FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
1497 }
1498 #ifdef COMPILER2
1499 if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
1500 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1501 }
1502 #endif
1503 }
1504
1505 if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
1506 #ifdef COMPILER2
1507 if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1508 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1509 }
1510 #endif
1511 }
1512
1513 #ifdef _LP64
1514 // Prefetch settings
1515
1516 // Prefetch interval for gc copy/scan == 9 dcache lines. Derived from
1517 // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
1518 // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
1519 // 256 bytes (4 dcache lines) was the nearest runner-up to 576.
1520
1521 // gc copy/scan is disabled if prefetchw isn't supported, because
1522 // Prefetch::write emits an inlined prefetchw on Linux.
1523 // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
1524 // The used prefetcht0 instruction works for both amd64 and em64t.
1525
1526 if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
1527 FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
1528 }
1529 if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
1530 FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
1531 }
1532 if (FLAG_IS_DEFAULT(PrefetchFieldsAhead)) {
1533 FLAG_SET_DEFAULT(PrefetchFieldsAhead, 1);
1534 }
1535 #endif
1536
1537 if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
1538 (cache_line_size > ContendedPaddingWidth))
1539 ContendedPaddingWidth = cache_line_size;
1540
1541 // This machine allows unaligned memory accesses
1542 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
1543 FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
1544 }
1545
1546 #ifndef PRODUCT
1547 if (log_is_enabled(Info, os, cpu)) {
1548 LogStream ls(Log(os, cpu)::info());
1549 outputStream* log = &ls;
1550 log->print_cr("Logical CPUs per core: %u",
1551 logical_processors_per_package());
1552 log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
1553 log->print("UseSSE=%d", (int) UseSSE);
1554 if (UseAVX > 0) {
1555 log->print(" UseAVX=%d", (int) UseAVX);
1556 }
1557 if (UseAES) {
1558 log->print(" UseAES=1");
1559 }
1560 #ifdef COMPILER2
1561 if (MaxVectorSize > 0) {
1562 log->print(" MaxVectorSize=%d", (int) MaxVectorSize);
1563 }
1564 #endif
1565 log->cr();
1566 log->print("Allocation");
1567 if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
1568 log->print_cr(": no prefetching");
1569 } else {
1570 log->print(" prefetching: ");
1571 if (UseSSE == 0 && supports_3dnow_prefetch()) {
1572 log->print("PREFETCHW");
1573 } else if (UseSSE >= 1) {
1574 if (AllocatePrefetchInstr == 0) {
1575 log->print("PREFETCHNTA");
1576 } else if (AllocatePrefetchInstr == 1) {
1577 log->print("PREFETCHT0");
1578 } else if (AllocatePrefetchInstr == 2) {
1579 log->print("PREFETCHT2");
1580 } else if (AllocatePrefetchInstr == 3) {
1581 log->print("PREFETCHW");
1582 }
1583 }
1584 if (AllocatePrefetchLines > 1) {
1585 log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
1586 } else {
1587 log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
1588 }
1589 }
1590
1591 if (PrefetchCopyIntervalInBytes > 0) {
1592 log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
1593 }
1594 if (PrefetchScanIntervalInBytes > 0) {
1595 log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
1596 }
1597 if (PrefetchFieldsAhead > 0) {
1598 log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
1599 }
1600 if (ContendedPaddingWidth > 0) {
1601 log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
1602 }
1603 }
1604 #endif // !PRODUCT
1605 }
1606
1607 void VM_Version::print_platform_virtualization_info(outputStream* st) {
1608 VirtualizationType vrt = VM_Version::get_detected_virtualization();
1609 if (vrt == XenHVM) {
1610 st->print_cr("Xen hardware-assisted virtualization detected");
1611 } else if (vrt == KVM) {
1612 st->print_cr("KVM virtualization detected");
1613 } else if (vrt == VMWare) {
1614 st->print_cr("VMWare virtualization detected");
1615 VirtualizationSupport::print_virtualization_info(st);
1616 } else if (vrt == HyperV) {
1617 st->print_cr("HyperV virtualization detected");
1618 }
1619 }
1620
1621 void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
1622 // TODO support 32 bit
1623 #if defined(_LP64)
1624 #if defined(_MSC_VER)
1625 // Allocate space for the code
1626 const int code_size = 100;
1627 ResourceMark rm;
1628 CodeBuffer cb("detect_virt", code_size, 0);
1629 MacroAssembler* a = new MacroAssembler(&cb);
1630 address code = a->pc();
1631 void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;
1632
1633 a->movq(r9, rbx); // save nonvolatile register
1634
1635 // next line would not work on 32-bit
1636 a->movq(rax, c_rarg0 /* rcx */);
1637 a->movq(r8, c_rarg1 /* rdx */);
1638 a->cpuid();
1639 a->movl(Address(r8, 0), rax);
1640 a->movl(Address(r8, 4), rbx);
1641 a->movl(Address(r8, 8), rcx);
1642 a->movl(Address(r8, 12), rdx);
1643
1644 a->movq(rbx, r9); // restore nonvolatile register
1645 a->ret(0);
1646
1647 uint32_t *code_end = (uint32_t *)a->pc();
1648 a->flush();
1649
1650 // execute code
1651 (*test)(idx, regs);
1652 #elif defined(__GNUC__)
1653 __asm__ volatile (
1654 " cpuid;"
1655 " mov %%eax,(%1);"
1656 " mov %%ebx,4(%1);"
1657 " mov %%ecx,8(%1);"
1658 " mov %%edx,12(%1);"
1659 : "+a" (idx)
1660 : "S" (regs)
1661 : "ebx", "ecx", "edx", "memory" );
1662 #endif
1663 #endif
1664 }
1665
1666
1667 bool VM_Version::use_biased_locking() {
1668 #if INCLUDE_RTM_OPT
1669 // RTM locking is most useful when there is high lock contention and
1670 // low data contention. With high lock contention the lock is usually
1671 // inflated and biased locking is not suitable for that case.
1672 // RTM locking code requires that biased locking is off.
1673 // Note: we can't switch off UseBiasedLocking in get_processor_features()
1674 // because it is used by Thread::allocate() which is called before
1675 // VM_Version::initialize().
1676 if (UseRTMLocking && UseBiasedLocking) {
1677 if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
1678 FLAG_SET_DEFAULT(UseBiasedLocking, false);
1679 } else {
1680 warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
1681 UseBiasedLocking = false;
1682 }
1683 }
1684 #endif
1685 return UseBiasedLocking;
1686 }
1687
1688 // On Xen, the cpuid instruction returns
1689 // eax / registers[0]: Version of Xen
1690 // ebx / registers[1]: chars 'XenV'
1691 // ecx / registers[2]: chars 'MMXe'
1692 // edx / registers[3]: chars 'nVMM'
1693 //
1694 // On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
1695 // ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
1696 // ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
1697 // edx / registers[3]: chars 'M' / 'ware' / 't Hv'
1698 //
1699 // more information :
1700 // https://kb.vmware.com/s/article/1009458
1701 //
1702 void VM_Version::check_virtualizations() {
1703 #if defined(_LP64)
1704 uint32_t registers[4];
1705 char signature[13];
1706 uint32_t base;
1707 signature[12] = '\0';
1708 memset((void*)registers, 0, 4*sizeof(uint32_t));
1709
1710 for (base = 0x40000000; base < 0x40010000; base += 0x100) {
1711 check_virt_cpuid(base, registers);
1712
1713 *(uint32_t *)(signature + 0) = registers[1];
1714 *(uint32_t *)(signature + 4) = registers[2];
1715 *(uint32_t *)(signature + 8) = registers[3];
1716
1717 if (strncmp("VMwareVMware", signature, 12) == 0) {
1718 Abstract_VM_Version::_detected_virtualization = VMWare;
1719 // check for extended metrics from guestlib
1720 VirtualizationSupport::initialize();
1721 }
1722
1723 if (strncmp("Microsoft Hv", signature, 12) == 0) {
1724 Abstract_VM_Version::_detected_virtualization = HyperV;
1725 }
1726
1727 if (strncmp("KVMKVMKVM", signature, 9) == 0) {
1728 Abstract_VM_Version::_detected_virtualization = KVM;
1729 }
1730
1731 if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
1732 Abstract_VM_Version::_detected_virtualization = XenHVM;
1733 }
1734 }
1735 #endif
1736 }
1737
1738 void VM_Version::initialize() {
1739 ResourceMark rm;
1740 // Making this stub must be the FIRST use of the assembler.
1741
1742 stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
1743 if (stub_blob == NULL) {
1744 vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
1745 }
1746 CodeBuffer c(stub_blob);
1747 VM_Version_StubGenerator g(&c);
1748 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
1749 g.generate_get_cpu_info());
1750
1751 get_processor_features();
1752 if (cpu_family() > 4) { // it supports CPUID
1753 check_virtualizations();
1754 }
1755 }
--- EOF ---