rev 47400 : [mq]: cmpxchg_ptr
1 /*
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "compiler/disassembler.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "prims/jvm.h"
32 #include "runtime/java.hpp"
33 #include "runtime/os.hpp"
34 #include "runtime/stubCodeGenerator.hpp"
35 #include "utilities/align.hpp"
36 #include "utilities/defaultStream.hpp"
37 #include "utilities/globalDefinitions.hpp"
38 #include "vm_version_ppc.hpp"
39
40 # include <sys/sysinfo.h>
41
42 bool VM_Version::_is_determine_features_test_running = false;
43 uint64_t VM_Version::_dscr_val = 0;
44
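// Warn if a trap-based check flag was set explicitly on the command line although
// it requires -XX:+UseSIGTRAP; such flags are switched off again below.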
45 #define MSG(flag) \
46 if (flag && !FLAG_IS_DEFAULT(flag)) \
47 jio_fprintf(defaultStream::error_stream(), \
48 "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
49 " -XX:+" #flag " will be disabled!\n");
50
51 void VM_Version::initialize() {
52
53 // Test which instructions are supported and measure cache line size.
54 determine_features();
55
56   // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the detected features.
57 if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
58 if (VM_Version::has_lqarx()) {
59 FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
60 } else if (VM_Version::has_popcntw()) {
61 FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
62 } else if (VM_Version::has_cmpb()) {
63 FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
64 } else if (VM_Version::has_popcntb()) {
65 FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
66 } else {
67 FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
68 }
69 }
70
71 bool PowerArchitecturePPC64_ok = false;
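  // The cases below intentionally fall through: requesting level N also requires
  // all features of the lower levels.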
72 switch (PowerArchitecturePPC64) {
73 case 8: if (!VM_Version::has_lqarx() ) break;
74 case 7: if (!VM_Version::has_popcntw()) break;
75 case 6: if (!VM_Version::has_cmpb() ) break;
76 case 5: if (!VM_Version::has_popcntb()) break;
77 case 0: PowerArchitecturePPC64_ok = true; break;
78 default: break;
79 }
80 guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
81 UINTX_FORMAT " on this machine", PowerArchitecturePPC64);
82
83 // Power 8: Configure Data Stream Control Register.
84 if (PowerArchitecturePPC64 >= 8 && has_mfdscr()) {
85 config_dscr();
86 }
87
88 if (!UseSIGTRAP) {
89 MSG(TrapBasedICMissChecks);
90 MSG(TrapBasedNotEntrantChecks);
91 MSG(TrapBasedNullChecks);
92 FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
93 FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
94 FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
95 }
96
97 #ifdef COMPILER2
98 if (!UseSIGTRAP) {
99 MSG(TrapBasedRangeChecks);
100 FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
101 }
102
103   // On Power6, determine the section size.
104 if (PowerArchitecturePPC64 == 6) {
105 determine_section_size();
106 // TODO: PPC port } else {
107 // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
108 }
109
110 if (PowerArchitecturePPC64 >= 8) {
111 if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
112 FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
113 }
114 } else {
115 if (SuperwordUseVSX) {
116 warning("SuperwordUseVSX specified, but needs at least Power8.");
117 FLAG_SET_DEFAULT(SuperwordUseVSX, false);
118 }
119 }
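  // With VSX the 128-bit vector-scalar registers allow 16-byte vectors;
  // without it, superword is limited to 8-byte operations.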
120 MaxVectorSize = SuperwordUseVSX ? 16 : 8;
121 #endif
122
123 // Create and print feature-string.
124 char buf[(num_features+1) * 16]; // Max 16 chars per feature.
125 jio_snprintf(buf, sizeof(buf),
126 "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
127 (has_fsqrt() ? " fsqrt" : ""),
128 (has_isel() ? " isel" : ""),
129 (has_lxarxeh() ? " lxarxeh" : ""),
130 (has_cmpb() ? " cmpb" : ""),
131 //(has_mftgpr()? " mftgpr" : ""),
132 (has_popcntb() ? " popcntb" : ""),
133 (has_popcntw() ? " popcntw" : ""),
134 (has_fcfids() ? " fcfids" : ""),
135 (has_vand() ? " vand" : ""),
136 (has_lqarx() ? " lqarx" : ""),
137 (has_vcipher() ? " aes" : ""),
138 (has_vpmsumb() ? " vpmsumb" : ""),
139 (has_tcheck() ? " tcheck" : ""),
140 (has_mfdscr() ? " mfdscr" : ""),
141 (has_vsx() ? " vsx" : ""),
142 (has_ldbrx() ? " ldbrx" : ""),
143 (has_stdbrx() ? " stdbrx" : ""),
144 (has_vshasig() ? " sha" : "")
145 // Make sure number of %s matches num_features!
146 );
147 _features_string = os::strdup(buf);
148 if (Verbose) {
149 print_features();
150 }
151
152 // PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
153 // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
154 _supports_cx8 = true;
155
156 // Used by C1.
157 _supports_atomic_getset4 = true;
158 _supports_atomic_getadd4 = true;
159 _supports_atomic_getset8 = true;
160 _supports_atomic_getadd8 = true;
161
162 UseSSE = 0; // Only on x86 and x64
163
164 intx cache_line_size = L1_data_cache_line_size();
165
166 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;
167
168 if (AllocatePrefetchStyle == 4) {
169 AllocatePrefetchStepSize = cache_line_size; // Need exact value.
170 if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
171     if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2 * cache_line_size; // Not set (negative): use 2 cache lines.
172 } else {
173 if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
174 if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
175     if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3 * cache_line_size; // Not set (negative): use 3 cache lines.
176 }
177
178 assert(AllocatePrefetchLines > 0, "invalid value");
179 if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
180 AllocatePrefetchLines = 1; // Conservative value.
181 }
182
183 if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
184 AllocatePrefetchStyle = 1; // Fall back if inappropriate.
185 }
186
187   assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be non-negative");
188
189   // On little-endian builds (VM_LITTLE_ENDIAN) running on Power8 or newer hardware,
190   // the CRC32 implementation uses the vector instructions available with Power8.
191   // In all other cases, it uses only generally available instructions.
192 if (!UseCRC32Intrinsics) {
193 if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
194 FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
195 }
196 }
197
198   // The CRC32C implementation does not use any of the vector instructions available with Power8.
199   // Exploiting them is still work in progress.
200 if (!UseCRC32CIntrinsics) {
201 if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
202 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
203 }
204 }
205
206 // TODO: Provide implementation.
207 if (UseAdler32Intrinsics) {
208 warning("Adler32Intrinsics not available on this CPU.");
209 FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
210 }
211
212 // The AES intrinsic stubs require AES instruction support.
213 if (has_vcipher()) {
214 if (FLAG_IS_DEFAULT(UseAES)) {
215 UseAES = true;
216 }
217 } else if (UseAES) {
218 if (!FLAG_IS_DEFAULT(UseAES))
219 warning("AES instructions are not available on this CPU");
220 FLAG_SET_DEFAULT(UseAES, false);
221 }
222
223 if (UseAES && has_vcipher()) {
224 if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
225 UseAESIntrinsics = true;
226 }
227 } else if (UseAESIntrinsics) {
228 if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
229 warning("AES intrinsics are not available on this CPU");
230 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
231 }
232
233 if (UseAESCTRIntrinsics) {
234 warning("AES/CTR intrinsics are not available on this CPU");
235 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
236 }
237
238 if (UseGHASHIntrinsics) {
239 warning("GHASH intrinsics are not available on this CPU");
240 FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
241 }
242
243 if (FLAG_IS_DEFAULT(UseFMA)) {
244 FLAG_SET_DEFAULT(UseFMA, true);
245 }
246
247 if (has_vshasig()) {
248 if (FLAG_IS_DEFAULT(UseSHA)) {
249 UseSHA = true;
250 }
251 } else if (UseSHA) {
252 if (!FLAG_IS_DEFAULT(UseSHA))
253 warning("SHA instructions are not available on this CPU");
254 FLAG_SET_DEFAULT(UseSHA, false);
255 }
256
257 if (UseSHA1Intrinsics) {
258 warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
259 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
260 }
261
262 if (UseSHA && has_vshasig()) {
263 if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
264 FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
265 }
266 } else if (UseSHA256Intrinsics) {
267 warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
268 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
269 }
270
271 if (UseSHA && has_vshasig()) {
272 if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
273 FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
274 }
275 } else if (UseSHA512Intrinsics) {
276 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
277 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
278 }
279
280 if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
281 FLAG_SET_DEFAULT(UseSHA, false);
282 }
283
284 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
285 UseSquareToLenIntrinsic = true;
286 }
287 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
288 UseMulAddIntrinsic = true;
289 }
290 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
291 UseMultiplyToLenIntrinsic = true;
292 }
293 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
294 UseMontgomeryMultiplyIntrinsic = true;
295 }
296 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
297 UseMontgomerySquareIntrinsic = true;
298 }
299
300 if (UseVectorizedMismatchIntrinsic) {
301 warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
302 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
303 }
304
305
306 // Adjust RTM (Restricted Transactional Memory) flags.
307 if (UseRTMLocking) {
308 // If CPU or OS are too old:
309 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
310 // setting during arguments processing. See use_biased_locking().
311 // VM_Version_init() is executed after UseBiasedLocking is used
312 // in Thread::allocate().
313 if (!has_tcheck()) {
314 vm_exit_during_initialization("RTM instructions are not available on this CPU");
315 }
316 bool os_too_old = true;
317 #ifdef AIX
318     // RTM is actually supported since AIX 7.1. Unfortunately, the initial support
319     // contained bugs, so it can only be enabled after AIX 7.1.3.30.
320     // The Java property os.version, which is used in RTM tests to decide
321     // whether the feature is available, only knows major and minor versions.
322     // We don't want to change this property, as user code might depend on it.
323     // So the tests cannot check for update level 3.30, and we only enable RTM
324     // with AIX 7.2.
325 if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2.
326 os_too_old = false;
327 }
328 #endif
329 #ifdef LINUX
330     // We require at least Linux kernel 4.2, in which the problematic behavior of
331     // syscalls being called in the middle of a transaction has been addressed.
332     // Please refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
333     // in the Linux kernel source tree: https://goo.gl/Kc5i7A
334 if (os::Linux::os_version_is_known()) {
335 if (os::Linux::os_version() >= 0x040200)
336 os_too_old = false;
337 } else {
338       vm_exit_during_initialization("RTM cannot be enabled: kernel version is unknown.");
339 }
340 #endif
341 if (os_too_old) {
342 vm_exit_during_initialization("RTM is not supported on this OS version.");
343 }
344 }
345
346 if (UseRTMLocking) {
347 #if INCLUDE_RTM_OPT
348 if (!UnlockExperimentalVMOptions) {
349       vm_exit_during_initialization("UseRTMLocking is only available as an experimental option on this platform. "
350                                     "It must be enabled via the -XX:+UnlockExperimentalVMOptions flag.");
351     } else {
352       warning("UseRTMLocking is only available as an experimental option on this platform.");
353 }
354 if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
355 // RTM locking should be used only for applications with
356 // high lock contention. For now we do not use it by default.
357       vm_exit_during_initialization("UseRTMLocking flag should only be set on the command line");
358 }
359 #else
360 // Only C2 does RTM locking optimization.
361 // Can't continue because UseRTMLocking affects UseBiasedLocking flag
362 // setting during arguments processing. See use_biased_locking().
363 vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
364 #endif
365 } else { // !UseRTMLocking
366 if (UseRTMForStackLocks) {
367 if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
368 warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
369 }
370 FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
371 }
372 if (UseRTMDeopt) {
373 FLAG_SET_DEFAULT(UseRTMDeopt, false);
374 }
375 if (PrintPreciseRTMLockingStatistics) {
376 FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
377 }
378 }
379
380 // This machine allows unaligned memory accesses
381 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
382 FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
383 }
384 }
385
386 bool VM_Version::use_biased_locking() {
387 #if INCLUDE_RTM_OPT
388 // RTM locking is most useful when there is high lock contention and
389 // low data contention. With high lock contention the lock is usually
390 // inflated and biased locking is not suitable for that case.
391 // RTM locking code requires that biased locking is off.
392 // Note: we can't switch off UseBiasedLocking in get_processor_features()
393 // because it is used by Thread::allocate() which is called before
394 // VM_Version::initialize().
395 if (UseRTMLocking && UseBiasedLocking) {
396 if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
397 FLAG_SET_DEFAULT(UseBiasedLocking, false);
398 } else {
399       warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
400 UseBiasedLocking = false;
401 }
402 }
403 #endif
404 return UseBiasedLocking;
405 }
406
407 void VM_Version::print_features() {
408 tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
409 }
410
411 #ifdef COMPILER2
412 // Determine the section size on Power6: If the section size is 8 instructions,
413 // there should be a difference of ~15% between the two test loops. If no
414 // difference is detected, the section size is assumed to be 32 instructions.
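// Both test loops execute the same instruction mix; they differ only in where
// dispatch-group boundaries (endgroup/brnop0) are placed, so the relative timing
// difference reflects the hardware's dispatch-group (section) size.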
415 void VM_Version::determine_section_size() {
416
417 int unroll = 80;
418
419   const int code_size = (2 * unroll * 32 + 100) * BytesPerInstWord;
420
421 // Allocate space for the code.
422 ResourceMark rm;
423 CodeBuffer cb("detect_section_size", code_size, 0);
424 MacroAssembler* a = new MacroAssembler(&cb);
425
426 uint32_t *code = (uint32_t *)a->pc();
427 // Emit code.
428 void (*test1)() = (void(*)())(void *)a->function_entry();
429
430 Label l1;
431
432 a->li(R4, 1);
433 a->sldi(R4, R4, 28);
434 a->b(l1);
435 a->align(CodeEntryAlignment);
436
437 a->bind(l1);
438
439 for (int i = 0; i < unroll; i++) {
440     // Loop 1
441 // ------- sector 0 ------------
442 // ;; 0
443 a->nop(); // 1
444 a->fpnop0(); // 2
445 a->fpnop1(); // 3
446 a->addi(R4,R4, -1); // 4
447
448 // ;; 1
449 a->nop(); // 5
450 a->fmr(F6, F6); // 6
451 a->fmr(F7, F7); // 7
452 a->endgroup(); // 8
453 // ------- sector 8 ------------
454
455 // ;; 2
456 a->nop(); // 9
457 a->nop(); // 10
458 a->fmr(F8, F8); // 11
459 a->fmr(F9, F9); // 12
460
461 // ;; 3
462 a->nop(); // 13
463 a->fmr(F10, F10); // 14
464 a->fmr(F11, F11); // 15
465 a->endgroup(); // 16
466 // -------- sector 16 -------------
467
468 // ;; 4
469 a->nop(); // 17
470 a->nop(); // 18
471 a->fmr(F15, F15); // 19
472 a->fmr(F16, F16); // 20
473
474 // ;; 5
475 a->nop(); // 21
476 a->fmr(F17, F17); // 22
477 a->fmr(F18, F18); // 23
478 a->endgroup(); // 24
479 // ------- sector 24 ------------
480
481 // ;; 6
482 a->nop(); // 25
483 a->nop(); // 26
484 a->fmr(F19, F19); // 27
485 a->fmr(F20, F20); // 28
486
487 // ;; 7
488 a->nop(); // 29
489 a->fmr(F21, F21); // 30
490 a->fmr(F22, F22); // 31
491 a->brnop0(); // 32
492
493 // ------- sector 32 ------------
494 }
495
496 // ;; 8
497 a->cmpdi(CCR0, R4, unroll); // 33
498 a->bge(CCR0, l1); // 34
499 a->blr();
500
501 // Emit code.
502 void (*test2)() = (void(*)())(void *)a->function_entry();
503 // uint32_t *code = (uint32_t *)a->pc();
504
505 Label l2;
506
507 a->li(R4, 1);
508 a->sldi(R4, R4, 28);
509 a->b(l2);
510 a->align(CodeEntryAlignment);
511
512 a->bind(l2);
513
514 for (int i = 0; i < unroll; i++) {
515     // Loop 2
516 // ------- sector 0 ------------
517 // ;; 0
518 a->brnop0(); // 1
519 a->nop(); // 2
520 //a->cmpdi(CCR0, R4, unroll);
521 a->fpnop0(); // 3
522 a->fpnop1(); // 4
523 a->addi(R4,R4, -1); // 5
524
525 // ;; 1
526
527 a->nop(); // 6
528 a->fmr(F6, F6); // 7
529 a->fmr(F7, F7); // 8
530 // ------- sector 8 ---------------
531
532 // ;; 2
533 a->endgroup(); // 9
534
535 // ;; 3
536 a->nop(); // 10
537 a->nop(); // 11
538 a->fmr(F8, F8); // 12
539
540 // ;; 4
541 a->fmr(F9, F9); // 13
542 a->nop(); // 14
543 a->fmr(F10, F10); // 15
544
545 // ;; 5
546 a->fmr(F11, F11); // 16
547 // -------- sector 16 -------------
548
549 // ;; 6
550 a->endgroup(); // 17
551
552 // ;; 7
553 a->nop(); // 18
554 a->nop(); // 19
555 a->fmr(F15, F15); // 20
556
557 // ;; 8
558 a->fmr(F16, F16); // 21
559 a->nop(); // 22
560 a->fmr(F17, F17); // 23
561
562 // ;; 9
563 a->fmr(F18, F18); // 24
564 // -------- sector 24 -------------
565
566 // ;; 10
567 a->endgroup(); // 25
568
569 // ;; 11
570 a->nop(); // 26
571 a->nop(); // 27
572 a->fmr(F19, F19); // 28
573
574 // ;; 12
575 a->fmr(F20, F20); // 29
576 a->nop(); // 30
577 a->fmr(F21, F21); // 31
578
579 // ;; 13
580 a->fmr(F22, F22); // 32
581 }
582
583 // -------- sector 32 -------------
584 // ;; 14
585 a->cmpdi(CCR0, R4, unroll); // 33
586 a->bge(CCR0, l2); // 34
587
588 a->blr();
589 uint32_t *code_end = (uint32_t *)a->pc();
590 a->flush();
591
592   double loop1_seconds, loop2_seconds, rel_diff;
593 uint64_t start1, stop1;
594
595 start1 = os::current_thread_cpu_time(false);
596 (*test1)();
597 stop1 = os::current_thread_cpu_time(false);
598   loop1_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);
599
600
601 start1 = os::current_thread_cpu_time(false);
602 (*test2)();
603 stop1 = os::current_thread_cpu_time(false);
604
605   loop2_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);
606
607   rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds * 100;
608
609 if (PrintAssembly) {
610 ttyLocker ttyl;
611 tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
612 Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
613 tty->print_cr("Time loop1 :%f", loop1_seconds);
614 tty->print_cr("Time loop2 :%f", loop2_seconds);
615 tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);
616
617 if (rel_diff > 12.0) {
618 tty->print_cr("Section Size 8 Instructions");
619     } else {
620 tty->print_cr("Section Size 32 Instructions or Power5");
621 }
622 }
623
624 #if 0 // TODO: PPC port
625 // Set sector size (if not set explicitly).
626 if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
627 if (rel_diff > 12.0) {
628 PdScheduling::power6SectorSize = 0x20;
629 } else {
630 PdScheduling::power6SectorSize = 0x80;
631 }
632 } else if (Power6SectorSize128PPC64) {
633 PdScheduling::power6SectorSize = 0x80;
634 } else {
635 PdScheduling::power6SectorSize = 0x20;
636 }
637 #endif
638 if (UsePower6SchedulerPPC64) Unimplemented();
639 }
640 #endif // COMPILER2
641
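// Approach: emit one instance of each optional instruction into a buffer and
// execute it. Unimplemented instructions raise SIGILL; the signal handler (see
// _is_determine_features_test_running) overwrites the offending instruction word
// with 0, so the buffer can be inspected afterwards to see which instructions
// survived. A second small stub executes dcbz once to measure the cache line size.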
642 void VM_Version::determine_features() {
643 #if defined(ABI_ELFv2)
644 // 1 InstWord per call for the blr instruction.
645 const int code_size = (num_features+1+2*1)*BytesPerInstWord;
646 #else
647 // 7 InstWords for each call (function descriptor + blr instruction).
648 const int code_size = (num_features+1+2*7)*BytesPerInstWord;
649 #endif
650 int features = 0;
651
652   // Create the test area.
653   enum { BUFFER_SIZE = 2*4*K }; // Needs to be >= 2 * max cache line size (cache line size can't exceed min page size).
654 char test_area[BUFFER_SIZE];
655 char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];
656
657 // Allocate space for the code.
658 ResourceMark rm;
659 CodeBuffer cb("detect_cpu_features", code_size, 0);
660 MacroAssembler* a = new MacroAssembler(&cb);
661
662 // Must be set to true so we can generate the test code.
663 _features = VM_Version::all_features_m;
664
665 // Emit code.
666   void (*test)(address addr, uint64_t offset) = (void(*)(address addr, uint64_t offset))(void *)a->function_entry();
667 uint32_t *code = (uint32_t *)a->pc();
668 // Don't use R0 in ldarx.
669 // Keep R3_ARG1 unmodified, it contains &field (see below).
670 // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
671 a->fsqrt(F3, F4); // code[0] -> fsqrt_m
672 a->fsqrts(F3, F4); // code[1] -> fsqrts_m
673 a->isel(R7, R5, R6, 0); // code[2] -> isel_m
674 a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3] -> lxarx_m
675 a->cmpb(R7, R5, R6); // code[4] -> cmpb
676 a->popcntb(R7, R5); // code[5] -> popcntb
677 a->popcntw(R7, R5); // code[6] -> popcntw
678 a->fcfids(F3, F4); // code[7] -> fcfids
679 a->vand(VR0, VR0, VR0); // code[8] -> vand
680   // arg0 of lqarx must be an even register, and (arg1 + arg2) must be a multiple of 16.
681 a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9] -> lqarx_m
682 a->vcipher(VR0, VR1, VR2); // code[10] -> vcipher
683 a->vpmsumb(VR0, VR1, VR2); // code[11] -> vpmsumb
684 a->tcheck(0); // code[12] -> tcheck
685 a->mfdscr(R0); // code[13] -> mfdscr
686 a->lxvd2x(VSR0, R3_ARG1); // code[14] -> vsx
687 a->ldbrx(R7, R3_ARG1, R4_ARG2); // code[15] -> ldbrx
688 a->stdbrx(R7, R3_ARG1, R4_ARG2); // code[16] -> stdbrx
689 a->vshasigmaw(VR0, VR1, 1, 0xF); // code[17] -> vshasig
690 a->blr();
691
692 // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
693 void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
694 a->dcbz(R3_ARG1); // R3_ARG1 = addr
695 a->blr();
696
697 uint32_t *code_end = (uint32_t *)a->pc();
698 a->flush();
699 _features = VM_Version::unknown_m;
700
701 // Print the detection code.
702 if (PrintAssembly) {
703 ttyLocker ttyl;
704 tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
705 Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
706 }
707
708 // Measure cache line size.
709 memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
710 (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
711 int count = 0; // count zeroed bytes
712 for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
713 guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
714 _L1_data_cache_line_size = count;
715
716 // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
717 VM_Version::_is_determine_features_test_running = true;
718 // We must align the first argument to 16 bytes because of the lqarx check.
719 (*test)(align_up((address)mid_of_test_area, 16), 0);
720 VM_Version::_is_determine_features_test_running = false;
721
722   // Determine which instructions are legal: slots overwritten with 0 by the signal handler mark unsupported instructions.
723 int feature_cntr = 0;
724 if (code[feature_cntr++]) features |= fsqrt_m;
725 if (code[feature_cntr++]) features |= fsqrts_m;
726 if (code[feature_cntr++]) features |= isel_m;
727 if (code[feature_cntr++]) features |= lxarxeh_m;
728 if (code[feature_cntr++]) features |= cmpb_m;
729 if (code[feature_cntr++]) features |= popcntb_m;
730 if (code[feature_cntr++]) features |= popcntw_m;
731 if (code[feature_cntr++]) features |= fcfids_m;
732 if (code[feature_cntr++]) features |= vand_m;
733 if (code[feature_cntr++]) features |= lqarx_m;
734 if (code[feature_cntr++]) features |= vcipher_m;
735 if (code[feature_cntr++]) features |= vpmsumb_m;
736 if (code[feature_cntr++]) features |= tcheck_m;
737 if (code[feature_cntr++]) features |= mfdscr_m;
738 if (code[feature_cntr++]) features |= vsx_m;
739 if (code[feature_cntr++]) features |= ldbrx_m;
740 if (code[feature_cntr++]) features |= stdbrx_m;
741 if (code[feature_cntr++]) features |= vshasig_m;
742
743 // Print the detection code.
744 if (PrintAssembly) {
745 ttyLocker ttyl;
746 tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
747 Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
748 }
749
750 _features = features;
751 }
752
753 // Power 8: Configure Data Stream Control Register.
754 void VM_Version::config_dscr() {
755 // 7 InstWords for each call (function descriptor + blr instruction).
756 const int code_size = (2+2*7)*BytesPerInstWord;
757
758 // Allocate space for the code.
759 ResourceMark rm;
760 CodeBuffer cb("config_dscr", code_size, 0);
761 MacroAssembler* a = new MacroAssembler(&cb);
762
763 // Emit code.
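  // Two tiny leaf functions are generated: get_dscr reads the current DSCR value
  // via mfdscr, set_dscr writes a new value via mtdscr.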
764 uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry();
765 uint32_t *code = (uint32_t *)a->pc();
766 a->mfdscr(R3);
767 a->blr();
768
769 void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry();
770 a->mtdscr(R3);
771 a->blr();
772
773 uint32_t *code_end = (uint32_t *)a->pc();
774 a->flush();
775
776 // Print the detection code.
777 if (PrintAssembly) {
778 ttyLocker ttyl;
779 tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code));
780 Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
781 }
782
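  // DSCR_PPC64 == (uintx)-1 means "leave the register as is". DSCR_DPFD_PPC64 sets
  // the default prefetch depth field in the low three bits, DSCR_URG_PPC64 the
  // urgency field in bits 6..8; values above 7 leave the respective field untouched.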
783 // Apply the configuration if needed.
784 _dscr_val = (*get_dscr)();
785 if (Verbose) {
786     tty->print_cr("dscr value was 0x%lx", _dscr_val);
787 }
788 bool change_requested = false;
789 if (DSCR_PPC64 != (uintx)-1) {
790 _dscr_val = DSCR_PPC64;
791 change_requested = true;
792 }
793 if (DSCR_DPFD_PPC64 <= 7) {
794 uint64_t mask = 0x7;
795 if ((_dscr_val & mask) != DSCR_DPFD_PPC64) {
796 _dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64);
797 change_requested = true;
798 }
799 }
800 if (DSCR_URG_PPC64 <= 7) {
801 uint64_t mask = 0x7 << 6;
802     if ((_dscr_val & mask) != DSCR_URG_PPC64 << 6) {
803 _dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6);
804 change_requested = true;
805 }
806 }
807 if (change_requested) {
808 (*set_dscr)(_dscr_val);
809 if (Verbose) {
810       tty->print_cr("dscr was set to 0x%lx", (*get_dscr)());
811 }
812 }
813 }
814
815 static uint64_t saved_features = 0;
816
817 void VM_Version::allow_all() {
818 saved_features = _features;
819 _features = all_features_m;
820 }
821
822 void VM_Version::revert() {
823 _features = saved_features;
824 }