1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2012, 2018, SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"
#include "vm_version_ppc.hpp"

#include <sys/sysinfo.h>

#if defined(LINUX) && defined(VM_LITTLE_ENDIAN)
#include <sys/auxv.h>

// Older glibc/kernel headers may not define this AT_HWCAP2 bit yet,
// so provide the value ourselves (bit 24, "HTM without Suspend Mode").
#ifndef PPC_FEATURE2_HTM_NOSC
#define PPC_FEATURE2_HTM_NOSC (1 << 24)
#endif
#endif

// Set to true while the generated feature-detection stub executes, so the
// signal handler can recognize SIGILLs raised by unsupported instructions
// and patch the offending instruction word to 0 (see determine_features()).
bool VM_Version::_is_determine_features_test_running = false;
// Cached value of the Data Stream Control Register, set by config_dscr().
uint64_t VM_Version::_dscr_val = 0;

// Print a warning that an explicitly set trap-based flag will be disabled
// because it requires -XX:+UseSIGTRAP.
#define MSG(flag) \
  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
      jio_fprintf(defaultStream::error_stream(),                       \
                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
                  "         -XX:+" #flag " will be disabled!\n");

// Entry point of platform-specific initialization: probes CPU features,
// then derives the ergonomic defaults of all PPC64-related VM flags.
void VM_Version::initialize() {

  // Test which instructions are supported and measure cache line size.
  determine_features();

  // If PowerArchitecturePPC64 hasn't been specified explicitly determine from features.
  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
    // Pick the highest architecture level whose marker instruction is
    // available (darn -> Power9, lqarx -> Power8, popcntw -> Power7,
    // cmpb -> Power6, popcntb -> Power5).
    if (VM_Version::has_darn()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 9);
    } else if (VM_Version::has_lqarx()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 8);
    } else if (VM_Version::has_popcntw()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
    } else if (VM_Version::has_cmpb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
    } else if (VM_Version::has_popcntb()) {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
    } else {
      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
    }
  }

  // Verify that the (possibly user-supplied) level is actually supported.
  // NOTE: the cases deliberately fall through - a machine that satisfies
  // level N must also provide every feature required by the lower levels.
  bool PowerArchitecturePPC64_ok = false;
  switch (PowerArchitecturePPC64) {
    case 9: if (!VM_Version::has_darn()   ) break;
    case 8: if (!VM_Version::has_lqarx()  ) break;
    case 7: if (!VM_Version::has_popcntw()) break;
    case 6: if (!VM_Version::has_cmpb()   ) break;
    case 5: if (!VM_Version::has_popcntb()) break;
    case 0: PowerArchitecturePPC64_ok = true; break;
    default: break;
  }
  guarantee(PowerArchitecturePPC64_ok, "PowerArchitecturePPC64 cannot be set to "
            UINTX_FORMAT " on this machine", PowerArchitecturePPC64);

  // Power 8: Configure Data Stream Control Register.
  if (PowerArchitecturePPC64 >= 8 && has_mfdscr()) {
    config_dscr();
  }

  // Trap-based checks depend on SIGTRAP support; disable them (warning
  // about any that were set explicitly) when UseSIGTRAP is off.
  if (!UseSIGTRAP) {
    MSG(TrapBasedICMissChecks);
    MSG(TrapBasedNotEntrantChecks);
    MSG(TrapBasedNullChecks);
    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedNullChecks, false);
    FLAG_SET_ERGO(bool, TrapBasedICMissChecks, false);
  }

#ifdef COMPILER2
  if (!UseSIGTRAP) {
    MSG(TrapBasedRangeChecks);
    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
  }

  // On Power6 test for section size.
  if (PowerArchitecturePPC64 == 6) {
    determine_section_size();
  // TODO: PPC port } else {
  // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
  }

  // Superword (SLP) vectorization via VSX requires at least Power8.
  if (PowerArchitecturePPC64 >= 8) {
    if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
      FLAG_SET_ERGO(bool, SuperwordUseVSX, true);
    }
  } else {
    if (SuperwordUseVSX) {
      warning("SuperwordUseVSX specified, but needs at least Power8.");
      FLAG_SET_DEFAULT(SuperwordUseVSX, false);
    }
  }
  // 16-byte vectors with VSX, otherwise 8-byte (FP register width).
  MaxVectorSize = SuperwordUseVSX ? 16 : 8;

  // Count-trailing-zeros instructions were introduced with Power9.
  if (PowerArchitecturePPC64 >= 9) {
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstructionsPPC64)) {
      FLAG_SET_ERGO(bool, UseCountTrailingZerosInstructionsPPC64, true);
    }
  } else {
    if (UseCountTrailingZerosInstructionsPPC64) {
      warning("UseCountTrailingZerosInstructionsPPC64 specified, but needs at least Power9.");
      FLAG_SET_DEFAULT(UseCountTrailingZerosInstructionsPPC64, false);
    }
  }
#endif

  // Create and print feature-string.
  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
  jio_snprintf(buf, sizeof(buf),
               "ppc64%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               (has_fsqrt()   ? " fsqrt"   : ""),
               (has_isel()    ? " isel"    : ""),
               (has_lxarxeh() ? " lxarxeh" : ""),
               (has_cmpb()    ? " cmpb"    : ""),
               (has_popcntb() ? " popcntb" : ""),
               (has_popcntw() ? " popcntw" : ""),
               (has_fcfids()  ? " fcfids"  : ""),
               (has_vand()    ? " vand"    : ""),
               (has_lqarx()   ? " lqarx"   : ""),
               (has_vcipher() ? " aes"     : ""),
               (has_vpmsumb() ? " vpmsumb" : ""),
               (has_mfdscr()  ? " mfdscr"  : ""),
               (has_vsx()     ? " vsx"     : ""),
               (has_ldbrx()   ? " ldbrx"   : ""),
               (has_stdbrx()  ? " stdbrx"  : ""),
               (has_vshasig() ? " sha"     : ""),
               (has_tm()      ? " rtm"     : ""),
               (has_darn()    ? " darn"    : "")
               // Make sure number of %s matches num_features!
               );
  _features_string = os::strdup(buf);
  if (Verbose) {
    print_features();
  }

  // PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
  _supports_cx8 = true;

  // Used by C1.
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  UseSSE = 0; // Only on x86 and x64

  // Cache line size as measured by determine_features() via dcbz.
  intx cache_line_size = L1_data_cache_line_size();

  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;

  if (AllocatePrefetchStyle == 4) {
    // Style 4 (BIS-like prefetch) steps exactly one cache line at a time.
    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // Default is not defined?
  } else {
    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // Default is not defined?
  }

  assert(AllocatePrefetchLines > 0, "invalid value");
  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
    AllocatePrefetchLines = 1; // Conservative value.
  }

  // Style 3 needs a prefetch distance of at least one cache line.
  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
  }

  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");

  // If running on Power8 or newer hardware, the implementation uses the available vector instructions.
  // In all other cases, the implementation uses only generally available instructions.
  if (!UseCRC32Intrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
    }
  }

  // Implementation does not use any of the vector instructions available with Power8.
  // Their exploitation is still pending (aka "work in progress").
  if (!UseCRC32CIntrinsics) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      FLAG_SET_DEFAULT(UseCRC32CIntrinsics, true);
    }
  }

  // TODO: Provide implementation.
  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  // The AES intrinsic stubs require AES instruction support.
  if (has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      UseAES = true;
    }
  } else if (UseAES) {
    // Only warn when the user asked for it explicitly.
    if (!FLAG_IS_DEFAULT(UseAES))
      warning("AES instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseAES, false);
  }

  if (UseAES && has_vcipher()) {
    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      UseAESIntrinsics = true;
    }
  } else if (UseAESIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
      warning("AES intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
  }

  // Not implemented on PPC64.
  if (UseAESCTRIntrinsics) {
    warning("AES/CTR intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
  }

  // Not implemented on PPC64.
  if (UseGHASHIntrinsics) {
    warning("GHASH intrinsics are not available on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseFMA)) {
    FLAG_SET_DEFAULT(UseFMA, true);
  }

  // SHA intrinsics require the vshasigma* instructions (Power8).
  if (has_vshasig()) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    if (!FLAG_IS_DEFAULT(UseSHA))
      warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  // No SHA-1 stub on PPC64.
  if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (UseSHA && has_vshasig()) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA && has_vshasig()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  // UseSHA is only meaningful if at least one SHA intrinsic remains enabled.
  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  // BigInteger-related intrinsics are implemented on PPC64; enable by default.
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }

  // Not implemented on PPC64.
  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
  }


  // Adjust RTM (Restricted Transactional Memory) flags.
  if (UseRTMLocking) {
    // If CPU or OS do not support TM:
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    if (PowerArchitecturePPC64 < 8) {
      vm_exit_during_initialization("RTM instructions are not available on this CPU.");
    }

    // has_tm() reflects OS-level TM support (set in determine_features()).
    if (!has_tm()) {
      vm_exit_during_initialization("RTM is not supported on this OS version.");
    }
  }

  if (UseRTMLocking) {
#if INCLUDE_RTM_OPT
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
#else
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
#endif
  } else { // !UseRTMLocking
    // Silently (or with a warning for explicit settings) turn off every
    // RTM-dependent flag.
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }
}

// Decide whether biased locking stays enabled; called early (before
// initialize()) during argument processing.
bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag." );
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

// Print the detected feature string and measured L1 data cache line size.
void VM_Version::print_features() {
  tty->print_cr("Version: %s L1_data_cache_line_size=%d", features_string(), L1_data_cache_line_size());
}

#ifdef COMPILER2
// Determine section size on power6: If section size is 8 instructions,
// there should be a difference between the two testloops of ~15 %. If
// no difference is detected the section is assumed to be 32 instructions.
void VM_Version::determine_section_size() {

  int unroll = 80;

  // Enough room for both unrolled test loops plus prologue/epilogue slack.
  const int code_size = (2* unroll * 32 + 100)*BytesPerInstWord;

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_section_size", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  uint32_t *code = (uint32_t *)a->pc();
  // Emit code.
  void (*test1)() = (void(*)())(void *)a->function_entry();

  Label l1;

  // R4 = 2^28: iteration budget consumed by the addi(-1) in each unrolled body.
  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l1);
  a->align(CodeEntryAlignment);

  a->bind(l1);

  for (int i = 0; i < unroll; i++) {
    // Loop 1 (was: "Schleife 1"): instruction mix laid out to match
    // 8-instruction dispatch sectors; endgroup/brnop0 mark sector ends.
    // ------- sector 0 ------------
    // ;; 0
    a->nop();                   // 1
    a->fpnop0();                // 2
    a->fpnop1();                // 3
    a->addi(R4,R4, -1); // 4

    // ;; 1
    a->nop();                   // 5
    a->fmr(F6, F6);             // 6
    a->fmr(F7, F7);             // 7
    a->endgroup();              // 8
    // ------- sector 8 ------------

    // ;; 2
    a->nop();                   // 9
    a->nop();                   // 10
    a->fmr(F8, F8);             // 11
    a->fmr(F9, F9);             // 12

    // ;; 3
    a->nop();                   // 13
    a->fmr(F10, F10);           // 14
    a->fmr(F11, F11);           // 15
    a->endgroup();              // 16
    // -------- sector 16 -------------

    // ;; 4
    a->nop();                   // 17
    a->nop();                   // 18
    a->fmr(F15, F15);           // 19
    a->fmr(F16, F16);           // 20

    // ;; 5
    a->nop();                   // 21
    a->fmr(F17, F17);           // 22
    a->fmr(F18, F18);           // 23
    a->endgroup();              // 24
    // ------- sector 24 ------------

    // ;; 6
    a->nop();                   // 25
    a->nop();                   // 26
    a->fmr(F19, F19);           // 27
    a->fmr(F20, F20);           // 28

    // ;; 7
    a->nop();                   // 29
    a->fmr(F21, F21);           // 30
    a->fmr(F22, F22);           // 31
    a->brnop0();                // 32

    // ------- sector 32 ------------
  }

  // ;; 8
  a->cmpdi(CCR0, R4, unroll);   // 33
  a->bge(CCR0, l1);             // 34
  a->blr();

  // Emit code.
  void (*test2)() = (void(*)())(void *)a->function_entry();
  // uint32_t *code = (uint32_t *)a->pc();

  Label l2;

  // Same 2^28 iteration budget as test1.
  a->li(R4, 1);
  a->sldi(R4, R4, 28);
  a->b(l2);
  a->align(CodeEntryAlignment);

  a->bind(l2);

  for (int i = 0; i < unroll; i++) {
    // Loop 2 (was: "Schleife 2"): same instruction count as loop 1 but
    // with group boundaries shifted so it only schedules well if the
    // dispatch sector is 32 instructions; the runtime difference vs.
    // loop 1 reveals the actual sector size.
    // ------- sector 0 ------------
    // ;; 0
    a->brnop0();                  // 1
    a->nop();                     // 2
    //a->cmpdi(CCR0, R4, unroll);
    a->fpnop0();                  // 3
    a->fpnop1();                  // 4
    a->addi(R4,R4, -1); // 5

    // ;; 1

    a->nop();                     // 6
    a->fmr(F6, F6);               // 7
    a->fmr(F7, F7);               // 8
    // ------- sector 8 ---------------

    // ;; 2
    a->endgroup();                // 9

    // ;; 3
    a->nop();                     // 10
    a->nop();                     // 11
    a->fmr(F8, F8);               // 12

    // ;; 4
    a->fmr(F9, F9);               // 13
    a->nop();                     // 14
    a->fmr(F10, F10);             // 15

    // ;; 5
    a->fmr(F11, F11);             // 16
    // -------- sector 16 -------------

    // ;; 6
    a->endgroup();                // 17

    // ;; 7
    a->nop();                     // 18
    a->nop();                     // 19
    a->fmr(F15, F15);             // 20

    // ;; 8
    a->fmr(F16, F16);             // 21
    a->nop();                     // 22
    a->fmr(F17, F17);             // 23

    // ;; 9
    a->fmr(F18, F18);             // 24
    // -------- sector 24 -------------

    // ;; 10
    a->endgroup();                // 25

    // ;; 11
    a->nop();                     // 26
    a->nop();                     // 27
    a->fmr(F19, F19);             // 28

    // ;; 12
    a->fmr(F20, F20);             // 29
    a->nop();                     // 30
    a->fmr(F21, F21);             // 31

    // ;; 13
    a->fmr(F22, F22);             // 32
  }

  // -------- sector 32 -------------
  // ;; 14
  a->cmpdi(CCR0, R4, unroll);     // 33
  a->bge(CCR0, l2);               // 34

  a->blr();
  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // Time both loops on the current thread and compare.
  double loop1_seconds,loop2_seconds, rel_diff;
  uint64_t start1, stop1;

  start1 = os::current_thread_cpu_time(false);
  (*test1)();
  stop1 = os::current_thread_cpu_time(false);
  loop1_seconds = (stop1- start1) / (1000 *1000 *1000.0);


  start1 = os::current_thread_cpu_time(false);
  (*test2)();
  stop1 = os::current_thread_cpu_time(false);

  loop2_seconds = (stop1 - start1) / (1000 *1000 *1000.0);

  // Relative slowdown of loop 2 in percent; > ~12% indicates 8-instruction sectors.
  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds *100;

  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
    tty->print_cr("Time loop1 :%f", loop1_seconds);
    tty->print_cr("Time loop2 :%f", loop2_seconds);
    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);

    if (rel_diff > 12.0) {
      tty->print_cr("Section Size 8 Instructions");
    } else{
      tty->print_cr("Section Size 32 Instructions or Power5");
    }
  }

#if 0 // TODO: PPC port
  // Set sector size (if not set explicitly).
  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
    if (rel_diff > 12.0) {
      PdScheduling::power6SectorSize = 0x20;
    } else {
      PdScheduling::power6SectorSize = 0x80;
    }
  } else if (Power6SectorSize128PPC64) {
    PdScheduling::power6SectorSize = 0x80;
  } else {
    PdScheduling::power6SectorSize = 0x20;
  }
#endif
  if (UsePower6SchedulerPPC64) Unimplemented();
}
#endif // COMPILER2

// Generate and run a stub that executes one instance of each optional
// instruction; the SIGILL handler nulls out unsupported ones, and the
// surviving instruction words are translated into feature bits below.
void VM_Version::determine_features() {
#if defined(ABI_ELFv2)
  // 1 InstWord per call for the blr instruction.
  const int code_size = (num_features+1+2*1)*BytesPerInstWord;
#else
  // 7 InstWords for each call (function descriptor + blr instruction).
  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
#endif
  int features = 0;

  // create test area
  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >=2* max cache line size (cache line size can't exceed min page size).
  char test_area[BUFFER_SIZE];
  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];

  // Allocate space for the code.
  ResourceMark rm;
  CodeBuffer cb("detect_cpu_features", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);

  // Must be set to true so we can generate the test code.
  _features = VM_Version::all_features_m;

  // Emit code.
  void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
  uint32_t *code = (uint32_t *)a->pc();
  // One probe instruction per feature, in the exact order expected by the
  // feature_cntr loop after execution.
  // Don't use R0 in ldarx.
  // Keep R3_ARG1 unmodified, it contains &field (see below).
  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
  a->fsqrt(F3, F4);                            // code[0]  -> fsqrt_m
  a->fsqrts(F3, F4);                           // code[1]  -> fsqrts_m
  a->isel(R7, R5, R6, 0);                      // code[2]  -> isel_m
  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[3]  -> lxarx_m
  a->cmpb(R7, R5, R6);                         // code[4]  -> cmpb
  a->popcntb(R7, R5);                          // code[5]  -> popcntb
  a->popcntw(R7, R5);                          // code[6]  -> popcntw
  a->fcfids(F3, F4);                           // code[7]  -> fcfids
  a->vand(VR0, VR0, VR0);                      // code[8]  -> vand
  // arg0 of lqarx must be an even register, (arg1 + arg2) must be a multiple of 16
  a->lqarx_unchecked(R6, R3_ARG1, R4_ARG2, 1); // code[9]  -> lqarx_m
  a->vcipher(VR0, VR1, VR2);                   // code[10] -> vcipher
  a->vpmsumb(VR0, VR1, VR2);                   // code[11] -> vpmsumb
  a->mfdscr(R0);                               // code[12] -> mfdscr
  a->lxvd2x(VSR0, R3_ARG1);                    // code[13] -> vsx
  a->ldbrx(R7, R3_ARG1, R4_ARG2);              // code[14] -> ldbrx
  a->stdbrx(R7, R3_ARG1, R4_ARG2);             // code[15] -> stdbrx
  a->vshasigmaw(VR0, VR1, 1, 0xF);             // code[16] -> vshasig
  // rtm is determined by OS
  a->darn(R7);                                 // code[17] -> darn
  a->blr();

  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
  a->dcbz(R3_ARG1); // R3_ARG1 = addr
  a->blr();

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();
  _features = VM_Version::unknown_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  // Measure cache line size: dcbz zeroes exactly one cache line, so the
  // number of zero bytes afterwards IS the line size.
  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call function which executes dcbz to the middle.
  int count = 0; // count zeroed bytes
  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
  _L1_data_cache_line_size = count;

  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
  VM_Version::_is_determine_features_test_running = true;
  // We must align the first argument to 16 bytes because of the lqarx check.
  (*test)(align_up((address)mid_of_test_area, 16), 0);
  VM_Version::_is_determine_features_test_running = false;

  // determine which instructions are legal.
  // A surviving (non-zero) instruction word means the instruction executed
  // without SIGILL; order must match the emission sequence above.
  int feature_cntr = 0;
  if (code[feature_cntr++]) features |= fsqrt_m;
  if (code[feature_cntr++]) features |= fsqrts_m;
  if (code[feature_cntr++]) features |= isel_m;
  if (code[feature_cntr++]) features |= lxarxeh_m;
  if (code[feature_cntr++]) features |= cmpb_m;
  if (code[feature_cntr++]) features |= popcntb_m;
  if (code[feature_cntr++]) features |= popcntw_m;
  if (code[feature_cntr++]) features |= fcfids_m;
  if (code[feature_cntr++]) features |= vand_m;
  if (code[feature_cntr++]) features |= lqarx_m;
  if (code[feature_cntr++]) features |= vcipher_m;
  if (code[feature_cntr++]) features |= vpmsumb_m;
  if (code[feature_cntr++]) features |= mfdscr_m;
  if (code[feature_cntr++]) features |= vsx_m;
  if (code[feature_cntr++]) features |= ldbrx_m;
  if (code[feature_cntr++]) features |= stdbrx_m;
  if (code[feature_cntr++]) features |= vshasig_m;
  // feature rtm_m is determined by OS
  if (code[feature_cntr++]) features |= darn_m;

  // Print the detection code.
  if (PrintAssembly) {
    ttyLocker ttyl;
    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
  }

  _features = features;

#ifdef AIX
  // To enable it on AIX it's necessary POWER8 or above and at least AIX 7.2.
  // Actually, this is supported since AIX 7.1.. Unfortunately, this first
  // contained bugs, so that it can only be enabled after AIX 7.1.3.30.
  // The Java property os.version, which is used in RTM tests to decide
  // whether the feature is available, only knows major and minor versions.
  // We don't want to change this property, as user code might depend on it.
  // So the tests can not check on subversion 3.30, and we only enable RTM
  // with AIX 7.2.
  if (has_lqarx()) { // POWER8 or above
    if (os::Aix::os_version() >= 0x07020000) { // At least AIX 7.2.
756 _features |= rtm_m; 757 } 758 } 759 #endif 760 #if defined(LINUX) && defined(VM_LITTLE_ENDIAN) 761 unsigned long auxv = getauxval(AT_HWCAP2); 762 763 if (auxv & PPC_FEATURE2_HTM_NOSC) { 764 if (auxv & PPC_FEATURE2_HAS_HTM) { 765 // TM on POWER8 and POWER9 in compat mode (VM) is supported by the JVM. 766 // TM on POWER9 DD2.1 NV (baremetal) is not supported by the JVM (TM on 767 // POWER9 DD2.1 NV has a few issues that need a couple of firmware 768 // and kernel workarounds, so there is a new mode only supported 769 // on non-virtualized P9 machines called HTM with no Suspend Mode). 770 // TM on POWER9 D2.2+ NV is not supported at all by Linux. 771 _features |= rtm_m; 772 } 773 } 774 #endif 775 } 776 777 // Power 8: Configure Data Stream Control Register. 778 void VM_Version::config_dscr() { 779 // 7 InstWords for each call (function descriptor + blr instruction). 780 const int code_size = (2+2*7)*BytesPerInstWord; 781 782 // Allocate space for the code. 783 ResourceMark rm; 784 CodeBuffer cb("config_dscr", code_size, 0); 785 MacroAssembler* a = new MacroAssembler(&cb); 786 787 // Emit code. 788 uint64_t (*get_dscr)() = (uint64_t(*)())(void *)a->function_entry(); 789 uint32_t *code = (uint32_t *)a->pc(); 790 a->mfdscr(R3); 791 a->blr(); 792 793 void (*set_dscr)(long) = (void(*)(long))(void *)a->function_entry(); 794 a->mtdscr(R3); 795 a->blr(); 796 797 uint32_t *code_end = (uint32_t *)a->pc(); 798 a->flush(); 799 800 // Print the detection code. 801 if (PrintAssembly) { 802 ttyLocker ttyl; 803 tty->print_cr("Decoding dscr configuration stub at " INTPTR_FORMAT " before execution:", p2i(code)); 804 Disassembler::decode((u_char*)code, (u_char*)code_end, tty); 805 } 806 807 // Apply the configuration if needed. 
808 _dscr_val = (*get_dscr)(); 809 if (Verbose) { 810 tty->print_cr("dscr value was 0x%lx" , _dscr_val); 811 } 812 bool change_requested = false; 813 if (DSCR_PPC64 != (uintx)-1) { 814 _dscr_val = DSCR_PPC64; 815 change_requested = true; 816 } 817 if (DSCR_DPFD_PPC64 <= 7) { 818 uint64_t mask = 0x7; 819 if ((_dscr_val & mask) != DSCR_DPFD_PPC64) { 820 _dscr_val = (_dscr_val & ~mask) | (DSCR_DPFD_PPC64); 821 change_requested = true; 822 } 823 } 824 if (DSCR_URG_PPC64 <= 7) { 825 uint64_t mask = 0x7 << 6; 826 if ((_dscr_val & mask) != DSCR_DPFD_PPC64 << 6) { 827 _dscr_val = (_dscr_val & ~mask) | (DSCR_URG_PPC64 << 6); 828 change_requested = true; 829 } 830 } 831 if (change_requested) { 832 (*set_dscr)(_dscr_val); 833 if (Verbose) { 834 tty->print_cr("dscr was set to 0x%lx" , (*get_dscr)()); 835 } 836 } 837 } 838 839 static uint64_t saved_features = 0; 840 841 void VM_Version::allow_all() { 842 saved_features = _features; 843 _features = all_features_m; 844 } 845 846 void VM_Version::revert() { 847 _features = saved_features; 848 }