/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_ppc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
  if (Assembler::is_simm(si31, 16)) {
    ld(d, si31, a);
    if (emit_filler_nop) nop();
  } else {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
    addis(d, a, hi);
    ld(d, lo, d);
  }
}

void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
  assert_different_registers(d, a);
  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
}

void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                      size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case  8:              ld(dst, offs, base);                         break;
  case  4:  is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
  case  2:  is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
  case  1:  lbz(dst, offs, base); if (is_signed) extsb(dst, dst);    break; // lba doesn't exist :(
  default:  ShouldNotReachHere();
  }
}
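
// Illustrative use of load_sized_value() (a sketch; the register names
// and offset are placeholders, not taken from this file): loading a Java
// short sign-extended into a 64-bit register:
//   load_sized_value(R5, 0, R3, 2, /*is_signed=*/true); // emits lha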

void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                       size_t size_in_bytes) {
  switch (size_in_bytes) {
  case  8:  std(dst, offs, base); break;
  case  4:  stw(dst, offs, base); break;
  case  2:  sth(dst, offs, base); break;
  case  1:  stb(dst, offs, base); break;
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::align(int modulus, int max, int rem) {
  int padding = (rem + modulus - (offset() % modulus)) % modulus;
  if (padding > max) return;
  for (int c = (padding >> 2); c > 0; --c) { nop(); }
}

// Issue instructions that calculate given TOC from global TOC.
void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
                                                       bool add_relocation, bool emit_dummy_addr) {
  int offset = -1;
  if (emit_dummy_addr) {
    offset = -128; // dummy address
  } else if (addr != (address)(intptr_t)-1) {
    offset = MacroAssembler::offset_to_global_toc(addr);
  }

  if (hi16) {
    addis(dst, R29_TOC, MacroAssembler::largeoffset_si16_si16_hi(offset));
  }
  if (lo16) {
    if (add_relocation) {
      // Relocate at the addi to avoid confusion with a load from the method's TOC.
      relocate(internal_word_Relocation::spec(addr));
    }
    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
  }
}

address MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
  const int offset = MacroAssembler::offset_to_global_toc(addr);

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
  return inst1_addr;
}
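
// The pattern handled by patch_calculate_address_from_global_toc_at()
// above and get_address_of_calculate_address_from_global_toc_at() below
// is the one emitted by calculate_address_from_global_toc():
//   addis dst, R29_TOC, offset_hi16
//   addi  dst, dst,     offset_lo16
// The relocation sits on the addi; both routines scan backwards from the
// addi to the matching addis writing dst, bounded by 'bound'.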

address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");

  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
  // -1 is a special case
  if (offset == -1) {
    return (address)(intptr_t)-1;
  } else {
    return global_toc() + offset;
  }
}

#ifdef _LP64
// Patch compressed oops or klass constants.
// Assembler sequence is
// 1) compressed oops:
//    lis  rx = const.hi
//    ori  rx = rx | const.lo
// 2) compressed klass:
//    lis    rx = const.hi
//    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
//    ori    rx = rx | const.lo
// Clrldi will be passed by.
address MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;
  while (inst1_addr >= bound) {
    inst1 = *(int *)inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  int xc = (data >> 16) & 0xffff;
  int xd = (data >>  0) & 0xffff;

  set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
  set_imm((int *)inst2_addr, (xd));        // unsigned int
  return inst1_addr;
}

// Get compressed oop or klass constant.
narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;

  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
  uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);

  return (int) (xl | xh);
}
#endif // _LP64

// Returns true if successful.
bool MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a,
                                                Register toc, bool fixed_size) {
  int toc_offset = 0;
  // Use RelocationHolder::none for the constant pool entry, otherwise
  // we will end up with a failing NativeCall::verify(x) where x is
  // the address of the constant pool entry.
  // FIXME: We should insert relocation information for oops at the constant
  // pool entries instead of inserting it at the loads; patching of a constant
  // pool entry should be less expensive.
  address const_address = address_constant((address)a.value(), RelocationHolder::none);
  if (const_address == NULL) { return false; } // allocation failure
  // Relocate at the pc of the load.
  relocate(a.rspec());
  toc_offset = (int)(const_address - code()->consts()->start());
  ld_largeoffset_unchecked(dst, toc_offset, toc, fixed_size);
  return true;
}

bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  // The relocation points to the ld or the addis.
  return (is_ld(inst1)) ||
         (is_addis(inst1) && inv_ra_field(inst1) != 0);
}

int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");

  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else if (is_addis(inst1)) {
    const int dst = inv_rt_field(inst1);

    // Now, find the succeeding ld which reads and writes to dst.
    address inst2_addr = inst1_addr + BytesPerInstWord;
    int inst2 = 0;
    while (true) {
      inst2 = *(int *) inst2_addr;
      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
        // Stop, found the ld which reads and writes dst.
        break;
      }
      inst2_addr += BytesPerInstWord;
    }
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
  ShouldNotReachHere();
  return 0;
}

// Get the constant from a `load_const' sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const_at(a), "not a load of a constant");
  const int *p = (const int*) a;
  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
  if (is_ori(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
  } else if (is_lis(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
  } else {
    ShouldNotReachHere();
    return (long) 0;
  }
  return (long) x;
}
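
// For reference, a sketch of the two `load_const' shapes distinguished in
// get_const() above and patch_const() below, reconstructed from the
// immediate indices used there (the exact emission lives in load_const();
// treat this as an illustration, not a specification):
//   single-register variant (second instruction is an ori):
//     lis  d, x[63:48]; ori d, d, x[47:32]; sldi d, d, 32;
//     oris d, d, x[31:16]; ori d, d, x[15:0]
//   two-register variant (second instruction is another lis):
//     lis d, x[63:48]; lis t, x[31:16]; ori d, d, x[47:32];
//     ori t, t, x[15:0]; followed by a rotate-insert merging d and t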

// Patch the 64 bit constant of a `load_const' sequence. This is a low
// level procedure. It neither flushes the instruction cache nor is it
// mt safe.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const_at(a), "not a load of a constant");
  int *p = (int*) a;
  if (is_ori(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(1 + p, (x >> 32) & 0xffff);
    set_imm(3 + p, (x >> 16) & 0xffff);
    set_imm(4 + p, x & 0xffff);
  } else if (is_lis(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(2 + p, (x >> 32) & 0xffff);
    set_imm(1 + p, (x >> 16) & 0xffff);
    set_imm(3 + p, x & 0xffff);
  } else {
    ShouldNotReachHere();
  }
}

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp, int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  // Load indirectly to solve generation ordering problem.
  // static address, no relocation
  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)

  if (offset != 0) {
    addi(tmp, tmp, offset);
  }

  return RegisterOrConstant(tmp);
}

#ifndef PRODUCT
void MacroAssembler::pd_print_patched_instruction(address branch) {
  Unimplemented(); // TODO: PPC port
}
#endif // ndef PRODUCT
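
// Overview of the far-branch variants handled below (collected from the
// variant cases in set_dest_of_bc_far_at()):
//   variant 1: bcxx  DEST          ; destination reachable by a bcxx
//              nop
//   variant 2: b!cxx SKIP          ; inverted condition jumps around an
//              bxx   DEST          ; unconditional far branch
//            SKIP:
//   variant 3: nop                 ; conditional branch to the next
//              endgroup            ; instruction, patched away entirely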

// Conditional far branch for destinations encodable in 24+2 bits.
void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {

  // If requested by flag optimize, relocate the bc_far as a
  // runtime_call and prepare for optimizing it when the code gets
  // relocated.
  if (optimize == bc_far_optimize_on_relocate) {
    relocate(relocInfo::runtime_call_type);
  }

  // variant 2:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //

  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                opposite_bcond(inv_boint_bcond(boint)));

  // We emit two branches.
  // First, a conditional branch which jumps around the far branch.
  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
  const address bc_pc        = pc();
  bc(opposite_boint, biint, not_taken_pc);

  const int bc_instr = *(int*)bc_pc;
  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
         "postcondition");
  assert(biint == inv_bi_field(bc_instr), "postcondition");

  // Second, an unconditional far branch which jumps to dest.
  // Note: target(dest) remembers the current pc (see CodeSection::target)
  // and returns the current pc if the label is not bound yet; when
  // the label gets bound, the unconditional far branch will be patched.
  const address target_pc = target(dest);
  const address b_pc      = pc();
  b(target_pc);

  assert(not_taken_pc == pc(),                 "postcondition");
  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
}

// 1 or 2 instructions
void MacroAssembler::bc_far_optimized(int boint, int biint, Label& dest) {
  if (dest.is_bound() && is_within_range_of_bcxx(target(dest), pc())) {
    bc(boint, biint, dest);
  } else {
    bc_far(boint, biint, dest, MacroAssembler::bc_far_optimize_on_relocate);
  }
}

bool MacroAssembler::is_bc_far_at(address instruction_addr) {
  return is_bc_far_variant1_at(instruction_addr) ||
         is_bc_far_variant2_at(instruction_addr) ||
         is_bc_far_variant3_at(instruction_addr);
}

address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
  if (is_bc_far_variant1_at(instruction_addr)) {
    const address instruction_1_addr = instruction_addr;
    const int instruction_1 = *(int*)instruction_1_addr;
    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
  } else if (is_bc_far_variant2_at(instruction_addr)) {
    const address instruction_2_addr = instruction_addr + 4;
    return bxx_destination(instruction_2_addr);
  } else if (is_bc_far_variant3_at(instruction_addr)) {
    return instruction_addr + 8;
  }
  // variant 4 ???
  ShouldNotReachHere();
  return NULL;
}

void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {

  if (is_bc_far_variant3_at(instruction_addr)) {
    // variant 3, far cond branch to the next instruction, already patched to nops:
    //
    //    nop
    //    endgroup
    //  SKIP/DEST:
    //
    return;
  }

  // first, extract boint and biint from the current branch
  int boint = 0;
  int biint = 0;

  ResourceMark rm;
  const int code_size = 2 * BytesPerInstWord;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
    masm.nop();
    masm.endgroup();
  } else {
    if (is_bc_far_variant1_at(instruction_addr)) {
      // variant 1, the 1st instruction contains the destination address:
      //
      //    bcxx  DEST
      //    nop
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = inv_bo_field(instruction_1);
      biint = inv_bi_field(instruction_1);
    } else if (is_bc_far_variant2_at(instruction_addr)) {
      // variant 2, the 2nd instruction contains the destination address:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
                                 opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
      biint = inv_bi_field(instruction_1);
    } else {
      // variant 4???
      ShouldNotReachHere();
    }

    // second, set the new branch destination and optimize the code
    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
      // variant 1:
      //
      //    bcxx  DEST
      //    nop
      //
      masm.bc(boint, biint, dest);
      masm.nop();
    } else {
      // variant 2:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                    opposite_bcond(inv_boint_bcond(boint)));
      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
      masm.bc(opposite_boint, biint, not_taken_pc);
      masm.b(dest);
    }
  }
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}
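
// Overview of the bxx64_patchable variants emitted below. Each variant
// occupies bxx64_patchable_size (7 instructions), so one can be patched
// over another in place:
//   variant 1b: calculate address from global TOC (addis/addi), mtctr,
//               bctr[l] -- absolute, retargetable via the immediates
//   variant 2:  pc-relative b/bl padded with 6 nops -- used when the
//               destination is in range and ReoptimizeCallSequences is set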

// Emit a NOT mt-safe patchable 64 bit absolute call/jump.
void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
  // get current pc
  uint64_t start_pc = (uint64_t) pc();

  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first

  // relocate here
  if (rt != relocInfo::none) {
    relocate(rt);
  }

  if ( ReoptimizeCallSequences &&
       (( link && is_within_range_of_b(dest, pc_of_bl)) ||
        (!link && is_within_range_of_b(dest, pc_of_b)))) {
    // variant 2:
    // Emit an optimized, pc-relative call/jump.

    if (link) {
      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();

      // do the call
      assert(pc() == pc_of_bl, "just checking");
      bl(dest, relocInfo::none);
    } else {
      // do the jump
      assert(pc() == pc_of_b, "just checking");
      b(dest, relocInfo::none);

      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();
    }

    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
           "can't identify emitted call");
  } else {
    // variant 1:
    mr(R0, R11);  // spill R11 -> R0.

    // Load the destination address into CTR,
    // calculate destination relative to global toc.
    calculate_address_from_global_toc(R11, dest, true, true, false);

    mtctr(R11);
    mr(R11, R0);  // spill R11 <- R0.
    nop();

    // do the call/jump
    if (link) {
      bctrl();
    } else {
      bctr();
    }
    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
           "can't identify emitted call");
  }

  // Assert that we can identify the emitted call/jump.
  assert(is_bxx64_patchable_at((address)start_pc, link),
         "can't identify emitted call");
  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
         "wrong encoding of dest address");
}

// Identify a bxx64_patchable instruction.
bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
      || is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Does the call64_patchable instruction use a pc-relative encoding of
// the call destination?
bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
  // variant 2 is pc-relative
  return is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Identify variant 1.
bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[5]) // mtctr
      && is_load_const_at(instruction_addr);
}

// Identify variant 1b: load destination relative to global toc.
bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
      && is_mtctr(instr[3]) // mtctr
      && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
}

// Identify variant 2.
bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  if (link) {
    return is_bl (instr[6])  // bl dest is last
        && is_nop(instr[0])  // nop
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5]); // nop
  } else {
    return is_b  (instr[0])  // b dest is first
        && is_nop(instr[1])  // nop
        && is_nop(instr[2])  // nop
        && is_nop(instr[3])  // nop
        && is_nop(instr[4])  // nop
        && is_nop(instr[5])  // nop
        && is_nop(instr[6]); // nop
  }
}
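
// Note: retargeting below re-assembles the whole 7-instruction sequence
// in place (a CodeBuffer is laid over the existing code) and flushes the
// icache afterwards; like bxx64_patchable itself, this is NOT mt-safe.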

// Set dest address of a bxx64_patchable instruction.
void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
  ResourceMark rm;
  int code_size = MacroAssembler::bxx64_patchable_size;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.bxx64_patchable(dest, relocInfo::none, link);
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Get dest address of a bxx64_patchable instruction.
address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
    return (address) (unsigned long) get_const(instruction_addr);
  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
    unsigned int* instr = (unsigned int*) instruction_addr;
    if (link) {
      const int instr_idx = 6; // bl is last
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    } else {
      const int instr_idx = 0; // b is first
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    }
  // Load dest relative to global toc.
  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
                                                               instruction_addr);
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

// Uses ordering which corresponds to ABI:
//    _savegpr0_14:  std  r14,-144(r1)
//    _savegpr0_15:  std  r15,-136(r1)
//    _savegpr0_16:  std  r16,-128(r1)
void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
  std(R14, offset, dst);   offset += 8;
  std(R15, offset, dst);   offset += 8;
  std(R16, offset, dst);   offset += 8;
  std(R17, offset, dst);   offset += 8;
  std(R18, offset, dst);   offset += 8;
  std(R19, offset, dst);   offset += 8;
  std(R20, offset, dst);   offset += 8;
  std(R21, offset, dst);   offset += 8;
  std(R22, offset, dst);   offset += 8;
  std(R23, offset, dst);   offset += 8;
  std(R24, offset, dst);   offset += 8;
  std(R25, offset, dst);   offset += 8;
  std(R26, offset, dst);   offset += 8;
  std(R27, offset, dst);   offset += 8;
  std(R28, offset, dst);   offset += 8;
  std(R29, offset, dst);   offset += 8;
  std(R30, offset, dst);   offset += 8;
  std(R31, offset, dst);   offset += 8;

  stfd(F14, offset, dst);   offset += 8;
  stfd(F15, offset, dst);   offset += 8;
  stfd(F16, offset, dst);   offset += 8;
  stfd(F17, offset, dst);   offset += 8;
  stfd(F18, offset, dst);   offset += 8;
  stfd(F19, offset, dst);   offset += 8;
  stfd(F20, offset, dst);   offset += 8;
  stfd(F21, offset, dst);   offset += 8;
  stfd(F22, offset, dst);   offset += 8;
  stfd(F23, offset, dst);   offset += 8;
  stfd(F24, offset, dst);   offset += 8;
  stfd(F25, offset, dst);   offset += 8;
  stfd(F26, offset, dst);   offset += 8;
  stfd(F27, offset, dst);   offset += 8;
  stfd(F28, offset, dst);   offset += 8;
  stfd(F29, offset, dst);   offset += 8;
  stfd(F30, offset, dst);   offset += 8;
  stfd(F31, offset, dst);
}

// Uses ordering which corresponds to ABI:
//    _restgpr0_14:  ld   r14,-144(r1)
//    _restgpr0_15:  ld   r15,-136(r1)
//    _restgpr0_16:  ld   r16,-128(r1)
void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
  ld(R14, offset, src);   offset += 8;
  ld(R15, offset, src);   offset += 8;
  ld(R16, offset, src);   offset += 8;
  ld(R17, offset, src);   offset += 8;
  ld(R18, offset, src);   offset += 8;
  ld(R19, offset, src);   offset += 8;
  ld(R20, offset, src);   offset += 8;
  ld(R21, offset, src);   offset += 8;
  ld(R22, offset, src);   offset += 8;
  ld(R23, offset, src);   offset += 8;
  ld(R24, offset, src);   offset += 8;
  ld(R25, offset, src);   offset += 8;
  ld(R26, offset, src);   offset += 8;
  ld(R27, offset, src);   offset += 8;
  ld(R28, offset, src);   offset += 8;
  ld(R29, offset, src);   offset += 8;
  ld(R30, offset, src);   offset += 8;
  ld(R31, offset, src);   offset += 8;

  // FP registers
  lfd(F14, offset, src);   offset += 8;
  lfd(F15, offset, src);   offset += 8;
  lfd(F16, offset, src);   offset += 8;
  lfd(F17, offset, src);   offset += 8;
  lfd(F18, offset, src);   offset += 8;
  lfd(F19, offset, src);   offset += 8;
  lfd(F20, offset, src);   offset += 8;
  lfd(F21, offset, src);   offset += 8;
  lfd(F22, offset, src);   offset += 8;
  lfd(F23, offset, src);   offset += 8;
  lfd(F24, offset, src);   offset += 8;
  lfd(F25, offset, src);   offset += 8;
  lfd(F26, offset, src);   offset += 8;
  lfd(F27, offset, src);   offset += 8;
  lfd(F28, offset, src);   offset += 8;
  lfd(F29, offset, src);   offset += 8;
  lfd(F30, offset, src);   offset += 8;
  lfd(F31, offset, src);
}

// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
  std(R2,  offset, dst);   offset += 8;
  std(R3,  offset, dst);   offset += 8;
  std(R4,  offset, dst);   offset += 8;
  std(R5,  offset, dst);   offset += 8;
  std(R6,  offset, dst);   offset += 8;
  std(R7,  offset, dst);   offset += 8;
  std(R8,  offset, dst);   offset += 8;
  std(R9,  offset, dst);   offset += 8;
  std(R10, offset, dst);   offset += 8;
  std(R11, offset, dst);   offset += 8;
  std(R12, offset, dst);   offset += 8;

  stfd(F0,  offset, dst);   offset += 8;
  stfd(F1,  offset, dst);   offset += 8;
  stfd(F2,  offset, dst);   offset += 8;
  stfd(F3,  offset, dst);   offset += 8;
  stfd(F4,  offset, dst);   offset += 8;
  stfd(F5,  offset, dst);   offset += 8;
  stfd(F6,  offset, dst);   offset += 8;
  stfd(F7,  offset, dst);   offset += 8;
  stfd(F8,  offset, dst);   offset += 8;
  stfd(F9,  offset, dst);   offset += 8;
  stfd(F10, offset, dst);   offset += 8;
  stfd(F11, offset, dst);   offset += 8;
  stfd(F12, offset, dst);   offset += 8;
  stfd(F13, offset, dst);
}

// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
  ld(R2,  offset, src);   offset += 8;
  ld(R3,  offset, src);   offset += 8;
  ld(R4,  offset, src);   offset += 8;
  ld(R5,  offset, src);   offset += 8;
  ld(R6,  offset, src);   offset += 8;
  ld(R7,  offset, src);   offset += 8;
  ld(R8,  offset, src);   offset += 8;
  ld(R9,  offset, src);   offset += 8;
  ld(R10, offset, src);   offset += 8;
  ld(R11, offset, src);   offset += 8;
  ld(R12, offset, src);   offset += 8;

  lfd(F0,  offset, src);   offset += 8;
  lfd(F1,  offset, src);   offset += 8;
  lfd(F2,  offset, src);   offset += 8;
  lfd(F3,  offset, src);   offset += 8;
  lfd(F4,  offset, src);   offset += 8;
  lfd(F5,  offset, src);   offset += 8;
  lfd(F6,  offset, src);   offset += 8;
  lfd(F7,  offset, src);   offset += 8;
  lfd(F8,  offset, src);   offset += 8;
  lfd(F9,  offset, src);   offset += 8;
  lfd(F10, offset, src);   offset += 8;
  lfd(F11, offset, src);   offset += 8;
  lfd(F12, offset, src);   offset += 8;
  lfd(F13, offset, src);
}

void MacroAssembler::save_LR_CR(Register tmp) {
  mfcr(tmp);
  std(tmp, _abi(cr), R1_SP);
  mflr(tmp);
  std(tmp, _abi(lr), R1_SP);
  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
}

void MacroAssembler::restore_LR_CR(Register tmp) {
  assert(tmp != R1_SP, "must be distinct");
  ld(tmp, _abi(lr), R1_SP);
  mtlr(tmp);
  ld(tmp, _abi(cr), R1_SP);
  mtcr(tmp);
}

address MacroAssembler::get_PC_trash_LR(Register result) {
  Label L;
  bl(L);
  bind(L);
  address lr_pc = pc();
  mflr(result);
  return lr_pc;
}

void MacroAssembler::resize_frame(Register offset, Register tmp) {
#ifdef ASSERT
  assert_different_registers(offset, tmp, R1_SP);
  andi_(tmp, offset, frame::alignment_in_bytes-1);
  asm_assert_eq("resize_frame: unaligned", 0x204);
#endif

  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdux(tmp, R1_SP, offset);
}

void MacroAssembler::resize_frame(int offset, Register tmp) {
  assert(is_simm(offset, 16), "too big an offset");
  assert_different_registers(tmp, R1_SP);
  assert((offset & (frame::alignment_in_bytes-1)) == 0, "resize_frame: unaligned");
  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdu(tmp, offset, R1_SP);
}

void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
  // (addr == tmp1) || (addr == tmp2) is allowed here!
  assert(tmp1 != tmp2, "must be distinct");

  // compute offset w.r.t. current stack pointer
  // tmp_1 <- addr - SP (!)
  subf(tmp1, R1_SP, addr);

  // atomically update SP keeping back link.
  resize_frame(tmp1/* offset */, tmp2/* tmp */);
}

void MacroAssembler::push_frame(Register bytes, Register tmp) {
#ifdef ASSERT
  assert(bytes != R0, "r0 not allowed here");
  andi_(R0, bytes, frame::alignment_in_bytes-1);
  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
#endif
  neg(tmp, bytes);
  stdux(R1_SP, R1_SP, tmp);
}

// Push a frame of size `bytes'.
void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
  long offset = align_addr(bytes, frame::alignment_in_bytes);
  if (is_simm(-offset, 16)) {
    stdu(R1_SP, -offset, R1_SP);
  } else {
    load_const_optimized(tmp, -offset);
    stdux(R1_SP, R1_SP, tmp);
  }
}

// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size, tmp);
}

// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
                                                      Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  ld(R1_SP, _abi(callers_sp), R1_SP);
}
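
// Calling C functions: on ELFv2 the entry point is a plain code address
// (by ABI convention R12 holds it at the call site), while on ELFv1 a
// call goes through a FunctionDescriptor {entry, toc, env} which the
// generic branch_to() below reads field by field.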

#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
  // TODO(asmundak): make sure the caller uses R12 as function descriptor
  // most of the times.
  if (R12 != r_function_entry) {
    mr(R12, r_function_entry);
  }
  mtctr(R12);
  // Do a call or a branch.
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/true);
}

// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/false);
}

address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
  load_const(R12, function_entry, R0);
  return branch_to(R12, /*and_link=*/true);
}

#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
  // we emit standard ptrgl glue code here
  assert((function_descriptor != R0), "function_descriptor cannot be R0");

  // retrieve necessary entries from the function descriptor
  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
  mtctr(R0);

  if (load_toc_of_callee) {
    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
  }
  if (load_env_of_callee) {
    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
  } else if (load_toc_of_callee) {
    li(R11, 0);
  }

  // do a call or a branch
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C calling
// conventions.
// We don't use the TOC in generated code, so there is no need to save
// and restore its value.
address MacroAssembler::call_c(Register fd) {
  return branch_to(fd, /*and_link=*/true,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c_and_return_to_caller(Register fd) {
  return branch_to(fd, /*and_link=*/false,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
  if (rt != relocInfo::none) {
    // this call needs to be relocatable
    if (!ReoptimizeCallSequences
        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
        || fd == NULL   // support code-size estimation
        || !fd->is_friend_function()
        || fd->entry() == NULL) {
      // it's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);

      bool has_env = (fd != NULL && fd->env() != NULL);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/has_env);
    } else {
      // It's a friend function. Load the entry point and don't care about
      // toc and env. Use an optimizable call instruction, but ensure the
      // same code-size as in the case of a non-friend function.
      nop();
      nop();
      nop();
      bl64_patchable(fd->entry(), rt);
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  } else {
    // This call does not need to be relocatable, do more aggressive
    // optimizations.
    if (!ReoptimizeCallSequences
        || !fd->is_friend_function()) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/true);
    } else {
      // it's a friend function, load the entry point and don't care about
      // toc and env.
      address dest = fd->entry();
      if (is_within_range_of_b(dest, pc())) {
        bl(dest);
      } else {
        bl64_patchable(dest, rt);
      }
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  }
}

// Call a C function.  All constants needed reside in TOC.
//
// Read the address to call from the TOC.
// Read env from TOC, if fd specifies an env.
// Read new TOC from TOC.
address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
                                         relocInfo::relocType rt, Register toc) {
  if (!ReoptimizeCallSequences
      || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
      || !fd->is_friend_function()) {
    // It's not a friend function as defined by class FunctionDescriptor,
    // so do a full call-c here.
    assert(fd->entry() != NULL, "function must be linked");

    AddressLiteral fd_entry(fd->entry());
    bool success = load_const_from_method_toc(R11, fd_entry, toc, /*fixed_size*/ true);
    mtctr(R11);
    if (fd->env() == NULL) {
      li(R11, 0);
      nop();
    } else {
      AddressLiteral fd_env(fd->env());
      success = success && load_const_from_method_toc(R11, fd_env, toc, /*fixed_size*/ true);
    }
    AddressLiteral fd_toc(fd->toc());
    // Set R2_TOC (load from toc)
    success = success && load_const_from_method_toc(R2_TOC, fd_toc, toc, /*fixed_size*/ true);
    bctrl();
    _last_calls_return_pc = pc();
    if (!success) { return NULL; }
  } else {
    // It's a friend function, load the entry point and don't care about
    // toc and env. Use an optimizable call instruction, but ensure the
    // same code-size as in the case of a non-friend function.
    nop();
    bl64_patchable(fd->entry(), rt);
    _last_calls_return_pc = pc();
  }
  return _last_calls_return_pc;
}
#endif // ABI_ELFv2
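
// Illustrative use of the call_VM machinery below (a sketch; the runtime
// entry and register names are placeholders, not taken from this file):
//   call_VM(Roop_result,
//           CAST_FROM_FN_PTR(address, SomeRuntime::some_entry),
//           Rarg1, /*check_exceptions=*/false);
// The thread is passed implicitly in R3_ARG1, so Java-visible arguments
// start at R4_ARG2. Note that this implementation asserts when asked to
// check for exceptions (see call_VM_base below).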

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address entry_point,
                                  bool check_exceptions) {
  BLOCK_COMMENT("call_VM {");
  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = R1_SP;
  }
  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  address return_pc = call_c(entry_point, relocInfo::none);
#else
  address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
#endif

  reset_last_Java_frame();

  // Check for pending exceptions.
  if (check_exceptions) {
    // We don't check for exceptions here.
    ShouldNotReachHere();
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc;
  BLOCK_COMMENT("} call_VM");
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  BLOCK_COMMENT("call_VM_leaf {");
#if defined(ABI_ELFv2)
  call_c(entry_point, relocInfo::none);
#else
  call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
#endif
  BLOCK_COMMENT("} call_VM_leaf");
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  call_VM_base(oop_result, noreg, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  mr_if_needed(R6_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM_leaf(address entry_point) {
  call_VM_leaf_base(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  mr_if_needed(R3_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_3);
  call_VM_leaf(entry_point);
}

// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
                                               address* polling_address_ptr) {
  if (!is_ld(instruction))
    return false; // It's not a ld. Fail.

  int rt = inv_rt_field(instruction);
  int ra = inv_ra_field(instruction);
  int ds = inv_ds_field(instruction);
  if (!(ds == 0 && ra != 0 && rt == 0)) {
    return false; // It's not a ld(r0, X, ra). Fail.
  }

  if (!ucontext) {
    // Set polling address.
    if (polling_address_ptr != NULL) {
      *polling_address_ptr = NULL;
    }
    return true; // No ucontext given. Can't check value of ra. Assume true.
  }

#ifdef LINUX
  // Ucontext given. Check that register ra contains the address of
  // the safepoint polling page.
  ucontext_t* uc = (ucontext_t*) ucontext;
  // Set polling address.
  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
  if (polling_address_ptr != NULL) {
    *polling_address_ptr = addr;
  }
  return os::is_poll_address(addr);
#else
  // Not on Linux, ucontext must be NULL.
  ShouldNotReachHere();
  return false;
#endif
}

bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;

  if (is_stwx(instruction) || is_stwux(instruction)) {
    int ra = inv_ra_field(instruction);
    int rb = inv_rb_field(instruction);

    // look up content of ra and rb in ucontext
    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
    long    rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return os::is_memory_serialize_page(thread, ra_val+rb_val);
  } else if (is_stw(instruction) || is_stwu(instruction)) {
    int ra = inv_ra_field(instruction);
    int d1 = inv_d1_field(instruction);

    // look up content of ra in ucontext
    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
    return os::is_memory_serialize_page(thread, ra_val+d1);
  } else {
    return false;
  }
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return false;
#endif
}

void MacroAssembler::bang_stack_with_offset(int offset) {
  // When increasing the stack, the old stack pointer will be written
  // to the new top of stack according to the PPC64 ABI.
  // Therefore, stack banging is not necessary when increasing
  // the stack by <= os::vm_page_size() bytes.
  // When increasing the stack by a larger amount, this method is
  // called repeatedly to bang the intermediate pages.

  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");

  long stdoffset = -offset;

  if (is_simm(stdoffset, 16)) {
    // Signed 16 bit offset, a simple std is ok.
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, (int)(signed short)stdoffset, R1_SP);
    } else {
      std(R0, (int)(signed short)stdoffset, R1_SP);
    }
  } else if (is_simm(stdoffset, 31)) {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);

    Register tmp = R11;
    addis(tmp, R1_SP, hi);
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, lo, tmp);
    } else {
      std(R0, lo, tmp);
    }
  } else {
    ShouldNotReachHere();
  }
}

// If instruction is a stack bang of the form
//    std    R0,    x(Ry),       (see bang_stack_with_offset())
//    stdu   R1_SP, x(R1_SP),    (see push_frame(), resize_frame())
// or stdux  R1_SP, Rx, R1_SP    (see push_frame(), resize_frame())
// return the banged address. Otherwise, return 0.
address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;
  int rs = inv_rs_field(instruction);
  int ra = inv_ra_field(instruction);
  if (   (is_ld(instruction)   && rs == 0 &&  UseLoadInstructionsForStackBangingPPC64)
      || (is_std(instruction)  && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
      || (is_stdu(instruction) && rs == 1)) {
    int ds = inv_ds_field(instruction);
    // return banged address
    return ds+(address)uc->uc_mcontext.regs->gpr[ra];
  } else if (is_stdux(instruction) && rs == 1) {
    int rb = inv_rb_field(instruction);
    address sp = (address)uc->uc_mcontext.regs->gpr[1];
    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
                                  : sp + rb_val; // banged address
  }
  return NULL; // not a stack bang
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return NULL;
#endif
}

void MacroAssembler::reserved_stack_check(Register return_pc) {
  // Test if reserved zone needs to be enabled.
  Label no_reserved_zone_enabling;

  ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
  cmpld(CCR0, R1_SP, R0);
  blt_predict_taken(CCR0, no_reserved_zone_enabling);

  // Enable reserved zone again, throw stack overflow exception.
  push_frame_reg_args(0, R0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
  pop_frame();
  mtlr(return_pc);
  load_const_optimized(R0, StubRoutines::throw_delayed_StackOverflowError_entry());
  mtctr(R0);
  bctr();

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

void MacroAssembler::getandsetd(Register dest_current_value, Register exchange_value, Register addr_base,
                                bool cmpxchgx_hint) {
  Label retry;
  bind(retry);
  ldarx(dest_current_value, addr_base, cmpxchgx_hint);
  stdcx_(exchange_value, addr_base);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  } else {
    bne(                  CCR0, retry); // StXcx_ sets CCR0.
  }
}

void MacroAssembler::getandaddd(Register dest_current_value, Register inc_value, Register addr_base,
                                Register tmp, bool cmpxchgx_hint) {
  Label retry;
  bind(retry);
  ldarx(dest_current_value, addr_base, cmpxchgx_hint);
  add(tmp, dest_current_value, inc_value);
  stdcx_(tmp, addr_base);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  } else {
    bne(                  CCR0, retry); // StXcx_ sets CCR0.
  }
}
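
// Both helpers above use the classic PPC64 load-reserve/store-conditional
// retry idiom:
//   retry: ldarx ...; <modify>; stdcx_ ...; bne CCR0, retry
// stdcx_ sets CCR0 to EQ iff the reservation obtained by the ldarx was
// still intact, so the loop repeats until the update happened atomically.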

// Word/sub-word atomic helper functions

// Temps and addr_base are killed if size < 4 and processor does not support respective instructions.
// Only signed types are supported with size < 4.
// Atomic add always kills tmp1.
void MacroAssembler::atomic_get_and_modify_generic(Register dest_current_value, Register exchange_value,
                                                   Register addr_base, Register tmp1, Register tmp2, Register tmp3,
                                                   bool cmpxchgx_hint, bool is_add, int size) {
  // Sub-word instructions are available since Power 8.
  // For older processors, instruction_type != size holds, and we
  // emulate the sub-word instructions by constructing a 4-byte value
  // that leaves the other bytes unchanged.
  const int instruction_type = VM_Version::has_lqarx() ? size : 4;

  Label retry;
  Register shift_amount = noreg,
           val32 = dest_current_value,
           modval = is_add ? tmp1 : exchange_value;

  if (instruction_type != size) {
    assert_different_registers(tmp1, tmp2, tmp3, dest_current_value, exchange_value, addr_base);
    modval = tmp1;
    shift_amount = tmp2;
    val32 = tmp3;
    // Need some preparation: Compute shift amount, align address. Note: shorts must be 2 byte aligned.
#ifdef VM_LITTLE_ENDIAN
    rldic(shift_amount, addr_base, 3, 64-5); // (dest & 3) * 8;
    clrrdi(addr_base, addr_base, 2);
#else
    xori(shift_amount, addr_base, (size == 1) ? 3 : 2);
    clrrdi(addr_base, addr_base, 2);
    rldic(shift_amount, shift_amount, 3, 64-5); // byte: ((3-dest) & 3) * 8; short: ((1-dest/2) & 1) * 16;
#endif
  }

  // atomic emulation loop
  bind(retry);

  switch (instruction_type) {
    case 4: lwarx(val32, addr_base, cmpxchgx_hint); break;
    case 2: lharx(val32, addr_base, cmpxchgx_hint); break;
    case 1: lbarx(val32, addr_base, cmpxchgx_hint); break;
    default: ShouldNotReachHere();
  }

  if (instruction_type != size) {
    srw(dest_current_value, val32, shift_amount);
  }

  if (is_add) { add(modval, dest_current_value, exchange_value); }

  if (instruction_type != size) {
    // Transform exchange value such that the replacement can be done by one xor instruction.
    xorr(modval, dest_current_value, is_add ? modval : exchange_value);
    clrldi(modval, modval, (size == 1) ? 56 : 48);
    slw(modval, modval, shift_amount);
    xorr(modval, val32, modval);
  }

  switch (instruction_type) {
    case 4: stwcx_(modval, addr_base); break;
    case 2: sthcx_(modval, addr_base); break;
    case 1: stbcx_(modval, addr_base); break;
    default: ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  } else {
    bne(                  CCR0, retry); // StXcx_ sets CCR0.
  }

  // l?arx zero-extends, but Java wants byte/short values sign-extended.
  if (size == 1) {
    extsb(dest_current_value, dest_current_value);
  } else if (size == 2) {
    extsh(dest_current_value, dest_current_value);
  }
}
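
// The xor splice used above (and in cmpxchg_loop_body below), sketched:
// to replace only the selected byte/short lane within the 4-byte word
// val32, compute
//   delta  = (old_lane ^ new_lane), masked and shifted into place
//   result = val32 ^ delta
// All other lanes see delta == 0 and thus remain unchanged.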

// Temps, addr_base and exchange_value are killed if size < 4 and processor does not support respective instructions.
// Only signed types are supported with size < 4.
void MacroAssembler::cmpxchg_loop_body(ConditionRegister flag, Register dest_current_value,
                                       Register compare_value, Register exchange_value,
                                       Register addr_base, Register tmp1, Register tmp2,
                                       Label &retry, Label &failed, bool cmpxchgx_hint, int size) {
  // Sub-word instructions are available since Power 8.
  // For older processors, instruction_type != size holds, and we
  // emulate the sub-word instructions by constructing a 4-byte value
  // that leaves the other bytes unchanged.
  const int instruction_type = VM_Version::has_lqarx() ? size : 4;

  Register shift_amount = noreg,
           val32 = dest_current_value,
           modval = exchange_value;

  if (instruction_type != size) {
    assert_different_registers(tmp1, tmp2, dest_current_value, compare_value, exchange_value, addr_base);
    shift_amount = tmp1;
    val32 = tmp2;
    modval = tmp2;
    // Need some preparation: Compute shift amount, align address. Note: shorts must be 2 byte aligned.
#ifdef VM_LITTLE_ENDIAN
    rldic(shift_amount, addr_base, 3, 64-5); // (dest & 3) * 8;
    clrrdi(addr_base, addr_base, 2);
#else
    xori(shift_amount, addr_base, (size == 1) ? 3 : 2);
    clrrdi(addr_base, addr_base, 2);
    rldic(shift_amount, shift_amount, 3, 64-5); // byte: ((3-dest) & 3) * 8; short: ((1-dest/2) & 1) * 16;
#endif
    // Transform exchange value such that the replacement can be done by one xor instruction.
    xorr(exchange_value, compare_value, exchange_value);
    clrldi(exchange_value, exchange_value, (size == 1) ? 56 : 48);
    slw(exchange_value, exchange_value, shift_amount);
  }

  // atomic emulation loop
  bind(retry);

  switch (instruction_type) {
    case 4: lwarx(val32, addr_base, cmpxchgx_hint); break;
    case 2: lharx(val32, addr_base, cmpxchgx_hint); break;
    case 1: lbarx(val32, addr_base, cmpxchgx_hint); break;
    default: ShouldNotReachHere();
  }

  if (instruction_type != size) {
    srw(dest_current_value, val32, shift_amount);
  }
  if (size == 1) {
    extsb(dest_current_value, dest_current_value);
  } else if (size == 2) {
    extsh(dest_current_value, dest_current_value);
  }

  cmpw(flag, dest_current_value, compare_value);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(flag, failed);
  } else {
    bne(                  flag, failed);
  }
  // branch to done  => (flag == ne), (dest_current_value != compare_value)
  // fall through    => (flag == eq), (dest_current_value == compare_value)

  if (instruction_type != size) {
    xorr(modval, val32, exchange_value);
  }

  switch (instruction_type) {
    case 4: stwcx_(modval, addr_base); break;
    case 2: sthcx_(modval, addr_base); break;
    case 1: stbcx_(modval, addr_base); break;
    default: ShouldNotReachHere();
  }
}
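
// Note on the 'semantics' bits tested below and in cmpxchgd():
//   MemBarRel        emits release() before the update,
//   MemBarFenceAfter emits fence() after it,
//   MemBarAcq        emits isync() after it; together with the preceding
//                    conditional branch this forms the usual acquire idiom.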
1631     switch (size) {
1632       case 1: lbz(dest_current_value, 0, addr_base); extsb(dest_current_value, dest_current_value); break;
1633       case 2: lha(dest_current_value, 0, addr_base); break;
1634       case 4: lwz(dest_current_value, 0, addr_base); break;
1635       default: ShouldNotReachHere();
1636     }
1637     cmpw(flag, dest_current_value, compare_value);
1638     bne(flag, failed);
1639   }
1640
1641   // release/fence semantics
1642   if (semantics & MemBarRel) {
1643     release();
1644   }
1645
1646   cmpxchg_loop_body(flag, dest_current_value, compare_value, exchange_value, addr_base, tmp1, tmp2,
1647                     retry, failed, cmpxchgx_hint, size);
1648   if (!weak || use_result_reg) {
1649     if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
1650       bne_predict_not_taken(CCR0, weak ? failed : retry); // StXcx_ sets CCR0.
1651     } else {
1652       bne( CCR0, weak ? failed : retry); // StXcx_ sets CCR0.
1653     }
1654   }
1655   // fall through => (flag == eq), (dest_current_value == compare_value), (swapped)
1656
1657   // Result in register (must do this at the end because int_flag_success can be the
1658   // same register as one above).
1659   if (use_result_reg) {
1660     li(int_flag_success, 1);
1661   }
1662
1663   if (semantics & MemBarFenceAfter) {
1664     fence();
1665   } else if (semantics & MemBarAcq) {
1666     isync();
1667   }
1668
1669   if (use_result_reg && !preset_result_reg) {
1670     b(done);
1671   }
1672
1673   bind(failed);
1674   if (use_result_reg && !preset_result_reg) {
1675     li(int_flag_success, 0);
1676   }
1677
1678   bind(done);
1679   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
1680   // (flag == eq) => (dest_current_value == compare_value), ( swapped)
1681 }
1682
1683 // Performs atomic compare exchange:
1684 //   if (compare_value == *addr_base)
1685 //     *addr_base = exchange_value
1686 //     int_flag_success = 1;
1687 //   else
1688 //     int_flag_success = 0;
1689 //
1690 // ConditionRegister flag       = cmp(compare_value, *addr_base)
1691 // Register dest_current_value  = *addr_base
1692 // Register compare_value       Used to compare with value in memory
1693 // Register exchange_value      Written to memory if compare_value == *addr_base
1694 // Register addr_base           The memory location to compareXChange
1695 // Register int_flag_success    Set to 1 if exchange_value was written to *addr_base
1696 //
1697 // To avoid the costly compare-and-exchange, the value can be tested beforehand (contention_hint).
1698 // Several special cases exist to avoid generating unnecessary information.
1699 //
1700 void MacroAssembler::cmpxchgd(ConditionRegister flag,
1701                               Register dest_current_value, RegisterOrConstant compare_value, Register exchange_value,
1702                               Register addr_base, int semantics, bool cmpxchgx_hint,
1703                               Register int_flag_success, Label* failed_ext, bool contention_hint, bool weak) {
1704   Label retry;
1705   Label failed_int;
1706   Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
1707   Label done;
1708
1709   // Save one branch if result is returned via register and result register is different from the other ones.
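  // Sketch of the branch-saving trick used below (illustrative only): when
  // int_flag_success overlaps none of the other registers, it is preset to 0
  // before the loop ('preset_result_reg'), so the success path only needs
  //   li(int_flag_success, 1);
  // and both the failure-path store and the extra branch to 'done' are saved.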
1710 bool use_result_reg = (int_flag_success!=noreg); 1711 bool preset_result_reg = (int_flag_success!=dest_current_value && int_flag_success!=compare_value.register_or_noreg() && 1712 int_flag_success!=exchange_value && int_flag_success!=addr_base); 1713 assert(!weak || flag == CCR0, "weak only supported with CCR0"); 1714 assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both"); 1715 1716 if (use_result_reg && preset_result_reg) { 1717 li(int_flag_success, 0); // preset (assume cas failed) 1718 } 1719 1720 // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM). 1721 if (contention_hint) { // Don't try to reserve if cmp fails. 1722 ld(dest_current_value, 0, addr_base); 1723 cmpd(flag, compare_value, dest_current_value); 1724 bne(flag, failed); 1725 } 1726 1727 // release/fence semantics 1728 if (semantics & MemBarRel) { 1729 release(); 1730 } 1731 1732 // atomic emulation loop 1733 bind(retry); 1734 1735 ldarx(dest_current_value, addr_base, cmpxchgx_hint); 1736 cmpd(flag, compare_value, dest_current_value); 1737 if (UseStaticBranchPredictionInCompareAndSwapPPC64) { 1738 bne_predict_not_taken(flag, failed); 1739 } else { 1740 bne( flag, failed); 1741 } 1742 1743 stdcx_(exchange_value, addr_base); 1744 if (!weak || use_result_reg || failed_ext) { 1745 if (UseStaticBranchPredictionInCompareAndSwapPPC64) { 1746 bne_predict_not_taken(CCR0, weak ? failed : retry); // stXcx_ sets CCR0 1747 } else { 1748 bne( CCR0, weak ? failed : retry); // stXcx_ sets CCR0 1749 } 1750 } 1751 1752 // result in register (must do this at the end because int_flag_success can be the same register as one above) 1753 if (use_result_reg) { 1754 li(int_flag_success, 1); 1755 } 1756 1757 if (semantics & MemBarFenceAfter) { 1758 fence(); 1759 } else if (semantics & MemBarAcq) { 1760 isync(); 1761 } 1762 1763 if (use_result_reg && !preset_result_reg) { 1764 b(done); 1765 } 1766 1767 bind(failed_int); 1768 if (use_result_reg && !preset_result_reg) { 1769 li(int_flag_success, 0); 1770 } 1771 1772 bind(done); 1773 // (flag == ne) => (dest_current_value != compare_value), (!swapped) 1774 // (flag == eq) => (dest_current_value == compare_value), ( swapped) 1775 } 1776 1777 // Look up the method for a megamorphic invokeinterface call. 1778 // The target method is determined by <intf_klass, itable_index>. 1779 // The receiver klass is in recv_klass. 1780 // On success, the result will be in method_result, and execution falls through. 1781 // On failure, execution transfers to the given label. 1782 void MacroAssembler::lookup_interface_method(Register recv_klass, 1783 Register intf_klass, 1784 RegisterOrConstant itable_index, 1785 Register method_result, 1786 Register scan_temp, 1787 Register temp2, 1788 Label& L_no_such_interface, 1789 bool return_method) { 1790 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp); 1791 1792 // Compute start of first itableOffsetEntry (which is at the end of the vtable). 1793 int vtable_base = in_bytes(Klass::vtable_start_offset()); 1794 int itentry_off = itableMethodEntry::method_offset_in_bytes(); 1795 int logMEsize = exact_log2(itableMethodEntry::size() * wordSize); 1796 int scan_step = itableOffsetEntry::size() * wordSize; 1797 int log_vte_size= exact_log2(vtableEntry::size_in_bytes()); 1798 1799 lwz(scan_temp, in_bytes(Klass::vtable_length_offset()), recv_klass); 1800 // %%% We should store the aligned, prescaled offset in the klassoop. 1801 // Then the next several instructions would fold away. 
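  // Illustrative form of the address computed by the next three instructions
  // (entry sizes taken from vtableEntry/itableOffsetEntry as defined in their headers):
  //   scan_temp = recv_klass + vtable_start_offset + vtable_length * sizeof(vtableEntry)
  // i.e. the first itableOffsetEntry, located directly after the embedded vtable.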
1802 1803 sldi(scan_temp, scan_temp, log_vte_size); 1804 addi(scan_temp, scan_temp, vtable_base); 1805 add(scan_temp, recv_klass, scan_temp); 1806 1807 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 1808 if (return_method) { 1809 if (itable_index.is_register()) { 1810 Register itable_offset = itable_index.as_register(); 1811 sldi(method_result, itable_offset, logMEsize); 1812 if (itentry_off) { addi(method_result, method_result, itentry_off); } 1813 add(method_result, method_result, recv_klass); 1814 } else { 1815 long itable_offset = (long)itable_index.as_constant(); 1816 // static address, no relocation 1817 add_const_optimized(method_result, recv_klass, (itable_offset << logMEsize) + itentry_off, temp2); 1818 } 1819 } 1820 1821 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 1822 // if (scan->interface() == intf) { 1823 // result = (klass + scan->offset() + itable_index); 1824 // } 1825 // } 1826 Label search, found_method; 1827 1828 for (int peel = 1; peel >= 0; peel--) { 1829 // %%%% Could load both offset and interface in one ldx, if they were 1830 // in the opposite order. This would save a load. 1831 ld(temp2, itableOffsetEntry::interface_offset_in_bytes(), scan_temp); 1832 1833 // Check that this entry is non-null. A null entry means that 1834 // the receiver class doesn't implement the interface, and wasn't the 1835 // same as when the caller was compiled. 1836 cmpd(CCR0, temp2, intf_klass); 1837 1838 if (peel) { 1839 beq(CCR0, found_method); 1840 } else { 1841 bne(CCR0, search); 1842 // (invert the test to fall through to found_method...) 1843 } 1844 1845 if (!peel) break; 1846 1847 bind(search); 1848 1849 cmpdi(CCR0, temp2, 0); 1850 beq(CCR0, L_no_such_interface); 1851 addi(scan_temp, scan_temp, scan_step); 1852 } 1853 1854 bind(found_method); 1855 1856 // Got a hit. 
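  // State at this point, schematically: scan_temp points to the matching
  // itableOffsetEntry, and (if return_method) method_result already holds
  //   recv_klass + itable_index * sizeof(itableMethodEntry) + itentry_off,
  // so indexing it below by the entry's stored offset yields the Method*.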
1857 if (return_method) { 1858 int ito_offset = itableOffsetEntry::offset_offset_in_bytes(); 1859 lwz(scan_temp, ito_offset, scan_temp); 1860 ldx(method_result, scan_temp, method_result); 1861 } 1862 } 1863 1864 // virtual method calling 1865 void MacroAssembler::lookup_virtual_method(Register recv_klass, 1866 RegisterOrConstant vtable_index, 1867 Register method_result) { 1868 1869 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg()); 1870 1871 const int base = in_bytes(Klass::vtable_start_offset()); 1872 assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 1873 1874 if (vtable_index.is_register()) { 1875 sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord); 1876 add(recv_klass, vtable_index.as_register(), recv_klass); 1877 } else { 1878 addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord); 1879 } 1880 ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass); 1881 } 1882 1883 /////////////////////////////////////////// subtype checking //////////////////////////////////////////// 1884 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 1885 Register super_klass, 1886 Register temp1_reg, 1887 Register temp2_reg, 1888 Label* L_success, 1889 Label* L_failure, 1890 Label* L_slow_path, 1891 RegisterOrConstant super_check_offset) { 1892 1893 const Register check_cache_offset = temp1_reg; 1894 const Register cached_super = temp2_reg; 1895 1896 assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super); 1897 1898 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1899 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 1900 1901 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 1902 bool need_slow_path = (must_load_sco || super_check_offset.constant_or_zero() == sco_offset); 1903 1904 Label L_fallthrough; 1905 int label_nulls = 0; 1906 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 1907 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 1908 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 1909 assert(label_nulls <= 1 || 1910 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 1911 "at most one NULL in the batch, usually"); 1912 1913 // If the pointers are equal, we are done (e.g., String[] elements). 1914 // This self-check enables sharing of secondary supertype arrays among 1915 // non-primary types such as array-of-interface. Otherwise, each such 1916 // type would need its own customized SSA. 1917 // We move this check to the front of the fast path because many 1918 // type checks are in fact trivially successful in this manner, 1919 // so we get a nicely predicted branch right at the start of the check. 1920 cmpd(CCR0, sub_klass, super_klass); 1921 beq(CCR0, *L_success); 1922 1923 // Check the supertype display: 1924 if (must_load_sco) { 1925 // The super check offset is always positive... 1926 lwz(check_cache_offset, sco_offset, super_klass); 1927 super_check_offset = RegisterOrConstant(check_cache_offset); 1928 // super_check_offset is register. 1929 assert_different_registers(sub_klass, super_klass, cached_super, super_check_offset.as_register()); 1930 } 1931 // The loaded value is the offset from KlassOopDesc. 1932 1933 ld(cached_super, super_check_offset, sub_klass); 1934 cmpd(CCR0, cached_super, super_klass); 1935 1936 // This check has worked decisively for primary supers. 
1937 // Secondary supers are sought in the super_cache ('super_cache_addr').
1938 // (Secondary supers are interfaces and very deeply nested subtypes.)
1939 // This works in the same check above because of a tricky aliasing
1940 // between the super_cache and the primary super display elements.
1941 // (The 'super_check_addr' can address either, as the case requires.)
1942 // Note that the cache is updated below if it does not help us find
1943 // what we need immediately.
1944 // So if it was a primary super, we can just fail immediately.
1945 // Otherwise, it's the slow path for us (no success at this point).
1946
1947 #define FINAL_JUMP(label) if (&(label) != &L_fallthrough) { b(label); }
1948
1949   if (super_check_offset.is_register()) {
1950     beq(CCR0, *L_success);
1951     cmpwi(CCR0, super_check_offset.as_register(), sc_offset);
1952     if (L_failure == &L_fallthrough) {
1953       beq(CCR0, *L_slow_path);
1954     } else {
1955       bne(CCR0, *L_failure);
1956       FINAL_JUMP(*L_slow_path);
1957     }
1958   } else {
1959     if (super_check_offset.as_constant() == sc_offset) {
1960       // Need a slow path; fast failure is impossible.
1961       if (L_slow_path == &L_fallthrough) {
1962         beq(CCR0, *L_success);
1963       } else {
1964         bne(CCR0, *L_slow_path);
1965         FINAL_JUMP(*L_success);
1966       }
1967     } else {
1968       // No slow path; it's a fast decision.
1969       if (L_failure == &L_fallthrough) {
1970         beq(CCR0, *L_success);
1971       } else {
1972         bne(CCR0, *L_failure);
1973         FINAL_JUMP(*L_success);
1974       }
1975     }
1976   }
1977
1978   bind(L_fallthrough);
1979 #undef FINAL_JUMP
1980 }
1981
1982 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1983                                                    Register super_klass,
1984                                                    Register temp1_reg,
1985                                                    Register temp2_reg,
1986                                                    Label* L_success,
1987                                                    Register result_reg) {
1988   const Register array_ptr = temp1_reg; // current value from cache array
1989   const Register temp      = temp2_reg;
1990
1991   assert_different_registers(sub_klass, super_klass, array_ptr, temp);
1992
1993   int source_offset = in_bytes(Klass::secondary_supers_offset());
1994   int target_offset = in_bytes(Klass::secondary_super_cache_offset());
1995
1996   int length_offset = Array<Klass*>::length_offset_in_bytes();
1997   int base_offset   = Array<Klass*>::base_offset_in_bytes();
1998
1999   Label hit, loop, failure, fallthru;
2000
2001   ld(array_ptr, source_offset, sub_klass);
2002
2003   // TODO: PPC port: assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
2004   lwz(temp, length_offset, array_ptr);
2005   cmpwi(CCR0, temp, 0);
2006   beq(CCR0, result_reg!=noreg ? failure : fallthru); // length 0
2007
2008   mtctr(temp); // load ctr
2009
2010   bind(loop);
2011   // Oops in table are no longer compressed.
2012 ld(temp, base_offset, array_ptr); 2013 cmpd(CCR0, temp, super_klass); 2014 beq(CCR0, hit); 2015 addi(array_ptr, array_ptr, BytesPerWord); 2016 bdnz(loop); 2017 2018 bind(failure); 2019 if (result_reg!=noreg) li(result_reg, 1); // load non-zero result (indicates a miss) 2020 b(fallthru); 2021 2022 bind(hit); 2023 std(super_klass, target_offset, sub_klass); // save result to cache 2024 if (result_reg != noreg) { li(result_reg, 0); } // load zero result (indicates a hit) 2025 if (L_success != NULL) { b(*L_success); } 2026 else if (result_reg == noreg) { blr(); } // return with CR0.eq if neither label nor result reg provided 2027 2028 bind(fallthru); 2029 } 2030 2031 // Try fast path, then go to slow one if not successful 2032 void MacroAssembler::check_klass_subtype(Register sub_klass, 2033 Register super_klass, 2034 Register temp1_reg, 2035 Register temp2_reg, 2036 Label& L_success) { 2037 Label L_failure; 2038 check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success, &L_failure); 2039 check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success); 2040 bind(L_failure); // Fallthru if not successful. 2041 } 2042 2043 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg, 2044 Register temp_reg, 2045 Label& wrong_method_type) { 2046 assert_different_registers(mtype_reg, mh_reg, temp_reg); 2047 // Compare method type against that of the receiver. 2048 load_heap_oop(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg, 2049 noreg, noreg, false, IS_NOT_NULL); 2050 cmpd(CCR0, temp_reg, mtype_reg); 2051 bne(CCR0, wrong_method_type); 2052 } 2053 2054 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot, 2055 Register temp_reg, 2056 int extra_slot_offset) { 2057 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2058 int stackElementSize = Interpreter::stackElementSize; 2059 int offset = extra_slot_offset * stackElementSize; 2060 if (arg_slot.is_constant()) { 2061 offset += arg_slot.as_constant() * stackElementSize; 2062 return offset; 2063 } else { 2064 assert(temp_reg != noreg, "must specify"); 2065 sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); 2066 if (offset != 0) 2067 addi(temp_reg, temp_reg, offset); 2068 return temp_reg; 2069 } 2070 } 2071 2072 // Supports temp2_reg = R0. 2073 void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, 2074 Register mark_reg, Register temp_reg, 2075 Register temp2_reg, Label& done, Label* slow_case) { 2076 assert(UseBiasedLocking, "why call this otherwise?"); 2077 2078 #ifdef ASSERT 2079 assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg); 2080 #endif 2081 2082 Label cas_label; 2083 2084 // Branch to done if fast path fails and no slow_case provided. 2085 Label *slow_case_int = (slow_case != NULL) ? 
slow_case : &done; 2086 2087 // Biased locking 2088 // See whether the lock is currently biased toward our thread and 2089 // whether the epoch is still valid 2090 // Note that the runtime guarantees sufficient alignment of JavaThread 2091 // pointers to allow age to be placed into low bits 2092 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, 2093 "biased locking makes assumptions about bit layout"); 2094 2095 if (PrintBiasedLockingStatistics) { 2096 load_const(temp2_reg, (address) BiasedLocking::total_entry_count_addr(), temp_reg); 2097 lwzx(temp_reg, temp2_reg); 2098 addi(temp_reg, temp_reg, 1); 2099 stwx(temp_reg, temp2_reg); 2100 } 2101 2102 andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place); 2103 cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern); 2104 bne(cr_reg, cas_label); 2105 2106 load_klass(temp_reg, obj_reg); 2107 2108 load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place)); 2109 ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg); 2110 orr(temp_reg, R16_thread, temp_reg); 2111 xorr(temp_reg, mark_reg, temp_reg); 2112 andr(temp_reg, temp_reg, temp2_reg); 2113 cmpdi(cr_reg, temp_reg, 0); 2114 if (PrintBiasedLockingStatistics) { 2115 Label l; 2116 bne(cr_reg, l); 2117 load_const(temp2_reg, (address) BiasedLocking::biased_lock_entry_count_addr()); 2118 lwzx(mark_reg, temp2_reg); 2119 addi(mark_reg, mark_reg, 1); 2120 stwx(mark_reg, temp2_reg); 2121 // restore mark_reg 2122 ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); 2123 bind(l); 2124 } 2125 beq(cr_reg, done); 2126 2127 Label try_revoke_bias; 2128 Label try_rebias; 2129 2130 // At this point we know that the header has the bias pattern and 2131 // that we are not the bias owner in the current epoch. We need to 2132 // figure out more details about the state of the header in order to 2133 // know what operations can be legally performed on the object's 2134 // header. 2135 2136 // If the low three bits in the xor result aren't clear, that means 2137 // the prototype header is no longer biased and we have to revoke 2138 // the bias on this object. 2139 andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); 2140 cmpwi(cr_reg, temp2_reg, 0); 2141 bne(cr_reg, try_revoke_bias); 2142 2143 // Biasing is still enabled for this data type. See whether the 2144 // epoch of the current bias is still valid, meaning that the epoch 2145 // bits of the mark word are equal to the epoch bits of the 2146 // prototype header. (Note that the prototype header's epoch bits 2147 // only change at a safepoint.) If not, attempt to rebias the object 2148 // toward the current thread. Note that we must be absolutely sure 2149 // that the current epoch is invalid in order to do this because 2150 // otherwise the manipulations it performs on the mark word are 2151 // illegal. 2152 2153 int shift_amount = 64 - markOopDesc::epoch_shift; 2154 // rotate epoch bits to right (little) end and set other bits to 0 2155 // [ big part | epoch | little part ] -> [ 0..0 | epoch ] 2156 rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits); 2157 // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented 2158 bne(CCR0, try_rebias); 2159 2160 // The epoch of the current bias is still valid but we know nothing 2161 // about the owner; it might be set or it might be clear. Try to 2162 // acquire the bias of the object using an atomic operation. 
If this
2163 // fails we will go into the runtime to revoke the object's bias.
2164 // Note that we first construct the presumed unbiased header so we
2165 // don't accidentally blow away another thread's valid bias.
2166   andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
2167                             markOopDesc::age_mask_in_place |
2168                             markOopDesc::epoch_mask_in_place));
2169   orr(temp_reg, R16_thread, mark_reg);
2170
2171   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
2172
2173   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
2174   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
2175            /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
2176            /*where=*/obj_reg,
2177            MacroAssembler::MemBarAcq,
2178            MacroAssembler::cmpxchgx_hint_acquire_lock(),
2179            noreg, slow_case_int); // bail out if failed
2180
2181   // If the biasing toward our thread failed, this means that
2182   // another thread succeeded in biasing it toward itself and we
2183   // need to revoke that bias. The revocation will occur in the
2184   // interpreter runtime in the slow case.
2185   if (PrintBiasedLockingStatistics) {
2186     load_const(temp2_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp_reg);
2187     lwzx(temp_reg, temp2_reg);
2188     addi(temp_reg, temp_reg, 1);
2189     stwx(temp_reg, temp2_reg);
2190   }
2191   b(done);
2192
2193   bind(try_rebias);
2194   // At this point we know the epoch has expired, meaning that the
2195   // current "bias owner", if any, is actually invalid. Under these
2196   // circumstances _only_, we are allowed to use the current header's
2197   // value as the comparison value when doing the cas to acquire the
2198   // bias in the current epoch. In other words, we allow transfer of
2199   // the bias from one thread to another directly in this situation.
2200   load_klass(temp_reg, obj_reg);
2201   andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
2202   orr(temp2_reg, R16_thread, temp2_reg);
2203   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
2204   orr(temp_reg, temp2_reg, temp_reg);
2205
2206   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
2207
2208   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
2209            /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
2210            /*where=*/obj_reg,
2211            MacroAssembler::MemBarAcq,
2212            MacroAssembler::cmpxchgx_hint_acquire_lock(),
2213            noreg, slow_case_int); // bail out if failed
2214
2215   // If the biasing toward our thread failed, this means that
2216   // another thread succeeded in biasing it toward itself and we
2217   // need to revoke that bias. The revocation will occur in the
2218   // interpreter runtime in the slow case.
2219   if (PrintBiasedLockingStatistics) {
2220     load_const(temp2_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg);
2221     lwzx(temp_reg, temp2_reg);
2222     addi(temp_reg, temp_reg, 1);
2223     stwx(temp_reg, temp2_reg);
2224   }
2225   b(done);
2226
2227   bind(try_revoke_bias);
2228   // The prototype mark in the klass doesn't have the bias bit set any
2229   // more, indicating that objects of this data type are not supposed
2230   // to be biased any more. We are going to try to reset the mark of
2231   // this object to the prototype value and fall through to the
2232   // CAS-based locking scheme. Note that if our CAS fails, it means
2233   // that another thread raced us for the privilege of revoking the
2234   // bias of this particular object, so it's okay to continue in the
2235   // normal locking code.
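  // Value installed by the CAS below, schematically (only the age bits of
  // the old mark survive the revocation):
  //   new_mark = klass->prototype_header() | (mark & age_mask)
  // The CAS result is deliberately ignored here; see the comment above.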
2236 load_klass(temp_reg, obj_reg); 2237 ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg); 2238 andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place); 2239 orr(temp_reg, temp_reg, temp2_reg); 2240 2241 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 2242 2243 // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg). 2244 cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg, 2245 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg, 2246 /*where=*/obj_reg, 2247 MacroAssembler::MemBarAcq, 2248 MacroAssembler::cmpxchgx_hint_acquire_lock()); 2249 2250 // reload markOop in mark_reg before continuing with lightweight locking 2251 ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); 2252 2253 // Fall through to the normal CAS-based lock, because no matter what 2254 // the result of the above CAS, some thread must have succeeded in 2255 // removing the bias bit from the object's header. 2256 if (PrintBiasedLockingStatistics) { 2257 Label l; 2258 bne(cr_reg, l); 2259 load_const(temp2_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg); 2260 lwzx(temp_reg, temp2_reg); 2261 addi(temp_reg, temp_reg, 1); 2262 stwx(temp_reg, temp2_reg); 2263 bind(l); 2264 } 2265 2266 bind(cas_label); 2267 } 2268 2269 void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) { 2270 // Check for biased locking unlock case, which is a no-op 2271 // Note: we do not have to check the thread ID for two reasons. 2272 // First, the interpreter checks for IllegalMonitorStateException at 2273 // a higher level. Second, if the bias was revoked while we held the 2274 // lock, the object could not be rebiased toward another thread, so 2275 // the bias bit would be clear. 
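  // Sketch of the test emitted below: the unlock is a no-op iff the mark
  // word still carries the biased pattern, i.e.
  //   if ((obj->mark() & biased_lock_mask) == biased_lock_pattern) goto done;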
2276 2277 ld(temp_reg, 0, mark_addr); 2278 andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); 2279 2280 cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern); 2281 beq(cr_reg, done); 2282 } 2283 2284 // allocation (for C1) 2285 void MacroAssembler::eden_allocate( 2286 Register obj, // result: pointer to object after successful allocation 2287 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2288 int con_size_in_bytes, // object size in bytes if known at compile time 2289 Register t1, // temp register 2290 Register t2, // temp register 2291 Label& slow_case // continuation point if fast allocation fails 2292 ) { 2293 b(slow_case); 2294 } 2295 2296 void MacroAssembler::tlab_allocate( 2297 Register obj, // result: pointer to object after successful allocation 2298 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2299 int con_size_in_bytes, // object size in bytes if known at compile time 2300 Register t1, // temp register 2301 Label& slow_case // continuation point if fast allocation fails 2302 ) { 2303 // make sure arguments make sense 2304 assert_different_registers(obj, var_size_in_bytes, t1); 2305 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size"); 2306 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment"); 2307 2308 const Register new_top = t1; 2309 //verify_tlab(); not implemented 2310 2311 ld(obj, in_bytes(JavaThread::tlab_top_offset()), R16_thread); 2312 ld(R0, in_bytes(JavaThread::tlab_end_offset()), R16_thread); 2313 if (var_size_in_bytes == noreg) { 2314 addi(new_top, obj, con_size_in_bytes); 2315 } else { 2316 add(new_top, obj, var_size_in_bytes); 2317 } 2318 cmpld(CCR0, new_top, R0); 2319 bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case); 2320 2321 #ifdef ASSERT 2322 // make sure new free pointer is properly aligned 2323 { 2324 Label L; 2325 andi_(R0, new_top, MinObjAlignmentInBytesMask); 2326 beq(CCR0, L); 2327 stop("updated TLAB free is not properly aligned", 0x934); 2328 bind(L); 2329 } 2330 #endif // ASSERT 2331 2332 // update the tlab top pointer 2333 std(new_top, in_bytes(JavaThread::tlab_top_offset()), R16_thread); 2334 //verify_tlab(); not implemented 2335 } 2336 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2) { 2337 unimplemented("incr_allocated_bytes"); 2338 } 2339 2340 address MacroAssembler::emit_trampoline_stub(int destination_toc_offset, 2341 int insts_call_instruction_offset, Register Rtoc) { 2342 // Start the stub. 2343 address stub = start_a_stub(64); 2344 if (stub == NULL) { return NULL; } // CodeCache full: bail out 2345 2346 // Create a trampoline stub relocation which relates this trampoline stub 2347 // with the call instruction at insts_call_instruction_offset in the 2348 // instructions code-section. 2349 relocate(trampoline_stub_Relocation::spec(code()->insts()->start() + insts_call_instruction_offset)); 2350 const int stub_start_offset = offset(); 2351 2352 // For java_to_interp stubs we use R11_scratch1 as scratch register 2353 // and in call trampoline stubs we use R12_scratch2. This way we 2354 // can distinguish them (see is_NativeCallTrampolineStub_at()). 
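  // Schematic form of the trampoline emitted below (assuming Rtoc is
  // supplied; otherwise it is first derived from the global TOC):
  //   ld    R12, <destination_toc_offset>(Rtoc)  // load call target
  //   mtctr R12
  //   bctr                                       // jump to target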
2355 Register reg_scratch = R12_scratch2; 2356 2357 // Now, create the trampoline stub's code: 2358 // - load the TOC 2359 // - load the call target from the constant pool 2360 // - call 2361 if (Rtoc == noreg) { 2362 calculate_address_from_global_toc(reg_scratch, method_toc()); 2363 Rtoc = reg_scratch; 2364 } 2365 2366 ld_largeoffset_unchecked(reg_scratch, destination_toc_offset, Rtoc, false); 2367 mtctr(reg_scratch); 2368 bctr(); 2369 2370 const address stub_start_addr = addr_at(stub_start_offset); 2371 2372 // Assert that the encoded destination_toc_offset can be identified and that it is correct. 2373 assert(destination_toc_offset == NativeCallTrampolineStub_at(stub_start_addr)->destination_toc_offset(), 2374 "encoded offset into the constant pool must match"); 2375 // Trampoline_stub_size should be good. 2376 assert((uint)(offset() - stub_start_offset) <= trampoline_stub_size, "should be good size"); 2377 assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline"); 2378 2379 // End the stub. 2380 end_a_stub(); 2381 return stub; 2382 } 2383 2384 // TM on PPC64. 2385 void MacroAssembler::atomic_inc_ptr(Register addr, Register result, int simm16) { 2386 Label retry; 2387 bind(retry); 2388 ldarx(result, addr, /*hint*/ false); 2389 addi(result, result, simm16); 2390 stdcx_(result, addr); 2391 if (UseStaticBranchPredictionInCompareAndSwapPPC64) { 2392 bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0 2393 } else { 2394 bne( CCR0, retry); // stXcx_ sets CCR0 2395 } 2396 } 2397 2398 void MacroAssembler::atomic_ori_int(Register addr, Register result, int uimm16) { 2399 Label retry; 2400 bind(retry); 2401 lwarx(result, addr, /*hint*/ false); 2402 ori(result, result, uimm16); 2403 stwcx_(result, addr); 2404 if (UseStaticBranchPredictionInCompareAndSwapPPC64) { 2405 bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0 2406 } else { 2407 bne( CCR0, retry); // stXcx_ sets CCR0 2408 } 2409 } 2410 2411 #if INCLUDE_RTM_OPT 2412 2413 // Update rtm_counters based on abort status 2414 // input: abort_status 2415 // rtm_counters_Reg (RTMLockingCounters*) 2416 void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters_Reg) { 2417 // Mapping to keep PreciseRTMLockingStatistics similar to x86. 2418 // x86 ppc (! means inverted, ? means not the same) 2419 // 0 31 Set if abort caused by XABORT instruction. 2420 // 1 ! 7 If set, the transaction may succeed on a retry. This bit is always clear if bit 0 is set. 2421 // 2 13 Set if another logical processor conflicted with a memory address that was part of the transaction that aborted. 2422 // 3 10 Set if an internal buffer overflowed. 2423 // 4 ?12 Set if a debug breakpoint was hit. 2424 // 5 ?32 Set if an abort occurred during execution of a nested transaction. 2425 const int failure_bit[] = {tm_tabort, // Signal handler will set this too. 2426 tm_failure_persistent, 2427 tm_non_trans_cf, 2428 tm_trans_cf, 2429 tm_footprint_of, 2430 tm_failure_code, 2431 tm_transaction_level}; 2432 2433 const int num_failure_bits = sizeof(failure_bit) / sizeof(int); 2434 const int num_counters = RTMLockingCounters::ABORT_STATUS_LIMIT; 2435 2436 const int bit2counter_map[][num_counters] = 2437 // 0 = no map; 1 = mapped, no inverted logic; -1 = mapped, inverted logic 2438 // Inverted logic means that if a bit is set don't count it, or vice-versa. 2439 // Care must be taken when mapping bits to counters as bits for a given 2440 // counter must be mutually exclusive. 
Otherwise, the counter will be incremented
2441   // more than once.
2442   // counters:
2443   // 0        1        2         3         4         5
2444   // abort  , persist, conflict, overflow, debug   , nested         bits:
2445   {{ 1      , 0      , 0       , 0       , 0       , 0 },   // abort
2446    { 0      , -1     , 0       , 0       , 0       , 0 },   // failure_persistent
2447    { 0      , 0      , 1       , 0       , 0       , 0 },   // non_trans_cf
2448    { 0      , 0      , 1       , 0       , 0       , 0 },   // trans_cf
2449    { 0      , 0      , 0       , 1       , 0       , 0 },   // footprint_of
2450    { 0      , 0      , 0       , 0       , -1      , 0 },   // failure_code = 0xD4
2451    { 0      , 0      , 0       , 0       , 0       , 1 }};  // transaction_level > 1
2452   // ...
2453
2454   // Move abort_status value to R0 and use abort_status register as a
2455   // temporary register because R0 as third operand in ld/std is treated
2456   // as base address zero (value). Likewise, R0 as second operand in addi
2457   // is problematic because it amounts to li.
2458   const Register temp_Reg = abort_status;
2459   const Register abort_status_R0 = R0;
2460   mr(abort_status_R0, abort_status);
2461
2462   // Increment total abort counter.
2463   int counters_offs = RTMLockingCounters::abort_count_offset();
2464   ld(temp_Reg, counters_offs, rtm_counters_Reg);
2465   addi(temp_Reg, temp_Reg, 1);
2466   std(temp_Reg, counters_offs, rtm_counters_Reg);
2467
2468   // Increment specific abort counters.
2469   if (PrintPreciseRTMLockingStatistics) {
2470
2471     // #0 counter offset.
2472     int abortX_offs;
2473     abortX_offs = RTMLockingCounters::abortX_count_offset();
2474
2475     for (int nbit = 0; nbit < num_failure_bits; nbit++) {
2476       for (int ncounter = 0; ncounter < num_counters; ncounter++) {
2477         if (bit2counter_map[nbit][ncounter] != 0) {
2478           Label check_abort;
2479           int abort_counter_offs;
2480
2481           abort_counter_offs = abortX_offs + (ncounter << 3);
2482
2483           if (failure_bit[nbit] == tm_transaction_level) {
2484             // Don't check outer transaction, TL = 1 (bit 63). Hence only
2485             // 11 bits in the TL field are checked to find out if failure
2486             // occurred in a nested transaction. This check also matches
2487             // the case when nesting_of = 1 (nesting overflow).
2488             rldicr_(temp_Reg, abort_status_R0, failure_bit[nbit], 10);
2489           } else if (failure_bit[nbit] == tm_failure_code) {
2490             // Check failure code for trap or illegal caught in TM.
2491             // Bits 0:7 are tested as bit 7 (persistent) is copied from
2492             // tabort or treclaim source operand.
2493             // On Linux: trap or illegal is TM_CAUSE_SIGNAL (0xD4).
2494             rldicl(temp_Reg, abort_status_R0, 8, 56);
2495             cmpdi(CCR0, temp_Reg, 0xD4);
2496           } else {
2497             rldicr_(temp_Reg, abort_status_R0, failure_bit[nbit], 0);
2498           }
2499
2500           if (bit2counter_map[nbit][ncounter] == 1) {
2501             beq(CCR0, check_abort);
2502           } else {
2503             bne(CCR0, check_abort);
2504           }
2505
2506           // We don't increment atomically.
2507           ld(temp_Reg, abort_counter_offs, rtm_counters_Reg);
2508           addi(temp_Reg, temp_Reg, 1);
2509           std(temp_Reg, abort_counter_offs, rtm_counters_Reg);
2510
2511           bind(check_abort);
2512         }
2513       }
2514     }
2515   }
2516   // Restore abort_status.
2517   mr(abort_status, abort_status_R0);
2518 }
2519
2520 // Branch if (random & (count-1) != 0), count is 2^n
2521 // tmp and CR0 are killed
2522 void MacroAssembler::branch_on_random_using_tb(Register tmp, int count, Label& brLabel) {
2523   mftb(tmp);
2524   andi_(tmp, tmp, count-1);
2525   bne(CCR0, brLabel);
2526 }
2527
2528 // Perform abort ratio calculation, set no_rtm bit if high ratio.
2529 // input: rtm_counters_Reg (RTMLockingCounters* address) - KILLED 2530 void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg, 2531 RTMLockingCounters* rtm_counters, 2532 Metadata* method_data) { 2533 Label L_done, L_check_always_rtm1, L_check_always_rtm2; 2534 2535 if (RTMLockingCalculationDelay > 0) { 2536 // Delay calculation. 2537 ld(rtm_counters_Reg, (RegisterOrConstant)(intptr_t)RTMLockingCounters::rtm_calculation_flag_addr()); 2538 cmpdi(CCR0, rtm_counters_Reg, 0); 2539 beq(CCR0, L_done); 2540 load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload 2541 } 2542 // Abort ratio calculation only if abort_count > RTMAbortThreshold. 2543 // Aborted transactions = abort_count * 100 2544 // All transactions = total_count * RTMTotalCountIncrRate 2545 // Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio) 2546 ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg); 2547 if (is_simm(RTMAbortThreshold, 16)) { // cmpdi can handle 16bit immediate only. 2548 cmpdi(CCR0, R0, RTMAbortThreshold); 2549 blt(CCR0, L_check_always_rtm2); // reload of rtm_counters_Reg not necessary 2550 } else { 2551 load_const_optimized(rtm_counters_Reg, RTMAbortThreshold); 2552 cmpd(CCR0, R0, rtm_counters_Reg); 2553 blt(CCR0, L_check_always_rtm1); // reload of rtm_counters_Reg required 2554 } 2555 mulli(R0, R0, 100); 2556 2557 const Register tmpReg = rtm_counters_Reg; 2558 ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg); 2559 mulli(tmpReg, tmpReg, RTMTotalCountIncrRate); // allowable range: int16 2560 mulli(tmpReg, tmpReg, RTMAbortRatio); // allowable range: int16 2561 cmpd(CCR0, R0, tmpReg); 2562 blt(CCR0, L_check_always_rtm1); // jump to reload 2563 if (method_data != NULL) { 2564 // Set rtm_state to "no rtm" in MDO. 2565 // Not using a metadata relocation. Method and Class Loader are kept alive anyway. 2566 // (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.) 2567 load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg); 2568 atomic_ori_int(R0, tmpReg, NoRTM); 2569 } 2570 b(L_done); 2571 2572 bind(L_check_always_rtm1); 2573 load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload 2574 bind(L_check_always_rtm2); 2575 ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg); 2576 int64_t thresholdValue = RTMLockingThreshold / RTMTotalCountIncrRate; 2577 if (is_simm(thresholdValue, 16)) { // cmpdi can handle 16bit immediate only. 2578 cmpdi(CCR0, tmpReg, thresholdValue); 2579 } else { 2580 load_const_optimized(R0, thresholdValue); 2581 cmpd(CCR0, tmpReg, R0); 2582 } 2583 blt(CCR0, L_done); 2584 if (method_data != NULL) { 2585 // Set rtm_state to "always rtm" in MDO. 2586 // Not using a metadata relocation. See above. 2587 load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg); 2588 atomic_ori_int(R0, tmpReg, UseRTM); 2589 } 2590 bind(L_done); 2591 } 2592 2593 // Update counters and perform abort ratio calculation. 2594 // input: abort_status_Reg 2595 void MacroAssembler::rtm_profiling(Register abort_status_Reg, Register temp_Reg, 2596 RTMLockingCounters* rtm_counters, 2597 Metadata* method_data, 2598 bool profile_rtm) { 2599 2600 assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); 2601 // Update rtm counters based on state at abort. 2602 // Reads abort_status_Reg, updates flags. 
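  // Worked example of the threshold test in rtm_abort_ratio_calculation
  // (called below; flag values illustrative): with RTMAbortRatio == 50 and
  // RTMTotalCountIncrRate == 64, the method is switched to NoRTM once
  //   abort_count * 100 >= total_count * 64 * 50.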
2603 assert_different_registers(abort_status_Reg, temp_Reg); 2604 load_const_optimized(temp_Reg, (address)rtm_counters, R0); 2605 rtm_counters_update(abort_status_Reg, temp_Reg); 2606 if (profile_rtm) { 2607 assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); 2608 rtm_abort_ratio_calculation(temp_Reg, rtm_counters, method_data); 2609 } 2610 } 2611 2612 // Retry on abort if abort's status indicates non-persistent failure. 2613 // inputs: retry_count_Reg 2614 // : abort_status_Reg 2615 // output: retry_count_Reg decremented by 1 2616 void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, 2617 Label& retryLabel, Label* checkRetry) { 2618 Label doneRetry; 2619 rldicr_(R0, abort_status_Reg, tm_failure_persistent, 0); 2620 bne(CCR0, doneRetry); 2621 if (checkRetry) { bind(*checkRetry); } 2622 addic_(retry_count_Reg, retry_count_Reg, -1); 2623 blt(CCR0, doneRetry); 2624 b(retryLabel); 2625 bind(doneRetry); 2626 } 2627 2628 // Spin and retry if lock is busy. 2629 // inputs: owner_addr_Reg (monitor address) 2630 // : retry_count_Reg 2631 // output: retry_count_Reg decremented by 1 2632 // CTR is killed 2633 void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register owner_addr_Reg, Label& retryLabel) { 2634 Label SpinLoop, doneRetry, doRetry; 2635 addic_(retry_count_Reg, retry_count_Reg, -1); 2636 blt(CCR0, doneRetry); 2637 2638 if (RTMSpinLoopCount > 1) { 2639 li(R0, RTMSpinLoopCount); 2640 mtctr(R0); 2641 } 2642 2643 // low thread priority 2644 smt_prio_low(); 2645 bind(SpinLoop); 2646 2647 if (RTMSpinLoopCount > 1) { 2648 bdz(doRetry); 2649 ld(R0, 0, owner_addr_Reg); 2650 cmpdi(CCR0, R0, 0); 2651 bne(CCR0, SpinLoop); 2652 } 2653 2654 bind(doRetry); 2655 2656 // restore thread priority to default in userspace 2657 #ifdef LINUX 2658 smt_prio_medium_low(); 2659 #else 2660 smt_prio_medium(); 2661 #endif 2662 2663 b(retryLabel); 2664 2665 bind(doneRetry); 2666 } 2667 2668 // Use RTM for normal stack locks. 
2669 // Input: objReg (object to lock) 2670 void MacroAssembler::rtm_stack_locking(ConditionRegister flag, 2671 Register obj, Register mark_word, Register tmp, 2672 Register retry_on_abort_count_Reg, 2673 RTMLockingCounters* stack_rtm_counters, 2674 Metadata* method_data, bool profile_rtm, 2675 Label& DONE_LABEL, Label& IsInflated) { 2676 assert(UseRTMForStackLocks, "why call this otherwise?"); 2677 assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking"); 2678 Label L_rtm_retry, L_decrement_retry, L_on_abort; 2679 2680 if (RTMRetryCount > 0) { 2681 load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort 2682 bind(L_rtm_retry); 2683 } 2684 andi_(R0, mark_word, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased 2685 bne(CCR0, IsInflated); 2686 2687 if (PrintPreciseRTMLockingStatistics || profile_rtm) { 2688 Label L_noincrement; 2689 if (RTMTotalCountIncrRate > 1) { 2690 branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement); 2691 } 2692 assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM"); 2693 load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0); 2694 //atomic_inc_ptr(tmp, /*temp, will be reloaded*/mark_word); We don't increment atomically 2695 ldx(mark_word, tmp); 2696 addi(mark_word, mark_word, 1); 2697 stdx(mark_word, tmp); 2698 bind(L_noincrement); 2699 } 2700 tbegin_(); 2701 beq(CCR0, L_on_abort); 2702 ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // Reload in transaction, conflicts need to be tracked. 2703 andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits 2704 cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked 2705 beq(flag, DONE_LABEL); // all done if unlocked 2706 2707 if (UseRTMXendForLockBusy) { 2708 tend_(); 2709 b(L_decrement_retry); 2710 } else { 2711 tabort_(); 2712 } 2713 bind(L_on_abort); 2714 const Register abort_status_Reg = tmp; 2715 mftexasr(abort_status_Reg); 2716 if (PrintPreciseRTMLockingStatistics || profile_rtm) { 2717 rtm_profiling(abort_status_Reg, /*temp*/mark_word, stack_rtm_counters, method_data, profile_rtm); 2718 } 2719 ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); // reload 2720 if (RTMRetryCount > 0) { 2721 // Retry on lock abort if abort status is not permanent. 2722 rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry, &L_decrement_retry); 2723 } else { 2724 bind(L_decrement_retry); 2725 } 2726 } 2727 2728 // Use RTM for inflating locks 2729 // inputs: obj (object to lock) 2730 // mark_word (current header - KILLED) 2731 // boxReg (on-stack box address (displaced header location) - KILLED) 2732 void MacroAssembler::rtm_inflated_locking(ConditionRegister flag, 2733 Register obj, Register mark_word, Register boxReg, 2734 Register retry_on_busy_count_Reg, Register retry_on_abort_count_Reg, 2735 RTMLockingCounters* rtm_counters, 2736 Metadata* method_data, bool profile_rtm, 2737 Label& DONE_LABEL) { 2738 assert(UseRTMLocking, "why call this otherwise?"); 2739 Label L_rtm_retry, L_decrement_retry, L_on_abort; 2740 // Clean monitor_value bit to get valid pointer. 2741 int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value; 2742 2743 // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark(). 
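  // Any non-NULL value works here: a zero displaced header would later be
  // taken for a recursive stack unlock. Flow sketch of this function
  // (illustrative only):
  //   tbegin_; if (monitor->owner == NULL) goto DONE;  // lock elided
  //   otherwise end/abort the transaction, profile/retry, and finally
  //   CAS monitor->owner from NULL to the current thread.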
2744 std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg); 2745 const Register tmpReg = boxReg; 2746 const Register owner_addr_Reg = mark_word; 2747 addi(owner_addr_Reg, mark_word, owner_offset); 2748 2749 if (RTMRetryCount > 0) { 2750 load_const_optimized(retry_on_busy_count_Reg, RTMRetryCount); // Retry on lock busy. 2751 load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort. 2752 bind(L_rtm_retry); 2753 } 2754 if (PrintPreciseRTMLockingStatistics || profile_rtm) { 2755 Label L_noincrement; 2756 if (RTMTotalCountIncrRate > 1) { 2757 branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement); 2758 } 2759 assert(rtm_counters != NULL, "should not be NULL when profiling RTM"); 2760 load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg); 2761 //atomic_inc_ptr(R0, tmpReg); We don't increment atomically 2762 ldx(tmpReg, R0); 2763 addi(tmpReg, tmpReg, 1); 2764 stdx(tmpReg, R0); 2765 bind(L_noincrement); 2766 } 2767 tbegin_(); 2768 beq(CCR0, L_on_abort); 2769 // We don't reload mark word. Will only be reset at safepoint. 2770 ld(R0, 0, owner_addr_Reg); // Load in transaction, conflicts need to be tracked. 2771 cmpdi(flag, R0, 0); 2772 beq(flag, DONE_LABEL); 2773 2774 if (UseRTMXendForLockBusy) { 2775 tend_(); 2776 b(L_decrement_retry); 2777 } else { 2778 tabort_(); 2779 } 2780 bind(L_on_abort); 2781 const Register abort_status_Reg = tmpReg; 2782 mftexasr(abort_status_Reg); 2783 if (PrintPreciseRTMLockingStatistics || profile_rtm) { 2784 rtm_profiling(abort_status_Reg, /*temp*/ owner_addr_Reg, rtm_counters, method_data, profile_rtm); 2785 // Restore owner_addr_Reg 2786 ld(mark_word, oopDesc::mark_offset_in_bytes(), obj); 2787 #ifdef ASSERT 2788 andi_(R0, mark_word, markOopDesc::monitor_value); 2789 asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint. 2790 #endif 2791 addi(owner_addr_Reg, mark_word, owner_offset); 2792 } 2793 if (RTMRetryCount > 0) { 2794 // Retry on lock abort if abort status is not permanent. 2795 rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry); 2796 } 2797 2798 // Appears unlocked - try to swing _owner from null to non-null. 2799 cmpxchgd(flag, /*current val*/ R0, (intptr_t)0, /*new val*/ R16_thread, owner_addr_Reg, 2800 MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, 2801 MacroAssembler::cmpxchgx_hint_acquire_lock(), noreg, &L_decrement_retry, true); 2802 2803 if (RTMRetryCount > 0) { 2804 // success done else retry 2805 b(DONE_LABEL); 2806 bind(L_decrement_retry); 2807 // Spin and retry if lock is busy. 2808 rtm_retry_lock_on_busy(retry_on_busy_count_Reg, owner_addr_Reg, L_rtm_retry); 2809 } else { 2810 bind(L_decrement_retry); 2811 } 2812 } 2813 2814 #endif // INCLUDE_RTM_OPT 2815 2816 // "The box" is the space on the stack where we copy the object mark. 2817 void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box, 2818 Register temp, Register displaced_header, Register current_header, 2819 bool try_bias, 2820 RTMLockingCounters* rtm_counters, 2821 RTMLockingCounters* stack_rtm_counters, 2822 Metadata* method_data, 2823 bool use_rtm, bool profile_rtm) { 2824 assert_different_registers(oop, box, temp, displaced_header, current_header); 2825 assert(flag != CCR0, "bad condition register"); 2826 Label cont; 2827 Label object_has_monitor; 2828 Label cas_failed; 2829 2830 // Load markOop from object into displaced_header. 
2831 ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop); 2832 2833 2834 // Always do locking in runtime. 2835 if (EmitSync & 0x01) { 2836 cmpdi(flag, oop, 0); // Oop can't be 0 here => always false. 2837 return; 2838 } 2839 2840 if (try_bias) { 2841 biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont); 2842 } 2843 2844 #if INCLUDE_RTM_OPT 2845 if (UseRTMForStackLocks && use_rtm) { 2846 rtm_stack_locking(flag, oop, displaced_header, temp, /*temp*/ current_header, 2847 stack_rtm_counters, method_data, profile_rtm, 2848 cont, object_has_monitor); 2849 } 2850 #endif // INCLUDE_RTM_OPT 2851 2852 // Handle existing monitor. 2853 if ((EmitSync & 0x02) == 0) { 2854 // The object has an existing monitor iff (mark & monitor_value) != 0. 2855 andi_(temp, displaced_header, markOopDesc::monitor_value); 2856 bne(CCR0, object_has_monitor); 2857 } 2858 2859 // Set displaced_header to be (markOop of object | UNLOCK_VALUE). 2860 ori(displaced_header, displaced_header, markOopDesc::unlocked_value); 2861 2862 // Load Compare Value application register. 2863 2864 // Initialize the box. (Must happen before we update the object mark!) 2865 std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box); 2866 2867 // Must fence, otherwise, preceding store(s) may float below cmpxchg. 2868 // Compare object markOop with mark and if equal exchange scratch1 with object markOop. 2869 cmpxchgd(/*flag=*/flag, 2870 /*current_value=*/current_header, 2871 /*compare_value=*/displaced_header, 2872 /*exchange_value=*/box, 2873 /*where=*/oop, 2874 MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq, 2875 MacroAssembler::cmpxchgx_hint_acquire_lock(), 2876 noreg, 2877 &cas_failed, 2878 /*check without membar and ldarx first*/true); 2879 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 2880 2881 // If the compare-and-exchange succeeded, then we found an unlocked 2882 // object and we have now locked it. 2883 b(cont); 2884 2885 bind(cas_failed); 2886 // We did not see an unlocked object so try the fast recursive case. 2887 2888 // Check if the owner is self by comparing the value in the markOop of object 2889 // (current_header) with the stack pointer. 2890 sub(current_header, current_header, R1_SP); 2891 load_const_optimized(temp, ~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place); 2892 2893 and_(R0/*==0?*/, current_header, temp); 2894 // If condition is true we are cont and hence we can store 0 as the 2895 // displaced header in the box, which indicates that it is a recursive lock. 2896 mcrf(flag,CCR0); 2897 std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box); 2898 2899 // Handle existing monitor. 2900 if ((EmitSync & 0x02) == 0) { 2901 b(cont); 2902 2903 bind(object_has_monitor); 2904 // The object's monitor m is unlocked iff m->owner == NULL, 2905 // otherwise m->owner may contain a thread or a stack address. 2906 2907 #if INCLUDE_RTM_OPT 2908 // Use the same RTM locking code in 32- and 64-bit VM. 2909 if (use_rtm) { 2910 rtm_inflated_locking(flag, oop, displaced_header, box, temp, /*temp*/ current_header, 2911 rtm_counters, method_data, profile_rtm, cont); 2912 } else { 2913 #endif // INCLUDE_RTM_OPT 2914 2915 // Try to CAS m->owner from NULL to current thread. 
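    // Address computation sketch: displaced_header still holds the mark word,
    // which equals monitor_address | monitor_value, so a single addi with
    // (owner_offset - monitor_value) yields &monitor->_owner in temp.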
2916     addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
2917     cmpxchgd(/*flag=*/flag,
2918              /*current_value=*/current_header,
2919              /*compare_value=*/(intptr_t)0,
2920              /*exchange_value=*/R16_thread,
2921              /*where=*/temp,
2922              MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
2923              MacroAssembler::cmpxchgx_hint_acquire_lock());
2924
2925     // Store a non-null value into the box.
2926     std(box, BasicLock::displaced_header_offset_in_bytes(), box);
2927
2928 #   ifdef ASSERT
2929     bne(flag, cont);
2930     // We have acquired the monitor, check some invariants.
2931     addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
2932     // Invariant 1: _recursions should be 0.
2933     //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
2934     asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
2935                             "monitor->_recursions should be 0", -1);
2936 #   endif
2937
2938 #if INCLUDE_RTM_OPT
2939   } // use_rtm()
2940 #endif
2941   }
2942
2943   bind(cont);
2944   // flag == EQ indicates success
2945   // flag == NE indicates failure
2946 }
2947
2948 void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
2949                                                  Register temp, Register displaced_header, Register current_header,
2950                                                  bool try_bias, bool use_rtm) {
2951   assert_different_registers(oop, box, temp, displaced_header, current_header);
2952   assert(flag != CCR0, "bad condition register");
2953   Label cont;
2954   Label object_has_monitor;
2955
2956   // Always do locking in runtime.
2957   if (EmitSync & 0x01) {
2958     cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
2959     return;
2960   }
2961
2962   if (try_bias) {
2963     biased_locking_exit(flag, oop, current_header, cont);
2964   }
2965
2966 #if INCLUDE_RTM_OPT
2967   if (UseRTMForStackLocks && use_rtm) {
2968     assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
2969     Label L_regular_unlock;
2970     ld(current_header, oopDesc::mark_offset_in_bytes(), oop); // fetch markword
2971     andi(R0, current_header, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
2972     cmpwi(flag, R0, markOopDesc::unlocked_value); // bits = 001 unlocked
2973     bne(flag, L_regular_unlock); // else RegularLock
2974     tend_(); // otherwise end...
2975     b(cont); // ... and we're done
2976     bind(L_regular_unlock);
2977   }
2978 #endif
2979
2980   // Find the lock address and load the displaced header from the stack.
2981   ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
2982
2983   // If the displaced header is 0, we have a recursive unlock.
2984   cmpdi(flag, displaced_header, 0);
2985   beq(flag, cont);
2986
2987   // Handle existing monitor.
2988   if ((EmitSync & 0x02) == 0) {
2989     // The object has an existing monitor iff (mark & monitor_value) != 0.
2990     RTM_OPT_ONLY( if (!(UseRTMForStackLocks && use_rtm)) ) // skip load if already done
2991     ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
2992     andi_(R0, current_header, markOopDesc::monitor_value);
2993     bne(CCR0, object_has_monitor);
2994   }
2995
2996   // Check if it is still a lightweight lock; this is true if we see
2997   // the stack address of the basicLock in the markOop of the object.
2998   // Cmpxchg sets flag to cmpd(current_header, box).
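  // Lightweight-unlock sketch (illustrative only):
  //   if (CAS(&obj->mark, box, displaced_header)) success;  // release store
  // A failed CAS leaves flag == NE: the mark no longer points at our box.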
2999 cmpxchgd(/*flag=*/flag, 3000 /*current_value=*/current_header, 3001 /*compare_value=*/box, 3002 /*exchange_value=*/displaced_header, 3003 /*where=*/oop, 3004 MacroAssembler::MemBarRel, 3005 MacroAssembler::cmpxchgx_hint_release_lock(), 3006 noreg, 3007 &cont); 3008 3009 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3010 3011 // Handle existing monitor. 3012 if ((EmitSync & 0x02) == 0) { 3013 b(cont); 3014 3015 bind(object_has_monitor); 3016 addi(current_header, current_header, -markOopDesc::monitor_value); // monitor 3017 ld(temp, ObjectMonitor::owner_offset_in_bytes(), current_header); 3018 3019 // It's inflated. 3020 #if INCLUDE_RTM_OPT 3021 if (use_rtm) { 3022 Label L_regular_inflated_unlock; 3023 // Clean monitor_value bit to get valid pointer 3024 cmpdi(flag, temp, 0); 3025 bne(flag, L_regular_inflated_unlock); 3026 tend_(); 3027 b(cont); 3028 bind(L_regular_inflated_unlock); 3029 } 3030 #endif 3031 3032 ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header); 3033 xorr(temp, R16_thread, temp); // Will be 0 if we are the owner. 3034 orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions. 3035 cmpdi(flag, temp, 0); 3036 bne(flag, cont); 3037 3038 ld(temp, ObjectMonitor::EntryList_offset_in_bytes(), current_header); 3039 ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header); 3040 orr(temp, temp, displaced_header); // Will be 0 if both are 0. 3041 cmpdi(flag, temp, 0); 3042 bne(flag, cont); 3043 release(); 3044 std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header); 3045 } 3046 3047 bind(cont); 3048 // flag == EQ indicates success 3049 // flag == NE indicates failure 3050 } 3051 3052 // Write serialization page so VM thread can do a pseudo remote membar. 3053 // We use the current thread pointer to calculate a thread specific 3054 // offset to write to within the page. This minimizes bus traffic 3055 // due to cache line collision. 3056 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { 3057 srdi(tmp2, thread, os::get_serialize_page_shift_count()); 3058 3059 int mask = os::vm_page_size() - sizeof(int); 3060 if (Assembler::is_simm(mask, 16)) { 3061 andi(tmp2, tmp2, mask); 3062 } else { 3063 lis(tmp1, (int)((signed short) (mask >> 16))); 3064 ori(tmp1, tmp1, mask & 0x0000ffff); 3065 andr(tmp2, tmp2, tmp1); 3066 } 3067 3068 load_const(tmp1, (long) os::get_memory_serialize_page()); 3069 release(); 3070 stwx(R0, tmp1, tmp2); 3071 } 3072 3073 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) { 3074 if (SafepointMechanism::uses_thread_local_poll()) { 3075 ld(temp_reg, in_bytes(Thread::polling_page_offset()), R16_thread); 3076 // Armed page has poll_bit set. 3077 andi_(temp_reg, temp_reg, SafepointMechanism::poll_bit()); 3078 } else { 3079 lwz(temp_reg, (RegisterOrConstant)(intptr_t)SafepointSynchronize::address_of_state()); 3080 cmpwi(CCR0, temp_reg, SafepointSynchronize::_not_synchronized); 3081 } 3082 bne(CCR0, slow_path); 3083 } 3084 3085 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2, bool needs_frame) { 3086 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3087 bs->resolve_jobject(this, value, tmp1, tmp2, needs_frame); 3088 } 3089 3090 // Values for last_Java_pc, and last_Java_sp must comply to the rules 3091 // in frame_ppc.hpp. 
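// Publication-order sketch (illustrative): last_Java_sp doubles as the
// "anchor is valid" flag, so it must become visible last:
//   thread->anchor.last_Java_pc = pc;  // optional, may remain NULL
//   thread->anchor.last_Java_sp = sp;  // has_last_Java_frame() is now true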
3092 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
3093   // Always set last_Java_pc and flags first because once last_Java_sp
3094   // is visible, has_last_Java_frame is true and users will look at the
3095   // rest of the fields. (Note: flags should always be zero before we
3096   // get here, so they don't need to be set.)
3097
3098   // Verify that last_Java_pc was zeroed on return to Java
3099   asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
3100                           "last_Java_pc not zeroed before leaving Java", 0x200);
3101
3102   // When returning from calling out from Java mode, the frame anchor's
3103   // last_Java_pc will always be set to NULL. It is set here so that,
3104   // if we are doing a call to native (not VM), we capture the
3105   // known pc and don't have to rely on the native call having a
3106   // standard frame linkage where we can find the pc.
3107   if (last_Java_pc != noreg)
3108     std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
3109
3110   // Set last_Java_sp last.
3111   std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
3112 }
3113
3114 void MacroAssembler::reset_last_Java_frame(void) {
3115   asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3116                              R16_thread, "SP was not set, still zero", 0x202);
3117
3118   BLOCK_COMMENT("reset_last_Java_frame {");
3119   li(R0, 0);
3120
3121   // _last_Java_sp = 0
3122   std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
3123
3124   // _last_Java_pc = 0
3125   std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
3126   BLOCK_COMMENT("} reset_last_Java_frame");
3127 }
3128
3129 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
3130   assert_different_registers(sp, tmp1);
3131
3132   // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
3133   // TOP_IJAVA_FRAME_ABI.
3134   // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
3135   address entry = pc();
3136   load_const_optimized(tmp1, entry);
3137
3138   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
3139 }
3140
3141 void MacroAssembler::get_vm_result(Register oop_result) {
3142   // Read:
3143   //   R16_thread
3144   //   R16_thread->in_bytes(JavaThread::vm_result_offset())
3145   //
3146   // Updated:
3147   //   oop_result
3148   //   R16_thread->in_bytes(JavaThread::vm_result_offset())
3149
3150   verify_thread();
3151
3152   ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
3153   li(R0, 0);
3154   std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
3155
3156   verify_oop(oop_result);
3157 }
3158
3159 void MacroAssembler::get_vm_result_2(Register metadata_result) {
3160   // Read:
3161   //   R16_thread
3162   //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
3163   //
3164   // Updated:
3165   //   metadata_result
3166   //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
3167
3168   ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
3169   li(R0, 0);
3170   std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
3171 }
3172
3173 Register MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3174   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
3175   if (Universe::narrow_klass_base() != 0) {
3176     // Use dst as temp if it is free.
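    // Sketch of the encoding computed by this function (illustration only):
    //   compressed = (klass - narrow_klass_base) >> narrow_klass_shift;
    // R0 is handed to sub_const_optimized below as scratch for materializing the base.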
3177     sub_const_optimized(dst, current, Universe::narrow_klass_base(), R0);
3178     current = dst;
3179   }
3180   if (Universe::narrow_klass_shift() != 0) {
3181     srdi(dst, current, Universe::narrow_klass_shift());
3182     current = dst;
3183   }
3184   return current;
3185 }
3186
3187 void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
3188   if (UseCompressedClassPointers) {
3189     Register compressedKlass = encode_klass_not_null(ck, klass);
3190     stw(compressedKlass, oopDesc::klass_offset_in_bytes(), dst_oop);
3191   } else {
3192     std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
3193   }
3194 }
3195
3196 void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
3197   if (UseCompressedClassPointers) {
3198     if (val == noreg) {
3199       val = R0;
3200       li(val, 0);
3201     }
3202     stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
3203   }
3204 }
3205
3206 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3207   if (!UseCompressedClassPointers) return 0;
3208   int num_instrs = 1;  // shift or move
3209   if (Universe::narrow_klass_base() != 0) num_instrs = 7;  // shift + load const + add
3210   return num_instrs * BytesPerInstWord;
3211 }
3212
3213 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3214   assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
3215   if (src == noreg) src = dst;
3216   Register shifted_src = src;
3217   if (Universe::narrow_klass_shift() != 0 ||
3218       (Universe::narrow_klass_base() == 0 && src != dst)) { // Move required.
3219     shifted_src = dst;
3220     sldi(shifted_src, src, Universe::narrow_klass_shift());
3221   }
3222   if (Universe::narrow_klass_base() != 0) {
3223     add_const_optimized(dst, shifted_src, Universe::narrow_klass_base(), R0);
3224   }
3225 }
3226
3227 void MacroAssembler::load_klass(Register dst, Register src) {
3228   if (UseCompressedClassPointers) {
3229     lwz(dst, oopDesc::klass_offset_in_bytes(), src);
3230     // Attention: no null check here!
3231     decode_klass_not_null(dst, dst);
3232   } else {
3233     ld(dst, oopDesc::klass_offset_in_bytes(), src);
3234   }
3235 }
3236
3237 // ((OopHandle)result).resolve();
3238 void MacroAssembler::resolve_oop_handle(Register result) {
3239   // OopHandle::resolve is an indirection.
3240   ld(result, 0, result);
3241 }
3242
3243 void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
3244   ld(mirror, in_bytes(ConstMethod::constants_offset()), const_method);
3245   ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
3246   ld(mirror, in_bytes(Klass::java_mirror_offset()), mirror);
3247   resolve_oop_handle(mirror);
3248 }
3249
3250 // Clear Array
3251 // For very short arrays. tmp == R0 is allowed.
3252 void MacroAssembler::clear_memory_unrolled(Register base_ptr, int cnt_dwords, Register tmp, int offset) {
3253   if (cnt_dwords > 0) { li(tmp, 0); }
3254   for (int i = 0; i < cnt_dwords; ++i) { std(tmp, offset + i * 8, base_ptr); }
3255 }
3256
3257 // Version for constant short array length. Kills base_ptr. tmp == R0 is allowed.
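// Equivalent C sketch of the code emitted below for cnt_dwords >= 8
// (2x unrolled clearing loop; illustration only):
//   for (long i = cnt_dwords >> 1; i > 0; --i) { p[0] = 0; p[1] = 0; p += 2; }
//   if (cnt_dwords & 1) { *p = 0; }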
3258 void MacroAssembler::clear_memory_constlen(Register base_ptr, int cnt_dwords, Register tmp) { 3259 if (cnt_dwords < 8) { 3260 clear_memory_unrolled(base_ptr, cnt_dwords, tmp); 3261 return; 3262 } 3263 3264 Label loop; 3265 const long loopcnt = cnt_dwords >> 1, 3266 remainder = cnt_dwords & 1; 3267 3268 li(tmp, loopcnt); 3269 mtctr(tmp); 3270 li(tmp, 0); 3271 bind(loop); 3272 std(tmp, 0, base_ptr); 3273 std(tmp, 8, base_ptr); 3274 addi(base_ptr, base_ptr, 16); 3275 bdnz(loop); 3276 if (remainder) { std(tmp, 0, base_ptr); } 3277 } 3278 3279 // Kills both input registers. tmp == R0 is allowed. 3280 void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp, long const_cnt) { 3281 // Procedure for large arrays (uses data cache block zero instruction). 3282 Label startloop, fast, fastloop, small_rest, restloop, done; 3283 const int cl_size = VM_Version::L1_data_cache_line_size(), 3284 cl_dwords = cl_size >> 3, 3285 cl_dw_addr_bits = exact_log2(cl_dwords), 3286 dcbz_min = 1, // Min count of dcbz executions, needs to be >0. 3287 min_cnt = ((dcbz_min + 1) << cl_dw_addr_bits) - 1; 3288 3289 if (const_cnt >= 0) { 3290 // Constant case. 3291 if (const_cnt < min_cnt) { 3292 clear_memory_constlen(base_ptr, const_cnt, tmp); 3293 return; 3294 } 3295 load_const_optimized(cnt_dwords, const_cnt, tmp); 3296 } else { 3297 // cnt_dwords already loaded in register. Need to check size. 3298 cmpdi(CCR1, cnt_dwords, min_cnt); // Big enough? (ensure >= dcbz_min lines included). 3299 blt(CCR1, small_rest); 3300 } 3301 rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits); // Extract dword offset within first cache line. 3302 beq(CCR0, fast); // Already 128byte aligned. 3303 3304 subfic(tmp, tmp, cl_dwords); 3305 mtctr(tmp); // Set ctr to hit 128byte boundary (0<ctr<cl_dwords). 3306 subf(cnt_dwords, tmp, cnt_dwords); // rest. 3307 li(tmp, 0); 3308 3309 bind(startloop); // Clear at the beginning to reach 128byte boundary. 3310 std(tmp, 0, base_ptr); // Clear 8byte aligned block. 3311 addi(base_ptr, base_ptr, 8); 3312 bdnz(startloop); 3313 3314 bind(fast); // Clear 128byte blocks. 3315 srdi(tmp, cnt_dwords, cl_dw_addr_bits); // Loop count for 128byte loop (>0). 3316 andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords. 3317 mtctr(tmp); // Load counter. 3318 3319 bind(fastloop); 3320 dcbz(base_ptr); // Clear 128byte aligned block. 3321 addi(base_ptr, base_ptr, cl_size); 3322 bdnz(fastloop); 3323 3324 bind(small_rest); 3325 cmpdi(CCR0, cnt_dwords, 0); // size 0? 3326 beq(CCR0, done); // rest == 0 3327 li(tmp, 0); 3328 mtctr(cnt_dwords); // Load counter. 3329 3330 bind(restloop); // Clear rest. 3331 std(tmp, 0, base_ptr); // Clear 8byte aligned block. 3332 addi(base_ptr, base_ptr, 8); 3333 bdnz(restloop); 3334 3335 bind(done); 3336 } 3337 3338 /////////////////////////////////////////// String intrinsics //////////////////////////////////////////// 3339 3340 #ifdef COMPILER2 3341 // Intrinsics for CompactStrings 3342 3343 // Compress char[] to byte[] by compressing 16 bytes at once. 
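// Sketch of the latin1 check used in the fast loop below: with
// tmp1 = 0x00FF00FF00FF00FF, OR 8 chars together and test
// (c0|...|c7) & ~tmp1; any bit set in a char's high byte means the input
// is not latin1 and we branch to Lfailure.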
3344 void MacroAssembler::string_compress_16(Register src, Register dst, Register cnt, 3345 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, 3346 Label& Lfailure) { 3347 3348 const Register tmp0 = R0; 3349 assert_different_registers(src, dst, cnt, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 3350 Label Lloop, Lslow; 3351 3352 // Check if cnt >= 8 (= 16 bytes) 3353 lis(tmp1, 0xFF); // tmp1 = 0x00FF00FF00FF00FF 3354 srwi_(tmp2, cnt, 3); 3355 beq(CCR0, Lslow); 3356 ori(tmp1, tmp1, 0xFF); 3357 rldimi(tmp1, tmp1, 32, 0); 3358 mtctr(tmp2); 3359 3360 // 2x unrolled loop 3361 bind(Lloop); 3362 ld(tmp2, 0, src); // _0_1_2_3 (Big Endian) 3363 ld(tmp4, 8, src); // _4_5_6_7 3364 3365 orr(tmp0, tmp2, tmp4); 3366 rldicl(tmp3, tmp2, 6*8, 64-24); // _____1_2 3367 rldimi(tmp2, tmp2, 2*8, 2*8); // _0_2_3_3 3368 rldicl(tmp5, tmp4, 6*8, 64-24); // _____5_6 3369 rldimi(tmp4, tmp4, 2*8, 2*8); // _4_6_7_7 3370 3371 andc_(tmp0, tmp0, tmp1); 3372 bne(CCR0, Lfailure); // Not latin1. 3373 addi(src, src, 16); 3374 3375 rlwimi(tmp3, tmp2, 0*8, 24, 31);// _____1_3 3376 srdi(tmp2, tmp2, 3*8); // ____0_2_ 3377 rlwimi(tmp5, tmp4, 0*8, 24, 31);// _____5_7 3378 srdi(tmp4, tmp4, 3*8); // ____4_6_ 3379 3380 orr(tmp2, tmp2, tmp3); // ____0123 3381 orr(tmp4, tmp4, tmp5); // ____4567 3382 3383 stw(tmp2, 0, dst); 3384 stw(tmp4, 4, dst); 3385 addi(dst, dst, 8); 3386 bdnz(Lloop); 3387 3388 bind(Lslow); // Fallback to slow version 3389 } 3390 3391 // Compress char[] to byte[]. cnt must be positive int. 3392 void MacroAssembler::string_compress(Register src, Register dst, Register cnt, Register tmp, Label& Lfailure) { 3393 Label Lloop; 3394 mtctr(cnt); 3395 3396 bind(Lloop); 3397 lhz(tmp, 0, src); 3398 cmplwi(CCR0, tmp, 0xff); 3399 bgt(CCR0, Lfailure); // Not latin1. 3400 addi(src, src, 2); 3401 stb(tmp, 0, dst); 3402 addi(dst, dst, 1); 3403 bdnz(Lloop); 3404 } 3405 3406 // Inflate byte[] to char[] by inflating 16 bytes at once. 3407 void MacroAssembler::string_inflate_16(Register src, Register dst, Register cnt, 3408 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 3409 const Register tmp0 = R0; 3410 assert_different_registers(src, dst, cnt, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5); 3411 Label Lloop, Lslow; 3412 3413 // Check if cnt >= 8 3414 srwi_(tmp2, cnt, 3); 3415 beq(CCR0, Lslow); 3416 lis(tmp1, 0xFF); // tmp1 = 0x00FF00FF 3417 ori(tmp1, tmp1, 0xFF); 3418 mtctr(tmp2); 3419 3420 // 2x unrolled loop 3421 bind(Lloop); 3422 lwz(tmp2, 0, src); // ____0123 (Big Endian) 3423 lwz(tmp4, 4, src); // ____4567 3424 addi(src, src, 8); 3425 3426 rldicl(tmp3, tmp2, 7*8, 64-8); // _______2 3427 rlwimi(tmp2, tmp2, 3*8, 16, 23);// ____0113 3428 rldicl(tmp5, tmp4, 7*8, 64-8); // _______6 3429 rlwimi(tmp4, tmp4, 3*8, 16, 23);// ____4557 3430 3431 andc(tmp0, tmp2, tmp1); // ____0_1_ 3432 rlwimi(tmp2, tmp3, 2*8, 0, 23); // _____2_3 3433 andc(tmp3, tmp4, tmp1); // ____4_5_ 3434 rlwimi(tmp4, tmp5, 2*8, 0, 23); // _____6_7 3435 3436 rldimi(tmp2, tmp0, 3*8, 0*8); // _0_1_2_3 3437 rldimi(tmp4, tmp3, 3*8, 0*8); // _4_5_6_7 3438 3439 std(tmp2, 0, dst); 3440 std(tmp4, 8, dst); 3441 addi(dst, dst, 16); 3442 bdnz(Lloop); 3443 3444 bind(Lslow); // Fallback to slow version 3445 } 3446 3447 // Inflate byte[] to char[]. cnt must be positive int. 
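// Equivalent C sketch of the scalar loop emitted below (illustration only):
//   while (cnt-- > 0) { *dst++ = (jchar)(unsigned char)*src++; }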
3448 void MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
3449   Label Lloop;
3450   mtctr(cnt);
3451
3452   bind(Lloop);
3453   lbz(tmp, 0, src);
3454   addi(src, src, 1);
3455   sth(tmp, 0, dst);
3456   addi(dst, dst, 2);
3457   bdnz(Lloop);
3458 }
3459
3460 void MacroAssembler::string_compare(Register str1, Register str2,
3461                                     Register cnt1, Register cnt2,
3462                                     Register tmp1, Register result, int ae) {
3463   const Register tmp0 = R0,
3464                  diff = tmp1;
3465
3466   assert_different_registers(str1, str2, cnt1, cnt2, tmp0, tmp1, result);
3467   Label Ldone, Lslow, Lloop, Lreturn_diff;
3468
3469   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
3470   // we interchange str1 and str2 in the UL case and negate the result.
3471   // Like this, str1 is always latin1 encoded, except for the UU case.
3472   // In addition, the counts need to be zero-extended (sign extension yields the same here, as they are non-negative).
3473
3474   if (ae == StrIntrinsicNode::UU) {
3475     srwi(cnt1, cnt1, 1);
3476   } else {
3477     clrldi(cnt1, cnt1, 32);
3478   }
3479
3480   if (ae != StrIntrinsicNode::LL) {
3481     srwi(cnt2, cnt2, 1);
3482   } else {
3483     clrldi(cnt2, cnt2, 32);
3484   }
3485
3486   // See if the lengths are different, and calculate min in cnt1.
3487   // Save diff in case we need it for a tie-breaker.
3488   subf_(diff, cnt2, cnt1); // diff = cnt1 - cnt2
3489   // if (diff > 0) { cnt1 = cnt2; }
3490   if (VM_Version::has_isel()) {
3491     isel(cnt1, CCR0, Assembler::greater, /*invert*/ false, cnt2);
3492   } else {
3493     Label Lskip;
3494     blt(CCR0, Lskip);
3495     mr(cnt1, cnt2);
3496     bind(Lskip);
3497   }
3498
3499   // Rename registers
3500   Register chr1 = result;
3501   Register chr2 = tmp0;
3502
3503   // Compare multiple characters in fast loop (only implemented for same encoding).
3504   int stride1 = 8, stride2 = 8;
3505   if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
3506     int log2_chars_per_iter = (ae == StrIntrinsicNode::LL) ? 3 : 2;
3507     Label Lfastloop, Lskipfast;
3508
3509     srwi_(tmp0, cnt1, log2_chars_per_iter);
3510     beq(CCR0, Lskipfast);
3511     rldicl(cnt2, cnt1, 0, 64 - log2_chars_per_iter); // Remaining characters.
3512     li(cnt1, 1 << log2_chars_per_iter); // Initialize for failure case: Rescan characters from current iteration.
3513     mtctr(tmp0);
3514
3515     bind(Lfastloop);
3516     ld(chr1, 0, str1);
3517     ld(chr2, 0, str2);
3518     cmpd(CCR0, chr1, chr2);
3519     bne(CCR0, Lslow);
3520     addi(str1, str1, stride1);
3521     addi(str2, str2, stride2);
3522     bdnz(Lfastloop);
3523     mr(cnt1, cnt2); // Remaining characters.
3524     bind(Lskipfast);
3525   }
3526
3527   // Loop which searches the first difference character by character.
3528   cmpwi(CCR0, cnt1, 0);
3529   beq(CCR0, Lreturn_diff);
3530   bind(Lslow);
3531   mtctr(cnt1);
3532
3533   switch (ae) {
3534     case StrIntrinsicNode::LL: stride1 = 1; stride2 = 1; break;
3535     case StrIntrinsicNode::UL: // fallthru (see comment above)
3536     case StrIntrinsicNode::LU: stride1 = 1; stride2 = 2; break;
3537     case StrIntrinsicNode::UU: stride1 = 2; stride2 = 2; break;
3538     default: ShouldNotReachHere(); break;
3539   }
3540
3541   bind(Lloop);
3542   if (stride1 == 1) { lbz(chr1, 0, str1); } else { lhz(chr1, 0, str1); }
3543   if (stride2 == 1) { lbz(chr2, 0, str2); } else { lhz(chr2, 0, str2); }
3544   subf_(result, chr2, chr1); // result = chr1 - chr2
3545   bne(CCR0, Ldone);
3546   addi(str1, str1, stride1);
3547   addi(str2, str2, stride2);
3548   bdnz(Lloop);
3549
3550   // If strings are equal up to min length, return the length difference.
3551 bind(Lreturn_diff); 3552 mr(result, diff); 3553 3554 // Otherwise, return the difference between the first mismatched chars. 3555 bind(Ldone); 3556 if (ae == StrIntrinsicNode::UL) { 3557 neg(result, result); // Negate result (see note above). 3558 } 3559 } 3560 3561 void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, 3562 Register limit, Register tmp1, Register result, bool is_byte) { 3563 const Register tmp0 = R0; 3564 assert_different_registers(ary1, ary2, limit, tmp0, tmp1, result); 3565 Label Ldone, Lskiploop, Lloop, Lfastloop, Lskipfast; 3566 bool limit_needs_shift = false; 3567 3568 if (is_array_equ) { 3569 const int length_offset = arrayOopDesc::length_offset_in_bytes(); 3570 const int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 3571 3572 // Return true if the same array. 3573 cmpd(CCR0, ary1, ary2); 3574 beq(CCR0, Lskiploop); 3575 3576 // Return false if one of them is NULL. 3577 cmpdi(CCR0, ary1, 0); 3578 cmpdi(CCR1, ary2, 0); 3579 li(result, 0); 3580 cror(CCR0, Assembler::equal, CCR1, Assembler::equal); 3581 beq(CCR0, Ldone); 3582 3583 // Load the lengths of arrays. 3584 lwz(limit, length_offset, ary1); 3585 lwz(tmp0, length_offset, ary2); 3586 3587 // Return false if the two arrays are not equal length. 3588 cmpw(CCR0, limit, tmp0); 3589 bne(CCR0, Ldone); 3590 3591 // Load array addresses. 3592 addi(ary1, ary1, base_offset); 3593 addi(ary2, ary2, base_offset); 3594 } else { 3595 limit_needs_shift = !is_byte; 3596 li(result, 0); // Assume not equal. 3597 } 3598 3599 // Rename registers 3600 Register chr1 = tmp0; 3601 Register chr2 = tmp1; 3602 3603 // Compare 8 bytes per iteration in fast loop. 3604 const int log2_chars_per_iter = is_byte ? 3 : 2; 3605 3606 srwi_(tmp0, limit, log2_chars_per_iter + (limit_needs_shift ? 1 : 0)); 3607 beq(CCR0, Lskipfast); 3608 mtctr(tmp0); 3609 3610 bind(Lfastloop); 3611 ld(chr1, 0, ary1); 3612 ld(chr2, 0, ary2); 3613 addi(ary1, ary1, 8); 3614 addi(ary2, ary2, 8); 3615 cmpd(CCR0, chr1, chr2); 3616 bne(CCR0, Ldone); 3617 bdnz(Lfastloop); 3618 3619 bind(Lskipfast); 3620 rldicl_(limit, limit, limit_needs_shift ? 64 - 1 : 0, 64 - log2_chars_per_iter); // Remaining characters. 3621 beq(CCR0, Lskiploop); 3622 mtctr(limit); 3623 3624 // Character by character. 3625 bind(Lloop); 3626 if (is_byte) { 3627 lbz(chr1, 0, ary1); 3628 lbz(chr2, 0, ary2); 3629 addi(ary1, ary1, 1); 3630 addi(ary2, ary2, 1); 3631 } else { 3632 lhz(chr1, 0, ary1); 3633 lhz(chr2, 0, ary2); 3634 addi(ary1, ary1, 2); 3635 addi(ary2, ary2, 2); 3636 } 3637 cmpw(CCR0, chr1, chr2); 3638 bne(CCR0, Ldone); 3639 bdnz(Lloop); 3640 3641 bind(Lskiploop); 3642 li(result, 1); // All characters are equal. 3643 bind(Ldone); 3644 } 3645 3646 void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt, 3647 Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval, 3648 Register tmp1, Register tmp2, Register tmp3, Register tmp4, int ae) { 3649 3650 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite! 3651 Label L_TooShort, L_Found, L_NotFound, L_End; 3652 Register last_addr = haycnt, // Kill haycnt at the beginning. 3653 addr = tmp1, 3654 n_start = tmp2, 3655 ch1 = tmp3, 3656 ch2 = R0; 3657 3658 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 3659 const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2; 3660 const int n_csize = (ae == StrIntrinsicNode::UU) ? 
                      2 : 1;
3661
3662   // **************************************************************************************************
3663   // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
3664   // **************************************************************************************************
3665
3666   // Compute last haystack addr to use if no match gets found.
3667   clrldi(haycnt, haycnt, 32);         // Ensure positive int is valid as 64 bit value.
3668   addi(addr, haystack, -h_csize);     // Accesses use pre-increment.
3669   if (needlecntval == 0) { // variable needlecnt
3670     cmpwi(CCR6, needlecnt, 2);
3671     clrldi(needlecnt, needlecnt, 32); // Ensure positive int is valid as 64 bit value.
3672     blt(CCR6, L_TooShort);            // Variable needlecnt: handle short needle separately.
3673   }
3674
3675   if (n_csize == 2) { lwz(n_start, 0, needle); } else { lhz(n_start, 0, needle); } // Load first 2 characters of needle.
3676
3677   if (needlecntval == 0) { // variable needlecnt
3678     subf(ch1, needlecnt, haycnt);     // Last character index to compare is haycnt-needlecnt.
3679     addi(needlecnt, needlecnt, -2);   // Rest of needle.
3680   } else { // constant needlecnt
3681     guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
3682     assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
3683     addi(ch1, haycnt, -needlecntval); // Last character index to compare is haycnt-needlecnt.
3684     if (needlecntval > 3) { li(needlecnt, needlecntval - 2); } // Rest of needle.
3685   }
3686
3687   if (h_csize == 2) { slwi(ch1, ch1, 1); } // Scale to number of bytes.
3688
3689   if (ae == StrIntrinsicNode::UL) {
3690     srwi(tmp4, n_start, 1*8);          // ___0
3691     rlwimi(n_start, tmp4, 2*8, 0, 23); // _0_1
3692   }
3693
3694   add(last_addr, haystack, ch1);       // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
3695
3696   // Main Loop (now we have at least 2 characters).
3697   Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2;
3698   bind(L_OuterLoop); // Search for 1st 2 characters.
3699   Register addr_diff = tmp4;
3700   subf(addr_diff, addr, last_addr);    // Difference between already checked address and last address to check.
3701   addi(addr, addr, h_csize);           // This is the new address we want to use for comparing.
3702   srdi_(ch2, addr_diff, h_csize);
3703   beq(CCR0, L_FinalCheck);             // 2 characters left?
3704   mtctr(ch2);                          // num of characters / 2
3705   bind(L_InnerLoop);                   // Main work horse (2x unrolled search loop)
3706   if (h_csize == 2) {                  // Load 2 characters of haystack (ignore alignment).
3707     lwz(ch1, 0, addr);
3708     lwz(ch2, 2, addr);
3709   } else {
3710     lhz(ch1, 0, addr);
3711     lhz(ch2, 1, addr);
3712   }
3713   cmpw(CCR0, ch1, n_start);            // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
3714   cmpw(CCR1, ch2, n_start);
3715   beq(CCR0, L_Comp1);                  // Did we find the needle start?
3716   beq(CCR1, L_Comp2);
3717   addi(addr, addr, 2 * h_csize);
3718   bdnz(L_InnerLoop);
3719   bind(L_FinalCheck);
3720   andi_(addr_diff, addr_diff, h_csize); // Remaining characters not covered by InnerLoop: (num of characters) & 1.
3721   beq(CCR0, L_NotFound);
3722   if (h_csize == 2) { lwz(ch1, 0, addr); } else { lhz(ch1, 0, addr); } // One position left at which we have to compare.
3723   cmpw(CCR1, ch1, n_start);
3724   beq(CCR1, L_Comp1);
3725   bind(L_NotFound);
3726   li(result, -1); // not found
3727   b(L_End);
3728
3729   // **************************************************************************************************
3730   // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
3731   // **************************************************************************************************
3732   if (needlecntval == 0) { // We have to handle these cases separately.
3733     Label L_OneCharLoop;
3734     bind(L_TooShort);
3735     mtctr(haycnt);
3736     if (n_csize == 2) { lhz(n_start, 0, needle); } else { lbz(n_start, 0, needle); } // First character of needle
3737     bind(L_OneCharLoop);
3738     if (h_csize == 2) { lhzu(ch1, 2, addr); } else { lbzu(ch1, 1, addr); }
3739     cmpw(CCR1, ch1, n_start);
3740     beq(CCR1, L_Found);   // Did we find the one character needle?
3741     bdnz(L_OneCharLoop);
3742     li(result, -1);       // Not found.
3743     b(L_End);
3744   }
3745
3746   // **************************************************************************************************
3747   // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
3748   // **************************************************************************************************
3749
3750   // Compare the rest
3751   bind(L_Comp2);
3752   addi(addr, addr, h_csize); // First comparison has failed, 2nd one hit.
3753   bind(L_Comp1);             // Addr points to possible needle start.
3754   if (needlecntval != 2) {   // Const needlecnt==2?
3755     if (needlecntval != 3) {
3756       if (needlecntval == 0) { beq(CCR6, L_Found); } // Variable needlecnt==2?
3757       Register n_ind = tmp4,
3758                h_ind = n_ind;
3759       li(n_ind, 2 * n_csize); // First 2 characters are already compared, use index 2.
3760       mtctr(needlecnt);       // Decremented by 2, still > 0.
3761       Label L_CompLoop;
3762       bind(L_CompLoop);
3763       if (ae == StrIntrinsicNode::UL) {
3764         h_ind = ch1;
3765         sldi(h_ind, n_ind, 1);
3766       }
3767       if (n_csize == 2) { lhzx(ch2, needle, n_ind); } else { lbzx(ch2, needle, n_ind); }
3768       if (h_csize == 2) { lhzx(ch1, addr, h_ind); } else { lbzx(ch1, addr, h_ind); }
3769       cmpw(CCR1, ch1, ch2);
3770       bne(CCR1, L_OuterLoop);
3771       addi(n_ind, n_ind, n_csize);
3772       bdnz(L_CompLoop);
3773     } else { // No loop required if there's only one needle character left.
3774       if (n_csize == 2) { lhz(ch2, 2 * 2, needle); } else { lbz(ch2, 2 * 1, needle); }
3775       if (h_csize == 2) { lhz(ch1, 2 * 2, addr); } else { lbz(ch1, 2 * 1, addr); }
3776       cmpw(CCR1, ch1, ch2);
3777       bne(CCR1, L_OuterLoop);
3778     }
3779   }
3780   // Return index ...
3781   bind(L_Found);
3782   subf(result, haystack, addr);                  // relative to haystack, ...
3783   if (h_csize == 2) { srdi(result, result, 1); } // in characters.
3784   bind(L_End);
3785 } // string_indexof
3786
3787 void MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
3788                                          Register needle, jchar needleChar, Register tmp1, Register tmp2, bool is_byte) {
3789   assert_different_registers(haystack, haycnt, needle, tmp1, tmp2);
3790
3791   Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_NotFound, L_End;
3792   Register addr = tmp1,
3793            ch1 = tmp2,
3794            ch2 = R0;
3795
3796   const int h_csize = is_byte ? 1 : 2;
3797
3798 //4:
3799   srwi_(tmp2, haycnt, 1);   // Shift right by exact_log2(UNROLL_FACTOR).
3800   mr(addr, haystack);
3801   beq(CCR0, L_FinalCheck);
3802   mtctr(tmp2);              // Move to count register.
3803 //8:
3804   bind(L_InnerLoop);        // Main work horse (2x unrolled search loop).
3805 if (!is_byte) { 3806 lhz(ch1, 0, addr); 3807 lhz(ch2, 2, addr); 3808 } else { 3809 lbz(ch1, 0, addr); 3810 lbz(ch2, 1, addr); 3811 } 3812 (needle != R0) ? cmpw(CCR0, ch1, needle) : cmplwi(CCR0, ch1, (unsigned int)needleChar); 3813 (needle != R0) ? cmpw(CCR1, ch2, needle) : cmplwi(CCR1, ch2, (unsigned int)needleChar); 3814 beq(CCR0, L_Found1); // Did we find the needle? 3815 beq(CCR1, L_Found2); 3816 addi(addr, addr, 2 * h_csize); 3817 bdnz(L_InnerLoop); 3818 //16: 3819 bind(L_FinalCheck); 3820 andi_(R0, haycnt, 1); 3821 beq(CCR0, L_NotFound); 3822 if (!is_byte) { lhz(ch1, 0, addr); } else { lbz(ch1, 0, addr); } // One position left at which we have to compare. 3823 (needle != R0) ? cmpw(CCR1, ch1, needle) : cmplwi(CCR1, ch1, (unsigned int)needleChar); 3824 beq(CCR1, L_Found1); 3825 //21: 3826 bind(L_NotFound); 3827 li(result, -1); // Not found. 3828 b(L_End); 3829 3830 bind(L_Found2); 3831 addi(addr, addr, h_csize); 3832 //24: 3833 bind(L_Found1); // Return index ... 3834 subf(result, haystack, addr); // relative to haystack, ... 3835 if (!is_byte) { srdi(result, result, 1); } // in characters. 3836 bind(L_End); 3837 } // string_indexof_char 3838 3839 3840 void MacroAssembler::has_negatives(Register src, Register cnt, Register result, 3841 Register tmp1, Register tmp2) { 3842 const Register tmp0 = R0; 3843 assert_different_registers(src, result, cnt, tmp0, tmp1, tmp2); 3844 Label Lfastloop, Lslow, Lloop, Lnoneg, Ldone; 3845 3846 // Check if cnt >= 8 (= 16 bytes) 3847 lis(tmp1, (int)(short)0x8080); // tmp1 = 0x8080808080808080 3848 srwi_(tmp2, cnt, 4); 3849 li(result, 1); // Assume there's a negative byte. 3850 beq(CCR0, Lslow); 3851 ori(tmp1, tmp1, 0x8080); 3852 rldimi(tmp1, tmp1, 32, 0); 3853 mtctr(tmp2); 3854 3855 // 2x unrolled loop 3856 bind(Lfastloop); 3857 ld(tmp2, 0, src); 3858 ld(tmp0, 8, src); 3859 3860 orr(tmp0, tmp2, tmp0); 3861 3862 and_(tmp0, tmp0, tmp1); 3863 bne(CCR0, Ldone); // Found negative byte. 3864 addi(src, src, 16); 3865 3866 bdnz(Lfastloop); 3867 3868 bind(Lslow); // Fallback to slow version 3869 rldicl_(tmp0, cnt, 0, 64-4); 3870 beq(CCR0, Lnoneg); 3871 mtctr(tmp0); 3872 bind(Lloop); 3873 lbz(tmp0, 0, src); 3874 addi(src, src, 1); 3875 andi_(tmp0, tmp0, 0x80); 3876 bne(CCR0, Ldone); // Found negative byte. 3877 bdnz(Lloop); 3878 bind(Lnoneg); 3879 li(result, 0); 3880 3881 bind(Ldone); 3882 } 3883 3884 #endif // Compiler2 3885 3886 // Helpers for Intrinsic Emitters 3887 // 3888 // Revert the byte order of a 32bit value in a register 3889 // src: 0x44556677 3890 // dst: 0x77665544 3891 // Three steps to obtain the result: 3892 // 1) Rotate src (as doubleword) left 5 bytes. That puts the leftmost byte of the src word 3893 // into the rightmost byte position. Afterwards, everything left of the rightmost byte is cleared. 3894 // This value initializes dst. 3895 // 2) Rotate src (as word) left 3 bytes. That puts the rightmost byte of the src word into the leftmost 3896 // byte position. Furthermore, byte 5 is rotated into byte 6 position where it is supposed to go. 3897 // This value is mask inserted into dst with a [0..23] mask of 1s. 3898 // 3) Rotate src (as word) left 1 byte. That puts byte 6 into byte 5 position. 3899 // This value is mask inserted into dst with a [8..15] mask of 1s. 3900 void MacroAssembler::load_reverse_32(Register dst, Register src) { 3901 assert_different_registers(dst, src); 3902 3903 rldicl(dst, src, (4+1)*8, 56); // Rotate byte 4 into position 7 (rightmost), clear all to the left. 
3904 rlwimi(dst, src, 3*8, 0, 23); // Insert byte 5 into position 6, 7 into 4, leave pos 7 alone. 3905 rlwimi(dst, src, 1*8, 8, 15); // Insert byte 6 into position 5, leave the rest alone. 3906 } 3907 3908 // Calculate the column addresses of the crc32 lookup table into distinct registers. 3909 // This loop-invariant calculation is moved out of the loop body, reducing the loop 3910 // body size from 20 to 16 instructions. 3911 // Returns the offset that was used to calculate the address of column tc3. 3912 // Due to register shortage, setting tc3 may overwrite table. With the return offset 3913 // at hand, the original table address can be easily reconstructed. 3914 int MacroAssembler::crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3) { 3915 3916 #ifdef VM_LITTLE_ENDIAN 3917 // This is what we implement (the DOLIT4 part): 3918 // ========================================================================= */ 3919 // #define DOLIT4 c ^= *buf4++; \ 3920 // c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ 3921 // crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] 3922 // #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 3923 // ========================================================================= */ 3924 const int ix0 = 3*(4*CRC32_COLUMN_SIZE); 3925 const int ix1 = 2*(4*CRC32_COLUMN_SIZE); 3926 const int ix2 = 1*(4*CRC32_COLUMN_SIZE); 3927 const int ix3 = 0*(4*CRC32_COLUMN_SIZE); 3928 #else 3929 // This is what we implement (the DOBIG4 part): 3930 // ========================================================================= 3931 // #define DOBIG4 c ^= *++buf4; \ 3932 // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ 3933 // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] 3934 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 3935 // ========================================================================= 3936 const int ix0 = 4*(4*CRC32_COLUMN_SIZE); 3937 const int ix1 = 5*(4*CRC32_COLUMN_SIZE); 3938 const int ix2 = 6*(4*CRC32_COLUMN_SIZE); 3939 const int ix3 = 7*(4*CRC32_COLUMN_SIZE); 3940 #endif 3941 assert_different_registers(table, tc0, tc1, tc2); 3942 assert(table == tc3, "must be!"); 3943 3944 addi(tc0, table, ix0); 3945 addi(tc1, table, ix1); 3946 addi(tc2, table, ix2); 3947 if (ix3 != 0) addi(tc3, table, ix3); 3948 3949 return ix3; 3950 } 3951 3952 /** 3953 * uint32_t crc; 3954 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 3955 */ 3956 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) { 3957 assert_different_registers(crc, table, tmp); 3958 assert_different_registers(val, table); 3959 3960 if (crc == val) { // Must rotate first to use the unmodified value. 3961 rlwinm(tmp, val, 2, 24-2, 31-2); // Insert (rightmost) byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 3962 // As we use a word (4-byte) instruction, we have to adapt the mask bit positions. 3963 srwi(crc, crc, 8); // Unsigned shift, clear leftmost 8 bits. 3964 } else { 3965 srwi(crc, crc, 8); // Unsigned shift, clear leftmost 8 bits. 3966 rlwinm(tmp, val, 2, 24-2, 31-2); // Insert (rightmost) byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 
3967   }
3968   lwzx(tmp, table, tmp);
3969   xorr(crc, crc, tmp);
3970 }
3971
3972 /**
3973  * uint32_t crc;
3974  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
3975  */
3976 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
3977   fold_byte_crc32(crc, crc, table, tmp);
3978 }
3979
3980 /**
3981  * Emits code to update CRC-32 with a byte value according to constants in table.
3982  *
3983  * @param [in,out]crc Register containing the crc.
3984  * @param [in]val     Register containing the byte to fold into the CRC.
3985  * @param [in]table   Register containing the table of crc constants.
3986  *
3987  * uint32_t crc;
3988  * val = crc_table[(val ^ crc) & 0xFF];
3989  * crc = val ^ (crc >> 8);
3990  */
3991 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
3992   BLOCK_COMMENT("update_byte_crc32:");
3993   xorr(val, val, crc);
3994   fold_byte_crc32(crc, val, table, val);
3995 }
3996
3997 /**
3998  * @param crc   register containing existing CRC (32-bit)
3999  * @param buf   register pointing to input byte buffer (byte*)
4000  * @param len   register containing number of bytes
4001  * @param table register pointing to CRC table
4002  */
4003 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
4004                                            Register data, bool loopAlignment) {
4005   assert_different_registers(crc, buf, len, table, data);
4006
4007   Label L_mainLoop, L_done;
4008   const int mainLoop_stepping  = 1;
4009   const int mainLoop_alignment = loopAlignment ? 32 : 4; // (InputForNewCode > 4 ? InputForNewCode : 32) : 4;
4010
4011   // Process all bytes in a single-byte loop.
4012   clrldi_(len, len, 32);             // Enforce 32 bit. Anything to do?
4013   beq(CCR0, L_done);
4014
4015   mtctr(len);
4016   align(mainLoop_alignment);
4017   BIND(L_mainLoop);
4018   lbz(data, 0, buf);                 // Byte from buffer, zero-extended.
4019   addi(buf, buf, mainLoop_stepping); // Advance buffer position.
4020   update_byte_crc32(crc, data, table);
4021   bdnz(L_mainLoop);                  // Iterate.
4022
4023   bind(L_done);
4024 }
4025
4026 /**
4027  * Emits code to update CRC-32 with a 4-byte value according to constants in table
4028  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c
4029  */
4030 // A note on the lookup table address(es):
4031 // The lookup table consists of two sets of four columns each.
4032 // The columns {0..3} are used for little-endian machines.
4033 // The columns {4..7} are used for big-endian machines.
4034 // To save the effort of adding the column offset to the table address each time
4035 // a table element is looked up, it is possible to pass the pre-calculated
4036 // column addresses.
4037 // Uses R9..R12 as work registers. Must be saved/restored by caller, if necessary.
4038 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
4039                                         Register t0,  Register t1,  Register t2,  Register t3,
4040                                         Register tc0, Register tc1, Register tc2, Register tc3) {
4041   assert_different_registers(crc, t3);
4042
4043   // XOR crc with next four bytes of buffer.
4044   lwz(t3, bufDisp, buf);
4045   if (bufInc != 0) {
4046     addi(buf, buf, bufInc);
4047   }
4048   xorr(t3, t3, crc);
4049
4050   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
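  // Worked example for the extractions below (illustration only): for
  // t3 = 0x11223344 they yield t0 = 0x44 << 2, t1 = 0x33 << 2, t2 = 0x22 << 2,
  // t3 = 0x11 << 2, i.e. each byte becomes a word-scaled index into its column.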
4051   rlwinm(t0, t3, 2,         24-2, 31-2); // ((t3 >>  0) & 0xff) << 2
4052   rlwinm(t1, t3, 32+(2- 8), 24-2, 31-2); // ((t3 >>  8) & 0xff) << 2
4053   rlwinm(t2, t3, 32+(2-16), 24-2, 31-2); // ((t3 >> 16) & 0xff) << 2
4054   rlwinm(t3, t3, 32+(2-24), 24-2, 31-2); // ((t3 >> 24) & 0xff) << 2
4055
4056   // Use the pre-calculated column addresses.
4057   // Load pre-calculated table values.
4058   lwzx(t0, tc0, t0);
4059   lwzx(t1, tc1, t1);
4060   lwzx(t2, tc2, t2);
4061   lwzx(t3, tc3, t3);
4062
4063   // Calculate new crc from table values.
4064   xorr(t0,  t0,  t1);
4065   xorr(t2,  t2,  t3);
4066   xorr(crc, t0,  t2); // Now crc contains the final checksum value.
4067 }
4068
4069 /**
4070  * @param crc   register containing existing CRC (32-bit)
4071  * @param buf   register pointing to input byte buffer (byte*)
4072  * @param len   register containing number of bytes
4073  * @param table register pointing to CRC table
4074  *
4075  * Uses R9..R12 as work registers. Must be saved/restored by caller!
4076  */
4077 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
4078                                         Register t0,  Register t1,  Register t2,  Register t3,
4079                                         Register tc0, Register tc1, Register tc2, Register tc3,
4080                                         bool invertCRC) {
4081   assert_different_registers(crc, buf, len, table);
4082
4083   Label L_mainLoop, L_tail;
4084   Register tmp  = t0;
4085   Register data = t0;
4086   Register tmp2 = t1;
4087   const int mainLoop_stepping  = 8;
4088   const int tailLoop_stepping  = 1;
4089   const int log_stepping       = exact_log2(mainLoop_stepping);
4090   const int mainLoop_alignment = 32; // InputForNewCode > 4 ? InputForNewCode : 32;
4091   const int complexThreshold   = 2*mainLoop_stepping;
4092
4093   // Don't test for len <= 0 here. This pathological case should not occur anyway.
4094   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
4095   // for all well-behaved cases. The situation itself is detected and handled correctly
4096   // within update_byteLoop_crc32.
4097   assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
4098
4099   BLOCK_COMMENT("kernel_crc32_2word {");
4100
4101   if (invertCRC) {
4102     nand(crc, crc, crc); // 1s complement of crc
4103   }
4104
4105   // Check for short (<mainLoop_stepping) buffer.
4106   cmpdi(CCR0, len, complexThreshold);
4107   blt(CCR0, L_tail);
4108
4109   // Pre-mainLoop alignment did show a slight (1%) positive effect on performance.
4110   // We leave the code in for reference. Maybe we need alignment when we exploit vector instructions.
4111   {
4112     // Align buf addr to mainLoop_stepping boundary.
4113     neg(tmp2, buf);                         // Calculate # preLoop iterations for alignment.
4114     rldicl(tmp2, tmp2, 0, 64-log_stepping); // Keep only the low log_stepping bits of -buf (bits 61..63 here).
4115
4116     if (complexThreshold > mainLoop_stepping) {
4117       sub(len, len, tmp2);                  // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
4118     } else {
4119       sub(tmp, len, tmp2);                  // Remaining bytes for main loop.
4120       cmpdi(CCR0, tmp, mainLoop_stepping);
4121       blt(CCR0, L_tail);                    // For less than one mainLoop_stepping left, do only tail processing.
4122       mr(len, tmp);                         // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
4123     }
4124     update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
4125   }
4126
4127   srdi(tmp2, len, log_stepping);       // #iterations for mainLoop
4128   andi(len, len, mainLoop_stepping-1); // remaining bytes for tailLoop
4129   mtctr(tmp2);
4130
4131 #ifdef VM_LITTLE_ENDIAN
4132   Register crc_rv = crc;
4133 #else
4134   Register crc_rv = tmp;        // Load_reverse needs separate registers to work on.
4135                                 // Occupies tmp, but frees up crc.
4136   load_reverse_32(crc_rv, crc); // Revert byte order because we are dealing with big-endian data.
4137   tmp = crc;
4138 #endif
4139
4140   int reconstructTableOffset = crc32_table_columns(table, tc0, tc1, tc2, tc3);
4141
4142   align(mainLoop_alignment);    // Octoword-aligned loop address. Shows 2% improvement.
4143   BIND(L_mainLoop);
4144   update_1word_crc32(crc_rv, buf, table, 0, 0, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
4145   update_1word_crc32(crc_rv, buf, table, 4, mainLoop_stepping, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
4146   bdnz(L_mainLoop);
4147
4148 #ifndef VM_LITTLE_ENDIAN
4149   load_reverse_32(crc, crc_rv); // Revert byte order because we are dealing with big-endian data.
4150   tmp = crc_rv;                 // Tmp uses its original register again.
4151 #endif
4152
4153   // Restore original table address for tailLoop.
4154   if (reconstructTableOffset != 0) {
4155     addi(table, table, -reconstructTableOffset);
4156   }
4157
4158   // Process last few (<complexThreshold) bytes of buffer.
4159   BIND(L_tail);
4160   update_byteLoop_crc32(crc, buf, len, table, data, false);
4161
4162   if (invertCRC) {
4163     nand(crc, crc, crc); // 1s complement of crc
4164   }
4165   BLOCK_COMMENT("} kernel_crc32_2word");
4166 }
4167
4168 /**
4169  * @param crc   register containing existing CRC (32-bit)
4170  * @param buf   register pointing to input byte buffer (byte*)
4171  * @param len   register containing number of bytes
4172  * @param table register pointing to CRC table
4173  *
4174  * Uses R9..R12 as work registers. Must be saved/restored by caller!
4175  */
4176 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
4177                                         Register t0,  Register t1,  Register t2,  Register t3,
4178                                         Register tc0, Register tc1, Register tc2, Register tc3,
4179                                         bool invertCRC) {
4180   assert_different_registers(crc, buf, len, table);
4181
4182   Label L_mainLoop, L_tail;
4183   Register tmp  = t0;
4184   Register data = t0;
4185   Register tmp2 = t1;
4186   const int mainLoop_stepping  = 4;
4187   const int tailLoop_stepping  = 1;
4188   const int log_stepping       = exact_log2(mainLoop_stepping);
4189   const int mainLoop_alignment = 32; // InputForNewCode > 4 ? InputForNewCode : 32;
4190   const int complexThreshold   = 2*mainLoop_stepping;
4191
4192   // Don't test for len <= 0 here. This pathological case should not occur anyway.
4193   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles
4194   // for all well-behaved cases. The situation itself is detected and handled correctly
4195   // within update_byteLoop_crc32.
4196   assert(tailLoop_stepping == 1, "check tailLoop_stepping!");
4197
4198   BLOCK_COMMENT("kernel_crc32_1word {");
4199
4200   if (invertCRC) {
4201     nand(crc, crc, crc); // 1s complement of crc
4202   }
4203
4204   // Check for short (<mainLoop_stepping) buffer.
4205   cmpdi(CCR0, len, complexThreshold);
4206   blt(CCR0, L_tail);
4207
4208   // Pre-mainLoop alignment did show a slight (1%) positive effect on performance.
4209   // We leave the code in for reference. Maybe we need alignment when we exploit vector instructions.
4210   {
4211     // Align buf addr to mainLoop_stepping boundary.
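    // Sketch of the alignment trick below: tmp2 = (-buf) & (mainLoop_stepping - 1)
    // is the number of bytes up to the next stepping-aligned address
    // (0 if buf is already aligned).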
4212     neg(tmp2, buf);                         // Calculate # preLoop iterations for alignment.
4213     rldicl(tmp2, tmp2, 0, 64-log_stepping); // Keep only the low log_stepping bits of -buf (bits 62..63 here).
4214
4215     if (complexThreshold > mainLoop_stepping) {
4216       sub(len, len, tmp2);                  // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
4217     } else {
4218       sub(tmp, len, tmp2);                  // Remaining bytes for main loop.
4219       cmpdi(CCR0, tmp, mainLoop_stepping);
4220       blt(CCR0, L_tail);                    // For less than one mainLoop_stepping left, do only tail processing.
4221       mr(len, tmp);                         // Remaining bytes for main loop (>=mainLoop_stepping is guaranteed).
4222     }
4223     update_byteLoop_crc32(crc, buf, tmp2, table, data, false);
4224   }
4225
4226   srdi(tmp2, len, log_stepping);       // #iterations for mainLoop
4227   andi(len, len, mainLoop_stepping-1); // remaining bytes for tailLoop
4228   mtctr(tmp2);
4229
4230 #ifdef VM_LITTLE_ENDIAN
4231   Register crc_rv = crc;
4232 #else
4233   Register crc_rv = tmp;        // Load_reverse needs separate registers to work on.
4234                                 // Occupies tmp, but frees up crc.
4235   load_reverse_32(crc_rv, crc); // Revert byte order because we are dealing with big-endian data.
4236   tmp = crc;
4237 #endif
4238
4239   int reconstructTableOffset = crc32_table_columns(table, tc0, tc1, tc2, tc3);
4240
4241   align(mainLoop_alignment);    // Octoword-aligned loop address. Shows 2% improvement.
4242   BIND(L_mainLoop);
4243   update_1word_crc32(crc_rv, buf, table, 0, mainLoop_stepping, crc_rv, t1, t2, t3, tc0, tc1, tc2, tc3);
4244   bdnz(L_mainLoop);
4245
4246 #ifndef VM_LITTLE_ENDIAN
4247   load_reverse_32(crc, crc_rv); // Revert byte order because we are dealing with big-endian data.
4248   tmp = crc_rv;                 // Tmp uses its original register again.
4249 #endif
4250
4251   // Restore original table address for tailLoop.
4252   if (reconstructTableOffset != 0) {
4253     addi(table, table, -reconstructTableOffset);
4254   }
4255
4256   // Process last few (<complexThreshold) bytes of buffer.
4257   BIND(L_tail);
4258   update_byteLoop_crc32(crc, buf, len, table, data, false);
4259
4260   if (invertCRC) {
4261     nand(crc, crc, crc); // 1s complement of crc
4262   }
4263   BLOCK_COMMENT("} kernel_crc32_1word");
4264 }
4265
4266 /**
4267  * @param crc   register containing existing CRC (32-bit)
4268  * @param buf   register pointing to input byte buffer (byte*)
4269  * @param len   register containing number of bytes
4270  * @param table register pointing to CRC table
4271  *
4272  * Uses R7_ARG5, R8_ARG6 as work registers.
4273  */
4274 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
4275                                         Register t0,  Register t1,  Register t2,  Register t3,
4276                                         bool invertCRC) {
4277   assert_different_registers(crc, buf, len, table);
4278
4279   Register data = t0; // Holds the current byte to be folded into crc.
4280
4281   BLOCK_COMMENT("kernel_crc32_1byte {");
4282
4283   if (invertCRC) {
4284     nand(crc, crc, crc); // 1s complement of crc
4285   }
4286
4287   // Process all bytes in a single-byte loop.
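  // Equivalent C sketch of this byte loop, per the update_byte_crc32 contract
  // documented above (illustration only):
  //   while (len-- > 0) { crc = crc_table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8); }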
4288 update_byteLoop_crc32(crc, buf, len, table, data, true); 4289 4290 if (invertCRC) { 4291 nand(crc, crc, crc); // 1s complement of crc 4292 } 4293 BLOCK_COMMENT("} kernel_crc32_1byte"); 4294 } 4295 4296 /** 4297 * @param crc register containing existing CRC (32-bit) 4298 * @param buf register pointing to input byte buffer (byte*) 4299 * @param len register containing number of bytes 4300 * @param table register pointing to CRC table 4301 * @param constants register pointing to CRC table for 128-bit aligned memory 4302 * @param barretConstants register pointing to table for barrett reduction 4303 * @param t0-t4 temp registers 4304 */ 4305 void MacroAssembler::kernel_crc32_1word_vpmsum(Register crc, Register buf, Register len, Register table, 4306 Register constants, Register barretConstants, 4307 Register t0, Register t1, Register t2, Register t3, Register t4, 4308 bool invertCRC) { 4309 assert_different_registers(crc, buf, len, table); 4310 4311 Label L_alignedHead, L_tail; 4312 4313 BLOCK_COMMENT("kernel_crc32_1word_vpmsum {"); 4314 4315 // 1. ~c 4316 if (invertCRC) { 4317 nand(crc, crc, crc); // 1s complement of crc 4318 } 4319 4320 // 2. use kernel_crc32_1word for short len 4321 clrldi(len, len, 32); 4322 cmpdi(CCR0, len, 512); 4323 blt(CCR0, L_tail); 4324 4325 // 3. calculate from 0 to first aligned address 4326 const int alignment = 16; 4327 Register prealign = t0; 4328 4329 andi_(prealign, buf, alignment - 1); 4330 beq(CCR0, L_alignedHead); 4331 subfic(prealign, prealign, alignment); 4332 4333 subf(len, prealign, len); 4334 update_byteLoop_crc32(crc, buf, prealign, table, t2, false); 4335 4336 // 4. calculate from first aligned address as far as possible 4337 BIND(L_alignedHead); 4338 kernel_crc32_1word_aligned(crc, buf, len, constants, barretConstants, t0, t1, t2, t3, t4); 4339 4340 // 5. remaining bytes 4341 BIND(L_tail); 4342 Register tc0 = t4; 4343 Register tc1 = constants; 4344 Register tc2 = barretConstants; 4345 kernel_crc32_1word(crc, buf, len, table, t0, t1, t2, t3, tc0, tc1, tc2, table, false); 4346 4347 // 6. ~c 4348 if (invertCRC) { 4349 nand(crc, crc, crc); // 1s complement of crc 4350 } 4351 4352 BLOCK_COMMENT("} kernel_crc32_1word_vpmsum"); 4353 } 4354 4355 /** 4356 * @param crc register containing existing CRC (32-bit) 4357 * @param buf register pointing to input byte buffer (byte*) 4358 * @param len register containing number of bytes (will get updated to remaining bytes) 4359 * @param constants register pointing to CRC table for 128-bit aligned memory 4360 * @param barretConstants register pointing to table for barrett reduction 4361 * @param t0-t4 temp registers 4362 * Precondition: len should be >= 512. Otherwise, nothing will be done. 4363 */ 4364 void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Register len, 4365 Register constants, Register barretConstants, 4366 Register t0, Register t1, Register t2, Register t3, Register t4) { 4367 4368 // Save non-volatile vector registers (frameless). 
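  // The stores below use negative offsets from R1_SP, i.e. the area just below
  // the stack pointer (frameless; presumably within the ABI's volatile save
  // area below SP, as well under 288 bytes are used).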
4369   Register offset = t1;
4370   int offsetInt = 0;
4371   offsetInt -= 16; li(offset, offsetInt); stvx(VR20, offset, R1_SP);
4372   offsetInt -= 16; li(offset, offsetInt); stvx(VR21, offset, R1_SP);
4373   offsetInt -= 16; li(offset, offsetInt); stvx(VR22, offset, R1_SP);
4374   offsetInt -= 16; li(offset, offsetInt); stvx(VR23, offset, R1_SP);
4375   offsetInt -= 16; li(offset, offsetInt); stvx(VR24, offset, R1_SP);
4376   offsetInt -= 16; li(offset, offsetInt); stvx(VR25, offset, R1_SP);
4377 #ifndef VM_LITTLE_ENDIAN
4378   offsetInt -= 16; li(offset, offsetInt); stvx(VR26, offset, R1_SP);
4379 #endif
4380   offsetInt -= 8; std(R14, offsetInt, R1_SP);
4381   offsetInt -= 8; std(R15, offsetInt, R1_SP);
4382   offsetInt -= 8; std(R16, offsetInt, R1_SP);
4383   offsetInt -= 8; std(R17, offsetInt, R1_SP);
4384
4385   // Implementation uses an inner loop which uses between 256 and 16 * unroll_factor
4386   // bytes per iteration. The basic scheme is:
4387   // lvx: load vector (Big Endian needs reversal)
4388   // vpmsumw: carry-less 32 bit multiplications with constant representing a large CRC shift
4389   // vxor: xor partial results together to get unroll_factor2 vectors
4390
4391   // Outer loop performs the CRC shifts needed to combine the unroll_factor2 vectors.
4392
4393   // Using 16 * unroll_factor / unroll_factor2 bytes for constants.
4394   const int unroll_factor = 2048;
4395   const int unroll_factor2 = 8;
4396
4397   // Support registers.
4398   Register offs[] = { noreg, t0, t1, t2, t3, t4, crc /* will live in VCRC */, R14 };
4399   Register num_bytes = R15,
4400            loop_count = R16,
4401            cur_const = R17;
4402   // Constant array for outer loop: unroll_factor2 - 1 registers,
4403   // Constant array for inner loop: unroll_factor / unroll_factor2 registers.
4404   VectorRegister consts0[] = { VR16, VR17, VR18, VR19, VR20, VR21, VR22 },
4405                  consts1[] = { VR23, VR24 };
4406   // Data register arrays: 2 arrays with unroll_factor2 registers.
4407   VectorRegister data0[] = { VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7 },
4408                  data1[] = { VR8, VR9, VR10, VR11, VR12, VR13, VR14, VR15 };
4409
4410   VectorRegister VCRC = data0[0];
4411   VectorRegister Vc = VR25;
4412   VectorRegister swap_bytes = VR26; // Only for Big Endian.
4413
4414   // We have at least 1 iteration (ensured by caller).
4415   Label L_outer_loop, L_inner_loop, L_last;
4416
4417   // If supported, set DSCR pre-fetch to deepest.
4418   if (VM_Version::has_mfdscr()) {
4419     load_const_optimized(t0, VM_Version::_dscr_val | 7);
4420     mtdscr(t0);
4421   }
4422
4423   mtvrwz(VCRC, crc); // crc lives in VCRC now.
4424
4425   for (int i = 1; i < unroll_factor2; ++i) {
4426     li(offs[i], 16 * i);
4427   }
4428
4429   // Load consts for outer loop
4430   lvx(consts0[0], constants);
4431   for (int i = 1; i < unroll_factor2 - 1; ++i) {
4432     lvx(consts0[i], offs[i], constants);
4433   }
4434   addi(constants, constants, (unroll_factor2 - 1) * 16);
4435
4436   load_const_optimized(num_bytes, 16 * unroll_factor);
4437   load_const_optimized(loop_count, unroll_factor / (2 * unroll_factor2) - 1); // One double-iteration peeled off.
4438
4439   // Reuse data registers outside of the loop.
4440   VectorRegister Vtmp = data1[0];
4441   VectorRegister Vtmp2 = data1[1];
4442   VectorRegister zeroes = data1[2];
4443
4444   vspltisb(Vtmp, 0);
4445   vsldoi(VCRC, Vtmp, VCRC, 8); // 96 bit zeroes, 32 bit CRC.
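  // Sketch of the 64-bit fold prepared below: each control byte 0xST of Vc
  // makes vpermxor xor source byte S with source byte T, so the constant
  // 00 11 ... 77 | 08 19 ... 7f pairs byte i of the high doubleword with
  // byte i of the low doubleword (used as "xor both halves" at the loop head).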
4446
4447   // Load vector for vpermxor (to xor both 64 bit parts together)
4448   lvsl(Vtmp, buf);                        // 000102030405060708090a0b0c0d0e0f
4449   vspltisb(Vc, 4);
4450   vsl(Vc, Vtmp, Vc);                      // 00102030405060708090a0b0c0d0e0f0
4451   xxspltd(Vc->to_vsr(), Vc->to_vsr(), 0);
4452   vor(Vc, Vtmp, Vc);                      // 001122334455667708192a3b4c5d6e7f
4453
4454 #ifdef VM_LITTLE_ENDIAN
4455 #define BE_swap_bytes(x)
4456 #else
4457   vspltisb(Vtmp2, 0xf);
4458   vxor(swap_bytes, Vtmp, Vtmp2);
4459 #define BE_swap_bytes(x) vperm(x, x, x, swap_bytes)
4460 #endif
4461
4462   cmpd(CCR0, len, num_bytes);
4463   blt(CCR0, L_last);
4464
4465   // ********** Main loop start **********
4466   align(32);
4467   bind(L_outer_loop);
4468
4469   // Beginning of unrolled first iteration (no xor).
4470   lvx(data1[0], buf);
4471   mr(cur_const, constants);
4472   for (int i = 1; i < unroll_factor2 / 2; ++i) {
4473     lvx(data1[i], offs[i], buf);
4474   }
4475   vpermxor(VCRC, VCRC, VCRC, Vc); // xor both halves to 64 bit result.
4476   lvx(consts1[0], cur_const);
4477   mtctr(loop_count);
4478   for (int i = 0; i < unroll_factor2 / 2; ++i) {
4479     BE_swap_bytes(data1[i]);
4480     if (i == 0) { vxor(data1[0], data1[0], VCRC); } // xor in previous CRC.
4481     lvx(data1[i + unroll_factor2 / 2], offs[i + unroll_factor2 / 2], buf);
4482     vpmsumw(data0[i], data1[i], consts1[0]);
4483   }
4484   addi(buf, buf, 16 * unroll_factor2);
4485   subf(len, num_bytes, len);
4486   lvx(consts1[1], offs[1], cur_const);
4487   addi(cur_const, cur_const, 32);
4488   // Beginning of unrolled second iteration (head).
4489   for (int i = 0; i < unroll_factor2 / 2; ++i) {
4490     BE_swap_bytes(data1[i + unroll_factor2 / 2]);
4491     if (i == 0) { lvx(data1[0], buf); } else { lvx(data1[i], offs[i], buf); }
4492     vpmsumw(data0[i + unroll_factor2 / 2], data1[i + unroll_factor2 / 2], consts1[0]);
4493   }
4494   for (int i = 0; i < unroll_factor2 / 2; ++i) {
4495     BE_swap_bytes(data1[i]);
4496     lvx(data1[i + unroll_factor2 / 2], offs[i + unroll_factor2 / 2], buf);
4497     vpmsumw(data1[i], data1[i], consts1[1]);
4498   }
4499   addi(buf, buf, 16 * unroll_factor2);
4500
4501   // Generate the most performance-relevant code. Loads + half of the vpmsumw have been generated.
4502   // Double-iteration allows using the 2 constant registers alternately.
4503   align(32);
4504   bind(L_inner_loop);
4505   for (int j = 1; j < 3; ++j) { // j < unroll_factor / unroll_factor2 - 1 for complete unrolling.
4506     if (j & 1) {
4507       lvx(consts1[0], cur_const);
4508     } else {
4509       lvx(consts1[1], offs[1], cur_const);
4510       addi(cur_const, cur_const, 32);
4511     }
4512     for (int i = 0; i < unroll_factor2; ++i) {
4513       int idx = i + unroll_factor2 / 2, inc = 0; // For modulo-scheduled input.
4514       if (idx >= unroll_factor2) { idx -= unroll_factor2; inc = 1; }
4515       BE_swap_bytes(data1[idx]);
4516       vxor(data0[i], data0[i], data1[i]);
4517       if (i == 0) lvx(data1[0], buf); else lvx(data1[i], offs[i], buf);
4518       vpmsumw(data1[idx], data1[idx], consts1[(j + inc) & 1]);
4519     }
4520     addi(buf, buf, 16 * unroll_factor2);
4521   }
4522   bdnz(L_inner_loop);
4523
4524   // Tail of last iteration (no loads).
4525   for (int i = 0; i < unroll_factor2 / 2; ++i) {
4526     BE_swap_bytes(data1[i + unroll_factor2 / 2]);
4527     vxor(data0[i], data0[i], data1[i]);
4528     vpmsumw(data1[i + unroll_factor2 / 2], data1[i + unroll_factor2 / 2], consts1[1]);
4529   }
4530   for (int i = 0; i < unroll_factor2 / 2; ++i) {
4531     vpmsumw(data0[i], data0[i], consts0[unroll_factor2 - 2 - i]); // First half of fixup shifts.
4532 vxor(data0[i + unroll_factor2 / 2], data0[i + unroll_factor2 / 2], data1[i + unroll_factor2 / 2]); 4533 } 4534 4535 // Last data register is ok, other ones need fixup shift. 4536 for (int i = unroll_factor2 / 2; i < unroll_factor2 - 1; ++i) { 4537 vpmsumw(data0[i], data0[i], consts0[unroll_factor2 - 2 - i]); 4538 } 4539 4540 // Combine to 128 bit result vector VCRC = data0[0]. 4541 for (int i = 1; i < unroll_factor2; i<<=1) { 4542 for (int j = 0; j <= unroll_factor2 - 2*i; j+=2*i) { 4543 vxor(data0[j], data0[j], data0[j+i]); 4544 } 4545 } 4546 cmpd(CCR0, len, num_bytes); 4547 bge(CCR0, L_outer_loop); 4548 4549 // Last chance with lower num_bytes. 4550 bind(L_last); 4551 srdi(loop_count, len, exact_log2(16 * 2 * unroll_factor2)); // Use double-iterations. 4552 add_const_optimized(constants, constants, 16 * (unroll_factor / unroll_factor2)); // Point behind last one. 4553 sldi(R0, loop_count, exact_log2(16 * 2)); // Bytes of constants to be used. 4554 clrrdi(num_bytes, len, exact_log2(16 * 2 * unroll_factor2)); 4555 subf(constants, R0, constants); // Point to constant to be used first. 4556 4557 addic_(loop_count, loop_count, -1); // One double-iteration peeled off. 4558 bgt(CCR0, L_outer_loop); 4559 // ********** Main loop end ********** 4560 #undef BE_swap_bytes 4561 4562 // Restore DSCR pre-fetch value. 4563 if (VM_Version::has_mfdscr()) { 4564 load_const_optimized(t0, VM_Version::_dscr_val); 4565 mtdscr(t0); 4566 } 4567 4568 vspltisb(zeroes, 0); 4569 4570 // Combine to 64 bit result. 4571 vpermxor(VCRC, VCRC, VCRC, Vc); // xor both halves to 64 bit result. 4572 4573 // Reduce to 32 bit CRC: Remainder by multiply-high. 4574 lvx(Vtmp, barretConstants); 4575 vsldoi(Vtmp2, zeroes, VCRC, 12); // Extract high 32 bit. 4576 vpmsumd(Vtmp2, Vtmp2, Vtmp); // Multiply by inverse long poly. 4577 vsldoi(Vtmp2, zeroes, Vtmp2, 12); // Extract high 32 bit. 4578 vsldoi(Vtmp, zeroes, Vtmp, 8); 4579 vpmsumd(Vtmp2, Vtmp2, Vtmp); // Multiply quotient by long poly. 4580 vxor(VCRC, VCRC, Vtmp2); // Remainder fits into 32 bit. 4581 4582 // Move result. len is already updated. 4583 vsldoi(VCRC, VCRC, zeroes, 8); 4584 mfvrd(crc, VCRC); 4585 4586 // Restore non-volatile Vector registers (frameless). 4587 offsetInt = 0; 4588 offsetInt -= 16; li(offset, offsetInt); lvx(VR20, offset, R1_SP); 4589 offsetInt -= 16; li(offset, offsetInt); lvx(VR21, offset, R1_SP); 4590 offsetInt -= 16; li(offset, offsetInt); lvx(VR22, offset, R1_SP); 4591 offsetInt -= 16; li(offset, offsetInt); lvx(VR23, offset, R1_SP); 4592 offsetInt -= 16; li(offset, offsetInt); lvx(VR24, offset, R1_SP); 4593 offsetInt -= 16; li(offset, offsetInt); lvx(VR25, offset, R1_SP); 4594 #ifndef VM_LITTLE_ENDIAN 4595 offsetInt -= 16; li(offset, offsetInt); lvx(VR26, offset, R1_SP); 4596 #endif 4597 offsetInt -= 8; ld(R14, offsetInt, R1_SP); 4598 offsetInt -= 8; ld(R15, offsetInt, R1_SP); 4599 offsetInt -= 8; ld(R16, offsetInt, R1_SP); 4600 offsetInt -= 8; ld(R17, offsetInt, R1_SP); 4601 } 4602 4603 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) { 4604 assert_different_registers(crc, buf, /* len, not used!! */ table, tmp); 4605 4606 BLOCK_COMMENT("kernel_crc32_singleByte:"); 4607 if (invertCRC) { 4608 nand(crc, crc, crc); // 1s complement of crc 4609 } 4610 4611 lbz(tmp, 0, buf); // Byte from buffer, zero-extended. 
  update_byte_crc32(crc, tmp, table);

  if (invertCRC) {
    nand(crc, crc, crc);      // 1s complement of crc
  }
}

void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, bool invertCRC) {
  assert_different_registers(crc, val, table);

  BLOCK_COMMENT("kernel_crc32_singleByteReg:");
  if (invertCRC) {
    nand(crc, crc, crc);      // 1s complement of crc
  }

  update_byte_crc32(crc, val, table);

  if (invertCRC) {
    nand(crc, crc, crc);      // 1s complement of crc
  }
}

// dest_lo += src1 + src2
// dest_hi += carry1 + carry2
void MacroAssembler::add2_with_carry(Register dest_hi,
                                     Register dest_lo,
                                     Register src1, Register src2) {
  li(R0, 0);
  addc(dest_lo, dest_lo, src1);
  adde(dest_hi, dest_hi, R0);
  addc(dest_lo, dest_lo, src2);
  adde(dest_hi, dest_hi, R0);
}

// Multiply 64 bit by 64 bit first loop.
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
                                           Register x_xstart,
                                           Register y, Register y_idx,
                                           Register z,
                                           Register carry,
                                           Register product_high, Register product,
                                           Register idx, Register kdx,
                                           Register tmp) {
  // jlong carry, x[], y[], z[];
  // for (int idx = ystart, kdx = ystart + 1 + xstart; idx >= 0; idx--, kdx--) {
  //   huge_128 product = y[idx] * x[xstart] + carry;
  //   z[kdx] = (jlong)product;
  //   carry  = (jlong)(product >>> 64);
  // }
  // z[xstart] = carry;

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  addic_(xstart, xstart, -1);
  blt(CCR0, L_one_x);   // Special case: length of x is 1.

  // Load next two integers of x.
  sldi(tmp, xstart, LogBytesPerInt);
  ldx(x_xstart, x, tmp);
#ifdef VM_LITTLE_ENDIAN
  rldicl(x_xstart, x_xstart, 32, 0);
#endif

  align(32, 16);
  bind(L_first_loop);

  cmpdi(CCR0, idx, 1);
  blt(CCR0, L_first_loop_exit);
  addi(idx, idx, -2);
  beq(CCR0, L_one_y);

  // Load next two integers of y.
  sldi(tmp, idx, LogBytesPerInt);
  ldx(y_idx, y, tmp);
#ifdef VM_LITTLE_ENDIAN
  rldicl(y_idx, y_idx, 32, 0);
#endif

  bind(L_multiply);
  multiply64(product_high, product, x_xstart, y_idx);

  li(tmp, 0);
  addc(product, product, carry);         // Add carry to result.
  adde(product_high, product_high, tmp); // Add carry of the last addition.
  addi(kdx, kdx, -2);

  // Store result.
#ifdef VM_LITTLE_ENDIAN
  rldicl(product, product, 32, 0);
#endif
  sldi(tmp, kdx, LogBytesPerInt);
  stdx(product, z, tmp);
  mr_if_needed(carry, product_high);
  b(L_first_loop);

  bind(L_one_y); // Load one 32 bit portion of y as (0,value).
  lwz(y_idx, 0, y);
  b(L_multiply);

  bind(L_one_x); // Load one 32 bit portion of x as (0,value).
  lwz(x_xstart, 0, x);
  b(L_first_loop);

  bind(L_first_loop_exit);
}

// Multiply 64 bit by 64 bit and add 128 bit.
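// Note on the rldicl(..., 32, 0) sequences in these helpers: the 32-bit digits
// of x, y and z are stored most-significant first, so on little-endian an
// 8-byte load of two adjacent digits delivers them in swapped halves. Rotating
// the doubleword by 32 restores (high digit, low digit) order, and the same
// rotate re-swaps a result before its 8-byte store.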
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
                                            Register z, Register yz_idx,
                                            Register idx, Register carry,
                                            Register product_high, Register product,
                                            Register tmp, int offset) {
  // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  // z[kdx] = (jlong)product;

  sldi(tmp, idx, LogBytesPerInt);
  if (offset) {
    addi(tmp, tmp, offset);
  }
  ldx(yz_idx, y, tmp);
#ifdef VM_LITTLE_ENDIAN
  rldicl(yz_idx, yz_idx, 32, 0);
#endif

  multiply64(product_high, product, x_xstart, yz_idx);
  ldx(yz_idx, z, tmp);
#ifdef VM_LITTLE_ENDIAN
  rldicl(yz_idx, yz_idx, 32, 0);
#endif

  add2_with_carry(product_high, product, carry, yz_idx);

  sldi(tmp, idx, LogBytesPerInt);
  if (offset) {
    addi(tmp, tmp, offset);
  }
#ifdef VM_LITTLE_ENDIAN
  rldicl(product, product, 32, 0);
#endif
  stdx(product, z, tmp);
}

// Multiply 128 bit by 128 bit. Unrolled inner loop.
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
                                             Register y, Register z,
                                             Register yz_idx, Register idx, Register carry,
                                             Register product_high, Register product,
                                             Register carry2, Register tmp) {
  // jlong carry, x[], y[], z[];
  // int kdx = ystart + 1;
  // for (int idx = ystart - 2; idx >= 0; idx -= 2) { // Third loop
  //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //   z[kdx+idx+1] = (jlong)product;
  //   jlong carry2  = (jlong)(product >>> 64);
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //   z[kdx+idx] = (jlong)product;
  //   carry  = (jlong)(product >>> 64);
  // }
  // idx += 2;
  // if (idx > 0) {
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
  const Register jdx = R0;

  // Scale the index.
  srdi_(jdx, idx, 2);
  beq(CCR0, L_third_loop_exit);
  mtctr(jdx);

  align(32, 16);
  bind(L_third_loop);

  addi(idx, idx, -4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product_high, product, tmp, 8);
  mr_if_needed(carry2, product_high);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product_high, product, tmp, 0);
  mr_if_needed(carry, product_high);
  bdnz(L_third_loop);

  bind(L_third_loop_exit); // Handle any left-over operand parts.
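
  // At most three 32-bit digits remain (idx & 0x3): process an optional pair
  // with the 128-bit helper first, then an optional final single digit.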
  andi_(idx, idx, 0x3);
  beq(CCR0, L_post_third_loop_done);

  Label L_check_1;

  addic_(idx, idx, -2);
  blt(CCR0, L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product_high, product, tmp, 0);
  mr_if_needed(carry, product_high);

  bind(L_check_1);

  addi(idx, idx, 0x2);
  andi_(idx, idx, 0x1);
  addic_(idx, idx, -1);
  blt(CCR0, L_post_third_loop_done);

  sldi(tmp, idx, LogBytesPerInt);
  lwzx(yz_idx, y, tmp);
  multiply64(product_high, product, x_xstart, yz_idx);
  lwzx(yz_idx, z, tmp);

  add2_with_carry(product_high, product, yz_idx, carry);

  sldi(tmp, idx, LogBytesPerInt);
  stwx(product, z, tmp);
  srdi(product, product, 32);

  sldi(product_high, product_high, 32);
  orr(product, product, product_high);
  mr_if_needed(carry, product);

  bind(L_post_third_loop_done);
} // multiply_128_x_128_loop

void MacroAssembler::muladd(Register out, Register in,
                            Register offset, Register len, Register k,
                            Register tmp1, Register tmp2, Register carry) {
  // Labels
  Label LOOP, SKIP;

  // Make sure length is positive.
  cmpdi(CCR0, len, 0);

  // Prepare variables.
  subi(offset, offset, 4);
  li(carry, 0);
  ble(CCR0, SKIP);

  mtctr(len);
  subi(len, len, 1);
  sldi(len, len, 2);

  // Main loop.
  bind(LOOP);
  lwzx(tmp1, len, in);
  lwzx(tmp2, offset, out);
  mulld(tmp1, tmp1, k);
  add(tmp2, carry, tmp2);
  add(tmp2, tmp1, tmp2);
  stwx(tmp2, offset, out);
  srdi(carry, tmp2, 32);
  subi(offset, offset, 4);
  subi(len, len, 4);
  bdnz(LOOP);
  bind(SKIP);
}

void MacroAssembler::multiply_to_len(Register x, Register xlen,
                                     Register y, Register ylen,
                                     Register z, Register zlen,
                                     Register tmp1, Register tmp2,
                                     Register tmp3, Register tmp4,
                                     Register tmp5, Register tmp6,
                                     Register tmp7, Register tmp8,
                                     Register tmp9, Register tmp10,
                                     Register tmp11, Register tmp12,
                                     Register tmp13) {
  ShortBranchVerifier sbv(this);

  assert_different_registers(x, xlen, y, ylen, z, zlen,
                             tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
  assert_different_registers(x, xlen, y, ylen, z, zlen,
                             tmp1, tmp2, tmp3, tmp4, tmp5, tmp7);
  assert_different_registers(x, xlen, y, ylen, z, zlen,
                             tmp1, tmp2, tmp3, tmp4, tmp5, tmp8);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = tmp6;
  const Register product_high = tmp7;
  const Register x_xstart = tmp8;
  const Register tmp = tmp9;

  // First Loop.
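  // (The pseudocode here and in the loops below mirrors the Java code of
  // BigInteger.multiplyToLen, which this intrinsic expands.)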
  //
  // final static long LONG_MASK = 0xffffffffL;
  // int xstart = xlen - 1;
  // int ystart = ylen - 1;
  // long carry = 0;
  // for (int idx = ystart, kdx = ystart + 1 + xstart; idx >= 0; idx--, kdx--) {
  //   long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //   z[kdx] = (int)product;
  //   carry = product >>> 32;
  // }
  // z[xstart] = (int)carry;

  mr_if_needed(idx, ylen);  // idx = ylen
  mr_if_needed(kdx, zlen);  // kdx = xlen + ylen
  li(carry, 0);             // carry = 0

  Label L_done;

  addic_(xstart, xlen, -1);
  blt(CCR0, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z,
                        carry, product_high, product, idx, kdx, tmp);

  Label L_second_loop;

  cmpdi(CCR0, kdx, 0);
  beq(CCR0, L_second_loop);

  Label L_carry;

  addic_(kdx, kdx, -1);
  beq(CCR0, L_carry);

  // Store lower 32 bits of carry.
  sldi(tmp, kdx, LogBytesPerInt);
  stwx(carry, z, tmp);
  srdi(carry, carry, 32);
  addi(kdx, kdx, -1);

  bind(L_carry);

  // Store upper 32 bits of carry.
  sldi(tmp, kdx, LogBytesPerInt);
  stwx(carry, z, tmp);

  // Second and third (nested) loops.
  //
  // for (int i = xstart - 1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx = ystart, k = ystart + 1 + i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5 (register mapping; the
  // "x[i] = rdx" of the original comment is a leftover from the x86 version).

  bind(L_second_loop);

  li(carry, 0);               // carry = 0;

  addic_(xstart, xstart, -1); // i = xstart-1;
  blt(CCR0, L_done);

  Register zsave = tmp10;

  mr(zsave, z);

  Label L_last_x;

  sldi(tmp, xstart, LogBytesPerInt);
  add(z, z, tmp);             // z = z + k - j
  addi(z, z, 4);
  addic_(xstart, xstart, -1); // i = xstart-1;
  blt(CCR0, L_last_x);

  sldi(tmp, xstart, LogBytesPerInt);
  ldx(x_xstart, x, tmp);
#ifdef VM_LITTLE_ENDIAN
  rldicl(x_xstart, x_xstart, 32, 0);
#endif

  Label L_third_loop_prologue;

  bind(L_third_loop_prologue);

  Register xsave = tmp11;
  Register xlensave = tmp12;
  Register ylensave = tmp13;

  mr(xsave, x);
  mr(xlensave, xstart);
  mr(ylensave, ylen);

  multiply_128_x_128_loop(x_xstart, y, z, y_idx, ylen,
                          carry, product_high, product, x, tmp);

  mr(z, zsave);
  mr(x, xsave);
  mr(xlen, xlensave); // This is the decrement of the loop counter!
  mr(ylen, ylensave);

  addi(tmp3, xlen, 1);
  sldi(tmp, tmp3, LogBytesPerInt);
  stwx(carry, z, tmp);
  addic_(tmp3, tmp3, -1);
  blt(CCR0, L_done);

  srdi(carry, carry, 32);
  sldi(tmp, tmp3, LogBytesPerInt);
  stwx(carry, z, tmp);
  b(L_second_loop);

  // Next infrequent code is moved outside loops.
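  // Fewer than two 32-bit digits of x remain at this point; load the last
  // digit as (0, value) and re-enter via the third-loop prologue.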
  bind(L_last_x);

  lwz(x_xstart, 0, x);
  b(L_third_loop_prologue);

  bind(L_done);
} // multiply_to_len

void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
#ifdef ASSERT
  Label ok;
  if (check_equal) {
    beq(CCR0, ok);
  } else {
    bne(CCR0, ok);
  }
  stop(msg, id);
  bind(ok);
#endif
}

void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
                                          Register mem_base, const char* msg, int id) {
#ifdef ASSERT
  switch (size) {
    case 4:
      lwz(R0, mem_offset, mem_base);
      cmpwi(CCR0, R0, 0);
      break;
    case 8:
      ld(R0, mem_offset, mem_base);
      cmpdi(CCR0, R0, 0);
      break;
    default:
      ShouldNotReachHere();
  }
  asm_assert(check_equal, msg, id);
#endif // ASSERT
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    unimplemented("'VerifyThread' currently not implemented on PPC");
  }
}

// READ: oop. KILL: R0. Volatile floats perhaps.
void MacroAssembler::verify_oop(Register oop, const char* msg) {
  if (!VerifyOops) {
    return;
  }

  address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
  const Register tmp = R11; // Will be preserved.
  const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
  save_volatile_gprs(R1_SP, -nbytes_save); // except R0

  mr_if_needed(R4_ARG2, oop);
  save_LR_CR(tmp); // save in old frame
  push_frame_reg_args(nbytes_save, tmp);
  // load FunctionDescriptor** / entry_address *
  load_const_optimized(tmp, fd, R0);
  // load FunctionDescriptor* / entry_address
  ld(tmp, 0, tmp);
  load_const_optimized(R3_ARG1, (address)msg, R0);
  // Call destination for its side effect.
  call_c(tmp);

  pop_frame();
  restore_LR_CR(tmp);
  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}

void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, const char* msg) {
  if (!VerifyOops) {
    return;
  }

  address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
  const Register tmp = R11; // Will be preserved.
  const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
  save_volatile_gprs(R1_SP, -nbytes_save); // except R0

  ld(R4_ARG2, offs, base);
  save_LR_CR(tmp); // save in old frame
  push_frame_reg_args(nbytes_save, tmp);
  // load FunctionDescriptor** / entry_address *
  load_const_optimized(tmp, fd, R0);
  // load FunctionDescriptor* / entry_address
  ld(tmp, 0, tmp);
  load_const_optimized(R3_ARG1, (address)msg, R0);
  // Call destination for its side effect.
  call_c(tmp);

  pop_frame();
  restore_LR_CR(tmp);
  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}

const char* stop_types[] = {
  "stop",
  "untested",
  "unimplemented",
  "shouldnotreachhere"
};

static void stop_on_request(int tp, const char* msg) {
  tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
  guarantee(false, "PPC assembly code requires stop: %s", msg);
}

// Call a C-function that prints output.
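// The emitted sequence ends in an illtrap followed by the 32-bit id, so the
// trapping site can be identified afterwards (e.g. in a debugger).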
void MacroAssembler::stop(int type, const char* msg, int id) {
#ifndef PRODUCT
  block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
#else
  block_comment("stop {");
#endif

  // setup arguments
  load_const_optimized(R3_ARG1, type);
  load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
  illtrap();
  emit_int32(id);
  block_comment("} stop;");
}

#ifndef PRODUCT
// Write pattern 0x0101010101010101 in memory region [low-before, high+after].
// Val, addr are temp registers.
// If low == addr, addr is killed.
// High is preserved.
void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
  if (!ZapMemory) return;

  assert_different_registers(low, val);

  BLOCK_COMMENT("zap memory region {");
  load_const_optimized(val, 0x0101010101010101);
  int size = before + after;
  if (low == high && size < 5 && size > 0) {
    int offset = -before * BytesPerWord;
    for (int i = 0; i < size; ++i) {
      std(val, offset, low);
      offset += (1 * BytesPerWord);
    }
  } else {
    addi(addr, low, -before * BytesPerWord);
    assert_different_registers(high, val);
    if (after) addi(high, high, after * BytesPerWord);
    Label loop;
    bind(loop);
    std(val, 0, addr);
    addi(addr, addr, 8);
    cmpd(CCR6, addr, high);
    ble(CCR6, loop);
    if (after) addi(high, high, -after * BytesPerWord); // Correct back to old value.
  }
  BLOCK_COMMENT("} zap memory region");
}

#endif // !PRODUCT

void SkipIfEqualZero::skip_to_label_if_equal_zero(MacroAssembler* masm, Register temp,
                                                  const bool* flag_addr, Label& label) {
  int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
  assert(sizeof(bool) == 1, "PowerPC ABI");
  masm->lbz(temp, simm16_offset, temp);
  masm->cmpwi(CCR0, temp, 0);
  masm->beq(CCR0, label);
}

SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
  skip_to_label_if_equal_zero(masm, temp, flag_addr, _label);
}

SkipIfEqualZero::~SkipIfEqualZero() {
  _masm->bind(_label);
}
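
// Usage sketch for SkipIfEqualZero (hypothetical call site; 'SomeDiagnosticFlag'
// stands in for any C++ bool the emitted code should test at runtime):
//
//   {
//     SkipIfEqualZero skip(masm, R11_scratch1, &SomeDiagnosticFlag);
//     // ... instructions emitted here execute only when the flag is true ...
//   } // The destructor binds the skip target here.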