/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#include <ucontext.h>

#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Move 32-bit register if destination and source are different.
void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lr(rd, rs); }
}

// Move register if destination and source are different.
void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lgr(rd, rs); }
}

// Zero-extend 32-bit register into 64-bit register if destination and source are different.
void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_llgfr(rd, rs); }
}

// Move float register if destination and source are different.
void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) { z_ldr(rd, rs); }
}

// Move integer register if destination and source are different.
// It is assumed that shorter-than-int types are already
// appropriately sign-extended.
void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
                                        BasicType src_type) {
  assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");

  if (dst_type == src_type) {
    lgr_if_needed(dst, src); // Just move all 64 bits.
    return;
  }

  switch (dst_type) {
    // Do not support these types for now.
    //  case T_BOOLEAN:
    case T_BYTE:  // signed byte
      switch (src_type) {
        case T_INT:
          z_lgbr(dst, src);
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_CHAR:
    case T_SHORT:
      switch (src_type) {
        case T_INT:
          if (dst_type == T_CHAR) {
            z_llghr(dst, src);
          } else {
            z_lghr(dst, src);
          }
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_INT:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lr_if_needed(dst, src);
          // llgfr_if_needed(dst, src); // zero-extend (in case we need to find a bug).
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
    case T_LONG:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
          z_lgfr(dst, src); // sign extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    case T_OBJECT:
    case T_ARRAY:
    case T_VOID:
    case T_ADDRESS:
      switch (src_type) {
        // These types don't make sense to be converted to pointers:
        //   case T_BOOLEAN:
        //   case T_BYTE:
        //   case T_CHAR:
        //   case T_SHORT:

        case T_INT:
          z_llgfr(dst, src); // zero extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    default:
      assert(false, "non-integer dst type");
      return;
  }
}

// Move float register if destination and source are different.
void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
                                         FloatRegister src, BasicType src_type) {
  assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
  assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
  if (dst_type == src_type) {
    ldr_if_needed(dst, src); // Just move all 64 bits.
  } else {
    switch (dst_type) {
      case T_FLOAT:
        assert(src_type == T_DOUBLE, "invalid float type combination");
        z_ledbr(dst, src);
        return;
      case T_DOUBLE:
        assert(src_type == T_FLOAT, "invalid float type combination");
        z_ldebr(dst, src);
        return;
      default:
        assert(false, "non-float dst type");
        return;
    }
  }
}
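
// Illustrative usage sketch (not part of the original code; register choices
// are hypothetical). The emitters above pick the move instruction by type pair:
//
//   move_reg_if_needed(Z_R2, T_LONG, Z_R3, T_INT);      // LGFR: sign-extend int -> long
//   move_reg_if_needed(Z_R2, T_INT,  Z_R3, T_INT);      // LGR, and only if Z_R2 != Z_R3
//   move_freg_if_needed(Z_F0, T_FLOAT, Z_F1, T_DOUBLE); // LEDBR: round double -> float
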
// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::freg2mem_opt(FloatRegister reg,
                                  int64_t disp,
                                  Register index,
                                  Register base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch); // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
  } else {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
  }
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::mem2freg_opt(FloatRegister reg,
                                  int64_t disp,
                                  Register index,
                                  Register base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch); // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
  } else {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register
// (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::reg2mem_opt(Register reg,
                                 int64_t disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                                 Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          if ((scratch == reg) || (scratch == base) || (reg == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            z_lgr(scratch, base);
            add2reg(base, disp);
            (this->*classic)(reg, 0, index, base);
            z_lgr(base, scratch); // Restore base.
          }
        }
      }
    }
  }
}

int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
  int store_offset = offset();
  if (is_double) {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
  } else {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
  }
  return store_offset;
}
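
// Illustrative sketch (hypothetical operands): for a 32-bit store, a displacement
// that fits in 12 bits unsigned selects the (shorter) classic ST, while a larger
// 20-bit displacement selects the long-displacement STY:
//
//   reg2mem_opt(Z_R2, Address(Z_SP, 16), false);   // classic ST
//   reg2mem_opt(Z_R2, Address(Z_SP, 8192), false); // modern STY
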
// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) will be used as work register where possible.
void MacroAssembler::mem2reg_opt(Register reg,
                                 int64_t disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if ((reg == index) && (reg == base)) {
        z_sllg(reg, reg, 1);
        add2reg(reg, disp);
        (this->*classic)(reg, 0, noreg, reg);
      } else if ((reg == index) && (reg != Z_R0)) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, reg, base);
      } else if (reg == base) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, index, reg);
      } else if (reg != Z_R0) {
        add2reg(reg, disp, base);
        (this->*classic)(reg, 0, index, reg);
      } else { // reg == Z_R0 && reg != base here
        add2reg(base, disp);
        (this->*classic)(reg, 0, index, base);
        add2reg(base, -disp);
      }
    }
  }
}

void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
  if (is_double) {
    z_lg(reg, a);
  } else {
    mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
  }
}

void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
  mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
}

void MacroAssembler::and_imm(Register r, long mask,
                             Register tmp /* = Z_R0 */,
                             bool wide /* = false */) {
  assert(wide || Immediate::is_simm32(mask), "mask value too large");

  if (!wide) {
    z_nilf(r, mask);
    return;
  }

  assert(r != tmp, " need a different temporary register !");
  load_const_optimized(tmp, mask);
  z_ngr(r, tmp);
}

// Calculate the 1's complement.
// Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as optimization hint for 32-bit results.
void MacroAssembler::not_(Register r1, Register r2, bool wide) {

  if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
    z_xilf(r1, -1);
    if (wide) {
      z_xihf(r1, -1);
    }
  } else { // Distinct src and dst registers.
    if (VM_Version::has_DistinctOpnds()) {
      load_const_optimized(r1, -1);
      z_xgrk(r1, r2, r1);
    } else {
      if (wide) {
        z_lgr(r1, r2);
        z_xilf(r1, -1);
        z_xihf(r1, -1);
      } else {
        z_lr(r1, r2);
        z_xilf(r1, -1);
      }
    }
  }
}

unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
  assert(lBitPos >=  0,      "zero is  leftmost bit position");
  assert(rBitPos <= 63,      "63 is rightmost bit position");
  assert(lBitPos <= rBitPos, "inverted selection interval");
  return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
}
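
// Worked example for create_mask (bit 0 is the leftmost bit position):
//   create_mask(48, 63) == 0x000000000000ffffUL  // low halfword survives
//   create_mask( 0, 15) == 0xffff000000000000UL  // high halfword survives
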
// Helper function for the "Rotate_then_<logicalOP>" emitters.
// Rotate src, then mask register contents such that only bits in range survive.
// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
// For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
// The caller must ensure that the selected range only contains bits with defined value.
void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                                      int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
  assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
  bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
  bool srl4rll = (nRotate < 0) && (-nRotate <= lBitPos);      // Substitute SRL(G) for RLL(G).
  // Pre-determine which parts of dst will be zero after shift/rotate.
  bool llZero = sll4rll && (nRotate >= 16);
  bool lhZero = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
  bool lfZero = llZero && lhZero;
  bool hlZero = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
  bool hhZero = (srl4rll && (nRotate <= -16));
  bool hfZero = hlZero && hhZero;

  // Rotate, then mask src operand.
  // If oneBits == true,  all bits outside selected range are 1s.
  // If oneBits == false, all bits outside selected range are 0s.
  if (src32bit) { // There might be garbage in the upper 32 bits which will get masked away.
    if (dst32bit) {
      z_rll(dst, src, nRotate); // Copy and rotate, upper half of reg remains undisturbed.
    } else {
      if      (sll4rll) { z_sllg(dst, src,  nRotate); }
      else if (srl4rll) { z_srlg(dst, src, -nRotate); }
      else              { z_rllg(dst, src,  nRotate); }
    }
  } else {
    if      (sll4rll) { z_sllg(dst, src,  nRotate); }
    else if (srl4rll) { z_srlg(dst, src, -nRotate); }
    else              { z_rllg(dst, src,  nRotate); }
  }

  unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
  unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
  unsigned int   range_mask_l  = (unsigned int)range_mask;
  unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
  unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
  unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
  unsigned short range_mask_ll = (unsigned short)range_mask;
  // Works for z9 and newer H/W.
  if (oneBits) {
    if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
    if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
  } else {
    // All bits outside range become 0s.
    if (((~range_mask_l) != 0) && !lfZero) {
      z_nilf(dst, range_mask_l);
    }
    if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
      z_nihf(dst, range_mask_h);
    }
  }
}

// Rotate src, then insert selected range from rotated src into dst.
// Clear dst before, if requested.
void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
                                        int nRotate, bool clear_dst) {
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.
  z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
}
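
// Illustrative sketch (hypothetical registers): rotate_then_insert as a field
// extractor. A left rotate by 16 wraps bits 0..15 of src around to positions
// 48..63; selecting that range with clear_dst == true yields dst = src >> 48.
//
//   rotate_then_insert(Z_R0, Z_R1, 48, 63, 16, true);
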
// Rotate src, then and selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
}

// Rotate src, then or selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos,
                                    int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
}

// Rotate src, then xor selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f; // For risbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
  if (inc.is_register()) {
    z_agr(r1, inc.as_register());
  } else { // constant
    intptr_t imm = inc.as_constant();
    add2reg(r1, imm);
  }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
// The optimization tries to avoid the mghi instruction, since it uses the FPU for
// calculation and is thus rather slow.
//
// There is no handling for special cases, e.g. cval==0 or cval==1.
//
// Returns len of generated code block.
unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
  int block_start = offset();

  bool sign_flip = cval < 0;
  cval = sign_flip ? -cval : cval;

  BLOCK_COMMENT("Reg64*Con16 {");

  int bit1 = cval & -cval;
  if (bit1 == cval) {
    z_sllg(rval, rval, exact_log2(bit1));
    if (sign_flip) { z_lcgr(rval, rval); }
  } else {
    int bit2 = (cval-bit1) & -(cval-bit1);
    if ((bit1+bit2) == cval) {
      z_sllg(work, rval, exact_log2(bit1));
      z_sllg(rval, rval, exact_log2(bit2));
      z_agr(rval, work);
      if (sign_flip) { z_lcgr(rval, rval); }
    } else {
      if (sign_flip) { z_mghi(rval, -cval); }
      else           { z_mghi(rval,  cval); }
    }
  }
  BLOCK_COMMENT("} Reg64*Con16");

  int block_end = offset();
  return block_end - block_start;
}
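
// Worked example for the shift/add decomposition above, with cval == 10:
// bit1 = 10 & -10 = 2 and bit2 = 8, so bit1 + bit2 == cval and the emitter
// produces work = rval << 1; rval = rval << 3; rval += work, avoiding MGHI.
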
// Generic operation r1 := r2 + imm.
//
// Should produce the best code for each supported CPU version.
// r2 == noreg yields r1 := r1 + imm
// imm == 0 emits either no instruction or r1 := r2 !
// NOTES: 1) Don't use this function where fixed sized
//           instruction sequences are required!!!
//        2) Don't use this function if condition code
//           setting is required!
//        3) Despite being declared as int64_t, the parameter imm
//           must be a simm_32 value (= signed 32-bit integer).
void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
  assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");

  if (r2 == noreg) { r2 = r1; }

  // Handle special case imm == 0.
  if (imm == 0) {
    lgr_if_needed(r1, r2);
    // Nothing else to do.
    return;
  }

  if (!PreferLAoverADD || (r2 == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();

    // Can we encode imm in 16 bits signed?
    if (Immediate::is_simm16(imm)) {
      if (r1 == r2) {
        z_aghi(r1, imm);
        return;
      }
      if (distinctOpnds) {
        z_aghik(r1, r2, imm);
        return;
      }
      z_lgr(r1, r2);
      z_aghi(r1, imm);
      return;
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(imm)) {
      z_la(r1, imm, r2);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(imm)) {
      // Always use LAY instruction, so we don't need the tmp register.
      z_lay(r1, imm, r2);
      return;
    }
  }

  // Can handle it (all possible values) with long immediates.
  lgr_if_needed(r1, r2);
  z_agfi(r1, imm);
}

// Generic operation r := b + x + d
//
// Addition of several operands with address generation semantics - sort of:
//  - no restriction on the registers. Any register will do for any operand.
//  - x == noreg: operand will be disregarded.
//  - b == noreg: will use (contents of) result reg as operand (r := r + d).
//  - x == Z_R0:  just disregard
//  - b == Z_R0:  use as operand. This is not address generation semantics!!!
//
// The same restrictions as on add2reg() are valid!!!
void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
  assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");

  if (x == noreg) { x = Z_R0; }
  if (b == noreg) { b = r; }

  // Handle special case x == R0.
  if (x == Z_R0) {
    // Can simply add the immediate value to the base register.
    add2reg(r, d, b);
    return;
  }

  if (!PreferLAoverADD || (b == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();
    // Handle special case d == 0.
    if (d == 0) {
      if (b == x)        { z_sllg(r, b, 1); return; }
      if (r == x)        { z_agr(r, b);     return; }
      if (r == b)        { z_agr(r, x);     return; }
      if (distinctOpnds) { z_agrk(r, x, b); return; }
      z_lgr(r, b);
      z_agr(r, x);
    } else {
      if (x == b)             { z_sllg(r, x, 1); }
      else if (r == x)        { z_agr(r, b); }
      else if (r == b)        { z_agr(r, x); }
      else if (distinctOpnds) { z_agrk(r, x, b); }
      else {
        z_lgr(r, b);
        z_agr(r, x);
      }
      add2reg(r, d);
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(d)) {
      z_la(r, d, x, b);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(d)) {
      z_lay(r, d, x, b);
      return;
    }
    z_la(r, 0, x, b);
    add2reg(r, d);
  }
}
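
// Illustrative sketch (hypothetical registers/values) of the cases handled by
// add2reg:
//
//   add2reg(Z_R3, 8);           // AGHI (or LA, with PreferLAoverADD)
//   add2reg(Z_R3, 8, Z_R4);     // AGHIK / LGR+AGHI / LA, depending on CPU
//   add2reg(Z_R3, 0x12345678);  // exceeds simm16 -> AGFI long immediate
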
// Generic emitter (32bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_asi(a, imm);
  } else {
    z_lgf(tmp, a);
    add2reg(tmp, imm);
    z_st(tmp, a);
  }
}

void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_agsi(a, imm);
  } else {
    z_lg(tmp, a);
    add2reg(tmp, imm);
    z_stg(tmp, a);
  }
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case 8: z_lg(dst, src); break;
    case 4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
    case 2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
    case 1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
    default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
    case 8: z_stg(src, dst); break;
    case 4: z_st(src, dst); break;
    case 2: z_sth(src, dst); break;
    case 1: z_stc(src, dst); break;
    default: ShouldNotReachHere();
  }
}

// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
// a high-order summand in register tmp.
//
// return value: < 0: No split required, si20 actually has property uimm12.
//              >= 0: Split performed. Use return value as uimm12 displacement and
//                    tmp as index register.
int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
  assert(Immediate::is_simm20(si20_offset), "sanity");
  int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
  int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
  assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
         !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
  assert((lg_off+ll_off) == si20_offset, "offset splitup error");

  Register work = accumulate ? Z_R0 : tmp;

  if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
    z_lghi(work, ll_off>>12);   // Implicit sign extension.
    z_slag(work, work, 12);
  } else {                      // Len of code = 0..10.
    if (ll_off == 0) { return -1; }
    // ll_off has 8 significant bits (at most) plus sign.
    if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
      z_llilh(work, ll_off >> 16);
      if (ll_off < 0) {                  // Sign-extension required.
        z_lgfr(work, work);
      }
    } else {
      if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
        z_llill(work, ll_off);
      } else {                           // Non-zero bits in both halfbytes.
        z_lghi(work, ll_off>>12);        // Implicit sign extension.
        z_slag(work, work, 12);
      }
    }
  }
  if (accumulate) { z_algr(tmp, work); } // len of code += 4
  return lg_off;
}
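
// Worked example (values are illustrative): si20_offset == 0x12345 splits into
// ll_off == 0x12000 (materialized in the work register) and lg_off == 0x345,
// which the caller uses as a 12-bit unsigned displacement with tmp as index.
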
void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ley(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_le(t, si20, work);
    } else {
      if (accumulate) {
        z_le(t, disp12, work);
      } else {
        z_le(t, disp12, work, a);
      }
    }
  }
}

void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ldy(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_ld(t, si20, work);
    } else {
      if (accumulate) {
        z_ld(t, disp12, work);
      } else {
        z_ld(t, disp12, work, a);
      }
    }
  }
}

// PC-relative TOC access.
// Returns distance (in bytes) from current position to start of consts section.
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
  CodeSection* cs = code()->consts();
  return (long)((cs != NULL) ? cs->start()-pc() : 0);
}

// The implementation on x86/sparc assumes that constant and instruction sections
// are adjacent, but this doesn't hold here. Two special situations may occur that
// we must be able to handle:
//   1. The const section may be located apart from the inst section.
//   2. The const section may be empty.
// In both cases, we use the const section's start address to compute the "TOC".
// This seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// The PC-relative offset could be +/-2**32 -> use long for disp.
// Furthermore: it makes no sense to have special code for
// adjacent const and inst sections.
void MacroAssembler::load_toc(Register Rtoc) {
  // Simply use distance from start of const section (should be patched in the end).
  long disp = toc_distance();

  RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
  relocate(rspec);
  z_larl(Rtoc, RelAddr::pcrel_off32(disp)); // Offset is in halfwords.
}
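
// Illustrative sketch (hypothetical register choice): materializing the TOC
// base. The LARL emitted by load_toc carries a relocation and is patched once
// the final distance to the consts section is known.
//
//   load_toc(Z_R12); // Z_R12 := start of consts section
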
// PC-relative TOC access.
// Load from anywhere pc-relative (with relocation of load instr).
void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = dataLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
  assert(total_distance != 0, "sanity");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}


// PC-relative TOC access.
// Load from anywhere pc-relative (with relocation of load instr).
// The loaded addr has to be relocated when added to constant pool.
void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = addrLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// Generic operation: load a value from memory and test.
// CondCode indicates the sign (<0, ==0, >0) of the loaded value.
void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
  z_lb(dst, a);
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
  int64_t disp = a.disp20();
  if (Displacement::is_shortDisp(disp)) {
    z_lh(dst, a);
  } else if (Displacement::is_longDisp(disp)) {
    z_lhy(dst, a);
  } else {
    guarantee(false, "displacement out of range");
  }
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
  z_lt(dst, a);
}

void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
  z_ltgf(dst, a);
}

void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
  z_ltg(dst, a);
}

// Test a bit in memory.
void MacroAssembler::testbit(const Address &a, unsigned int bit) {
  assert(a.index() == noreg, "no index reg allowed in testbit");
  if (bit <= 7) {
    z_tm(a.disp() + 3, a.base(), 1 << bit);
  } else if (bit <= 15) {
    z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
  } else if (bit <= 23) {
    z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
  } else if (bit <= 31) {
    z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
  } else {
    ShouldNotReachHere();
  }
}

// Test a bit in a register. Result is reflected in CC.
void MacroAssembler::testbit(Register r, unsigned int bitPos) {
  if (bitPos < 16) {
    z_tmll(r, 1U<<bitPos);
  } else if (bitPos < 32) {
    z_tmlh(r, 1U<<(bitPos-16));
  } else if (bitPos < 48) {
    z_tmhl(r, 1U<<(bitPos-32));
  } else if (bitPos < 64) {
    z_tmhh(r, 1U<<(bitPos-48));
  } else {
    ShouldNotReachHere();
  }
}

void MacroAssembler::prefetch_read(Address a) {
  z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
}
void MacroAssembler::prefetch_update(Address a) {
  z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
}
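
// Illustrative sketch (hypothetical register and bit positions): the testbit
// emitters above pick the TM variant that covers the requested bit; the
// condition code then reflects the bit's value.
//
//   testbit(Z_R2, 5);  // TMLL Z_R2,0x0020
//   testbit(Z_R2, 33); // TMHL Z_R2,0x0002
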
// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
// set_cc:    Use instruction that sets the condition code, if true.
int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
  unsigned int start_off = offset();
  if (whole_reg) {
    set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
  } else { // Only 32bit register.
    set_cc ? z_xr(r, r) : z_lhi(r, 0);
  }
  return offset() - start_off;
}

#ifdef ASSERT
int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
  switch (pattern_len) {
    case 1:
      pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
      // Fall through.
    case 2:
      pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
      // Fall through.
    case 4:
      pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
      // Fall through.
    case 8:
      return load_const_optimized_rtn_len(r, pattern, true);
      break;
    default:
      guarantee(false, "preset_reg: bad len");
  }
  return 0;
}
#endif

// addr: Address descriptor of memory to clear. The index register will not be used!
// size: Number of bytes to clear.
//    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
//    !!! Use store_const() instead                  !!!
void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
  guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");

  if (size == 1) {
    z_mvi(addr, 0);
    return;
  }

  switch (size) {
    case 2: z_mvhhi(addr, 0);
      return;
    case 4: z_mvhi(addr, 0);
      return;
    case 8: z_mvghi(addr, 0);
      return;
    default: ; // Fallthru to xc.
  }

  z_xc(addr, size, addr);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) z_nop();
}

// Special version for non-relocateable code if required alignment
// is larger than CodeEntryAlignment.
void MacroAssembler::align_address(int modulus) {
  while ((uintptr_t)pc() % modulus != 0) z_nop();
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register temp_reg,
                                         int64_t extra_slot_offset) {
  // On Z, we can have index and disp in an Address. So don't call argument_offset,
  // which issues an unnecessary add instruction.
  int stackElementSize = Interpreter::stackElementSize;
  int64_t offset = extra_slot_offset * stackElementSize;
  const Register argbase = Z_esp;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return Address(argbase, offset);
  }
  // else
  assert(temp_reg != noreg, "must specify");
  assert(temp_reg != Z_ARG1, "base and index are conflicting");
  z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
  return Address(argbase, temp_reg, offset);
}
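
// Illustrative sketch (hypothetical operands, assuming 8-byte stack elements):
// a constant slot number folds into the displacement, while a register slot is
// scaled into temp_reg and used as index:
//
//   argument_address(2, noreg, 0);   // -> Address(Z_esp, 2 * stackElementSize)
//   argument_address(Z_R2, Z_R1, 0); // -> Address(Z_esp, Z_R1, 0); Z_R1 = slot << 3
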
//===================================================================
//=== START  C O N S T A N T S   I N   C O D E   S T R E A M     ===
//===================================================================
//===         P A T C H A B L E   C O N S T A N T S              ===
//===================================================================


//---------------------------------------------------
//  Load (patchable) constant into register
//---------------------------------------------------


// Load absolute address (and try to optimize).
//   Note: This method is usable only for position-fixed code,
//         referring to a position-fixed target location.
//         If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
  assert(addr != NULL, "should not happen");
  BLOCK_COMMENT("load_absolute_address:");
  if (addr == NULL) {
    z_larl(d, pc()); // Dummy emit for size calc.
    return;
  }

  if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
    z_larl(d, addr);
    return;
  }

  load_const_optimized(d, (long)addr);
}

// Load a 64bit constant.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const(Register t, long x) {
  Assembler::z_iihf(t, (int)(x >> 32));
  Assembler::z_iilf(t, (int)(x & 0xffffffff));
}

// Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
  if (sign_extend) { Assembler::z_lgfi(t, x); }
  else             { Assembler::z_llilf(t, x); }
}

// Load narrow oop constant, no decompression.
void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
  assert(UseCompressedOops, "must be on to call this method");
  load_const_32to64(t, a, false /*sign_extend*/);
}

// Load narrow klass constant, compression required.
void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(k);
  load_const_32to64(t, encoded_k, false /*sign_extend*/);
}

//------------------------------------------------------
//  Compare (patchable) constant with register.
//------------------------------------------------------

// Compare narrow oop in reg with narrow oop constant, no decompression.
void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
  assert(UseCompressedOops, "must be on to call this method");

  Assembler::z_clfi(oop1, oop2);
}

// Compare narrow klass in reg with narrow klass constant, no decompression.
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(klass2);

  Assembler::z_clfi(klass1, encoded_k);
}
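
// Illustrative sketch (hypothetical values): load_const always emits the fixed
// 12-byte IIHF/IILF pair, so the constant can be located and patched later:
//
//   address a = pc();
//   load_const(Z_R1, 0x1234567887654321L);
//   // ... later, once the real value is known (real_value is hypothetical):
//   patch_const(a, (long)real_value);
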
//----------------------------------------------------------
//  Check which kind of load_constant we have here.
//----------------------------------------------------------

// Detection of CPU version dependent load_const sequence.
// The detection is valid only for code sequences generated by load_const,
// not load_const_optimized.
bool MacroAssembler::is_load_const(address a) {
  unsigned long inst1, inst2;
  unsigned int  len1,  len2;

  len1 = get_instruction(a, &inst1);
  len2 = get_instruction(a + len1, &inst2);

  return is_z_iihf(inst1) && is_z_iilf(inst2);
}

// Detection of CPU version dependent load_const_32to64 sequence.
// Mostly used for narrow oops and narrow Klass pointers.
// The detection is valid only for code sequences generated by load_const_32to64.
bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1;
  unsigned int  len1;

  len1 = get_instruction(pos, &inst1);
  return is_z_llilf(inst1);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate32(address pos) {
  return is_equal(pos, CLFI_ZOPC, RIL_MASK);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
  return is_compare_immediate32(pos);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
  return is_compare_immediate32(pos);
}

//-----------------------------------
//  Patch the load_constant
//-----------------------------------

// CPU-version dependent patching of load_const.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const(a), "not a load of a constant");
  set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
  set_imm32((address)(a + 6), (int)(x & 0xffffffff));
}

// Patching the value of CPU version dependent load_const_32to64 sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
  assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");

  set_imm32(pos, np);
  return 6;
}

// Patching the value of CPU version dependent compare_immediate_narrow sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
  assert(is_compare_immediate32(pos), "not a compressed ptr compare");

  set_imm32(pos, np);
  return 6;
}

// Patching the immediate value of CPU version dependent load_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_load_const_32to64(pos, no);
}
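
// Illustrative sketch of the detect-then-patch flow for narrow constants
// (pos and the compressed value are assumed to be provided by the caller):
//
//   if (MacroAssembler::is_load_const_32to64(pos)) {
//     MacroAssembler::patch_load_const_32to64(pos, compressed_value);
//   }
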
// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_load_const_32to64(pos, nk);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_compare_immediate_32(pos, no);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_compare_immediate_32(pos, nk);
}

//------------------------------------------------------------------------
//  Extract the constant from a load_constant instruction stream.
//------------------------------------------------------------------------

// Get constant from a load_const sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const(a), "not a load of a constant");
  unsigned long x;
  x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
  x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
  return (long) x;
}
//--------------------------------------
//  Store a constant in memory.
//--------------------------------------

// General emitter to move a constant to memory.
// The store is atomic.
//  o Address must be given in RS format (no index register)
//  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
//  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot must be at least as wide as constant, will assert otherwise.
//  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
int MacroAssembler::store_const(const Address &dest, long imm,
                                unsigned int lm, unsigned int lc,
                                Register scratch) {
  int64_t  disp = dest.disp();
  Register base = dest.base();
  assert(!dest.has_index(), "not supported");
  assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
  assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
  assert(lm>=lc, "memory slot too small");
  assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
  assert(Displacement::is_validDisp(disp), "displacement out of range");

  bool is_shortDisp = Displacement::is_shortDisp(disp);
  int store_offset = -1;

  // For target len == 1 it's easy.
  if (lm == 1) {
    store_offset = offset();
    if (is_shortDisp) {
      z_mvi(disp, base, imm);
      return store_offset;
    } else {
      z_mviy(disp, base, imm);
      return store_offset;
    }
  }

  // All the "good stuff" takes an unsigned displacement.
  if (is_shortDisp) {
    // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.

    store_offset = offset();
    switch (lm) {
      case 2: // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
        z_mvhhi(disp, base, imm);
        return store_offset;
      case 4:
        if (Immediate::is_simm16(imm)) {
          z_mvhi(disp, base, imm);
          return store_offset;
        }
        break;
      case 8:
        if (Immediate::is_simm16(imm)) {
          z_mvghi(disp, base, imm);
          return store_offset;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Can't optimize, so load value and store it.
  guarantee(scratch != noreg, " need a scratch register here !");
  if (imm != 0) {
    load_const_optimized(scratch, imm); // Preserves CC anyway.
  } else {
    // Leave CC alone!!
    (void) clear_reg(scratch, true, false); // Indicate unused result.
  }

  store_offset = offset();
  if (is_shortDisp) {
    switch (lm) {
      case 2:
        z_sth(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_st(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
    switch (lm) {
      case 2:
        z_sthy(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_sty(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return -1; // Should not reach here.
}
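
// Worked example (hypothetical operands): an 8-byte slot with a constant that
// fits in 16 bits allows the single atomic MVGHI; a constant needing all 64
// bits (imm64 is hypothetical) falls back to load_const_optimized plus STG.
//
//   store_const(Address(Z_SP, 16), 1, 8, 2, Z_R0);     // MVGHI 16(Z_SP),1
//   store_const(Address(Z_SP, 16), imm64, 8, 8, Z_R1); // load scratch + STG
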
//===================================================================
//===     N O T   P A T C H A B L E   C O N S T A N T S          ===
//===================================================================

// Load constant x into register t with a fast instruction sequence
// depending on the bits in x. Preserves CC under all circumstances.
int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
  if (x == 0) {
    int len;
    if (emit) {
      len = clear_reg(t, true, false);
    } else {
      len = 4;
    }
    return len;
  }

  if (Immediate::is_simm16(x)) {
    if (emit) { z_lghi(t, x); }
    return 4;
  }

  // 64 bit value: | part1 | part2 | part3 | part4 |
  // At least one part is not zero!
  int part1 = ((x >> 32) & 0xffff0000) >> 16;
  int part2 = (x >> 32) & 0x0000ffff;
  int part3 = (x & 0xffff0000) >> 16;
  int part4 = (x & 0x0000ffff);

  // Lower word only (unsigned).
  if ((part1 == 0) && (part2 == 0)) {
    if (part3 == 0) {
      if (emit) z_llill(t, part4);
      return 4;
    }
    if (part4 == 0) {
      if (emit) z_llilh(t, part3);
      return 4;
    }
    if (emit) z_llilf(t, (int)(x & 0xffffffff));
    return 6;
  }

  // Upper word only.
  if ((part3 == 0) && (part4 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      return 4;
    }
    if (part2 == 0) {
      if (emit) z_llihh(t, part1);
      return 4;
    }
    if (emit) z_llihf(t, (int)(x >> 32));
    return 6;
  }

  // Lower word only (signed).
  if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
    if (emit) z_lgfi(t, (int)(x & 0xffffffff));
    return 6;
  }

  int len = 0;

  if ((part1 == 0) || (part2 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      len += 4;
    } else {
      if (emit) z_llihh(t, part1);
      len += 4;
    }
  } else {
    if (emit) z_llihf(t, (int)(x >> 32));
    len += 6;
  }

  if ((part3 == 0) || (part4 == 0)) {
    if (part3 == 0) {
      if (emit) z_iill(t, part4);
      len += 4;
    } else {
      if (emit) z_iilh(t, part3);
      len += 4;
    }
  } else {
    if (emit) z_iilf(t, (int)(x & 0xffffffff));
    len += 6;
  }
  return len;
}

//=====================================================================
//===   H I G H E R   L E V E L   B R A N C H   E M I T T E R S     ===
//=====================================================================

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
}

void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
}
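
// Illustrative sketch (hypothetical operands and labels): the four wrappers
// above only select width and signedness for compare_and_branch_optimized:
//
//   compare32_and_branch(Z_R2, Z_R3, bcondEqual, L_equal); // 32-bit signed
//   compareU64_and_branch(Z_R2, Z_R3, bcondHigh, L_above); // 64-bit unsigned
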
1596 // Used in fallback case only. 1597 // 1598 // This version of branch_optimized is good for cases where the target address is known 1599 // and constant, i.e. is never changed (no relocation, no patching). 1600 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) { 1601 address branch_origin = pc(); 1602 1603 if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) { 1604 z_brc(cond, branch_addr); 1605 } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) { 1606 z_brcl(cond, branch_addr); 1607 } else { 1608 load_const_optimized(Z_R1, branch_addr); // CC must not get killed by load_const_optimized. 1609 z_bcr(cond, Z_R1); 1610 } 1611 } 1612 1613 // This version of branch_optimized is good for cases where the target address 1614 // is potentially not yet known at the time the code is emitted. 1615 // 1616 // One very common case is a branch to an unbound label which is handled here. 1617 // The caller might know (or hope) that the branch distance is short enough 1618 // to be encoded in a 16bit relative address. In this case he will pass a 1619 // NearLabel branch_target. 1620 // Care must be taken with unbound labels. Each call to target(label) creates 1621 // an entry in the patch queue for that label to patch all references of the label 1622 // once it gets bound. Those recorded patch locations must be patchable. Otherwise, 1623 // an assertion fires at patch time. 1624 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) { 1625 if (branch_target.is_bound()) { 1626 address branch_addr = target(branch_target); 1627 branch_optimized(cond, branch_addr); 1628 } else if (branch_target.is_near()) { 1629 z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc. 1630 } else { 1631 z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time. 1632 } 1633 } 1634 1635 // Generate an optimal compare and branch to the branch target. 1636 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the 1637 // branch distance is short enough. Loading the target address into a 1638 // register and branching via reg is used as fallback only. 1639 // 1640 // Input: 1641 // r1 - left compare operand 1642 // r2 - right compare operand 1643 void MacroAssembler::compare_and_branch_optimized(Register r1, 1644 Register r2, 1645 Assembler::branch_condition cond, 1646 address branch_addr, 1647 bool len64, 1648 bool has_sign) { 1649 unsigned int casenum = (len64?2:0)+(has_sign?0:1); 1650 1651 address branch_origin = pc(); 1652 if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) { 1653 switch (casenum) { 1654 case 0: z_crj( r1, r2, cond, branch_addr); break; 1655 case 1: z_clrj (r1, r2, cond, branch_addr); break; 1656 case 2: z_cgrj(r1, r2, cond, branch_addr); break; 1657 case 3: z_clgrj(r1, r2, cond, branch_addr); break; 1658 default: ShouldNotReachHere(); break; 1659 } 1660 } else { 1661 switch (casenum) { 1662 case 0: z_cr( r1, r2); break; 1663 case 1: z_clr(r1, r2); break; 1664 case 2: z_cgr(r1, r2); break; 1665 case 3: z_clgr(r1, r2); break; 1666 default: ShouldNotReachHere(); break; 1667 } 1668 branch_optimized(cond, branch_addr); 1669 } 1670 } 1671 1672 // Generate an optimal compare and branch to the branch target. 1673 // Optimal means that a relative branch (clgij, brc or brcl) is used if the 1674 // branch distance is short enough. 
Loading the target address into a 1675 // register and branching via reg is used as fallback only. 1676 // 1677 // Input: 1678 // r1 - left compare operand (in register) 1679 // x2 - right compare operand (immediate) 1680 void MacroAssembler::compare_and_branch_optimized(Register r1, 1681 jlong x2, 1682 Assembler::branch_condition cond, 1683 Label& branch_target, 1684 bool len64, 1685 bool has_sign) { 1686 address branch_origin = pc(); 1687 bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2)); 1688 bool is_RelAddr16 = branch_target.is_near() || 1689 (branch_target.is_bound() && 1690 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin)); 1691 unsigned int casenum = (len64?2:0)+(has_sign?0:1); 1692 1693 if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) { 1694 switch (casenum) { 1695 case 0: z_cij( r1, x2, cond, branch_target); break; 1696 case 1: z_clij(r1, x2, cond, branch_target); break; 1697 case 2: z_cgij(r1, x2, cond, branch_target); break; 1698 case 3: z_clgij(r1, x2, cond, branch_target); break; 1699 default: ShouldNotReachHere(); break; 1700 } 1701 return; 1702 } 1703 1704 if (x2 == 0) { 1705 switch (casenum) { 1706 case 0: z_ltr(r1, r1); break; 1707 case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication! 1708 case 2: z_ltgr(r1, r1); break; 1709 case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication! 1710 default: ShouldNotReachHere(); break; 1711 } 1712 } else { 1713 if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) { 1714 switch (casenum) { 1715 case 0: z_chi(r1, x2); break; 1716 case 1: z_chi(r1, x2); break; // positive immediate < 2**15 1717 case 2: z_cghi(r1, x2); break; 1718 case 3: z_cghi(r1, x2); break; // positive immediate < 2**15 1719 default: break; 1720 } 1721 } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) { 1722 switch (casenum) { 1723 case 0: z_cfi( r1, x2); break; 1724 case 1: z_clfi(r1, x2); break; 1725 case 2: z_cgfi(r1, x2); break; 1726 case 3: z_clgfi(r1, x2); break; 1727 default: ShouldNotReachHere(); break; 1728 } 1729 } else { 1730 // No instruction with immediate operand possible, so load into register. 1731 Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1; 1732 load_const_optimized(scratch, x2); 1733 switch (casenum) { 1734 case 0: z_cr( r1, scratch); break; 1735 case 1: z_clr(r1, scratch); break; 1736 case 2: z_cgr(r1, scratch); break; 1737 case 3: z_clgr(r1, scratch); break; 1738 default: ShouldNotReachHere(); break; 1739 } 1740 } 1741 } 1742 branch_optimized(cond, branch_target); 1743 } 1744 1745 // Generate an optimal compare and branch to the branch target. 1746 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the 1747 // branch distance is short enough. Loading the target address into a 1748 // register and branching via reg is used as fallback only. 1749 // 1750 // Input: 1751 // r1 - left compare operand 1752 // r2 - right compare operand 1753 void MacroAssembler::compare_and_branch_optimized(Register r1, 1754 Register r2, 1755 Assembler::branch_condition cond, 1756 Label& branch_target, 1757 bool len64, 1758 bool has_sign) { 1759 unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 
0 : 1); 1760 1761 if (branch_target.is_bound()) { 1762 address branch_addr = target(branch_target); 1763 compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign); 1764 } else { 1765 if (VM_Version::has_CompareBranch() && branch_target.is_near()) { 1766 switch (casenum) { 1767 case 0: z_crj( r1, r2, cond, branch_target); break; 1768 case 1: z_clrj( r1, r2, cond, branch_target); break; 1769 case 2: z_cgrj( r1, r2, cond, branch_target); break; 1770 case 3: z_clgrj(r1, r2, cond, branch_target); break; 1771 default: ShouldNotReachHere(); break; 1772 } 1773 } else { 1774 switch (casenum) { 1775 case 0: z_cr( r1, r2); break; 1776 case 1: z_clr(r1, r2); break; 1777 case 2: z_cgr(r1, r2); break; 1778 case 3: z_clgr(r1, r2); break; 1779 default: ShouldNotReachHere(); break; 1780 } 1781 branch_optimized(cond, branch_target); 1782 } 1783 } 1784 } 1785 1786 //=========================================================================== 1787 //=== END H I G H E R L E V E L B R A N C H E M I T T E R S === 1788 //=========================================================================== 1789 1790 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) { 1791 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1792 int index = oop_recorder()->allocate_metadata_index(obj); 1793 RelocationHolder rspec = metadata_Relocation::spec(index); 1794 return AddressLiteral((address)obj, rspec); 1795 } 1796 1797 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) { 1798 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1799 int index = oop_recorder()->find_index(obj); 1800 RelocationHolder rspec = metadata_Relocation::spec(index); 1801 return AddressLiteral((address)obj, rspec); 1802 } 1803 1804 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) { 1805 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1806 int oop_index = oop_recorder()->allocate_oop_index(obj); 1807 return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); 1808 } 1809 1810 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { 1811 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1812 int oop_index = oop_recorder()->find_index(obj); 1813 return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); 1814 } 1815 1816 // NOTE: destroys r 1817 void MacroAssembler::c2bool(Register r, Register t) { 1818 z_lcr(t, r); // t = -r 1819 z_or(r, t); // r = -r OR r 1820 z_srl(r, 31); // Yields 0 if r was 0, 1 otherwise. 1821 } 1822 1823 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1824 Register tmp, 1825 int offset) { 1826 intptr_t value = *delayed_value_addr; 1827 if (value != 0) { 1828 return RegisterOrConstant(value + offset); 1829 } 1830 1831 BLOCK_COMMENT("delayed_value {"); 1832 // Load indirectly to solve generation ordering problem. 1833 load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a; 1834 z_lg(tmp, 0, tmp); // tmp = *tmp; 1835 1836 #ifdef ASSERT 1837 NearLabel L; 1838 compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L); 1839 z_illtrap(); 1840 bind(L); 1841 #endif 1842 1843 if (offset != 0) { 1844 z_agfi(tmp, offset); // tmp = tmp + offset; 1845 } 1846 1847 BLOCK_COMMENT("} delayed_value"); 1848 return RegisterOrConstant(tmp); 1849 } 1850 1851 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos' 1852 // and return the resulting instruction. 
1853 // Dest_pos and inst_pos are 32 bit only. These parms can only designate 1854 // relative positions. 1855 // Use correct argument types. Do not pre-calculate distance. 1856 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) { 1857 int c = 0; 1858 unsigned long patched_inst = 0; 1859 if (is_call_pcrelative_short(inst) || 1860 is_branch_pcrelative_short(inst) || 1861 is_branchoncount_pcrelative_short(inst) || 1862 is_branchonindex32_pcrelative_short(inst)) { 1863 c = 1; 1864 int m = fmask(15, 0); // simm16(-1, 16, 32); 1865 int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32); 1866 patched_inst = (inst & ~m) | v; 1867 } else if (is_compareandbranch_pcrelative_short(inst)) { 1868 c = 2; 1869 long m = fmask(31, 16); // simm16(-1, 16, 48); 1870 long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48); 1871 patched_inst = (inst & ~m) | v; 1872 } else if (is_branchonindex64_pcrelative_short(inst)) { 1873 c = 3; 1874 long m = fmask(31, 16); // simm16(-1, 16, 48); 1875 long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48); 1876 patched_inst = (inst & ~m) | v; 1877 } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) { 1878 c = 4; 1879 long m = fmask(31, 0); // simm32(-1, 16, 48); 1880 long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48); 1881 patched_inst = (inst & ~m) | v; 1882 } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions. 1883 c = 5; 1884 long m = fmask(31, 0); // simm32(-1, 16, 48); 1885 long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48); 1886 patched_inst = (inst & ~m) | v; 1887 } else { 1888 print_dbg_msg(tty, inst, "not a relative branch", 0); 1889 dump_code_range(tty, inst_pos, 32, "not a pcrelative branch"); 1890 ShouldNotReachHere(); 1891 } 1892 1893 long new_off = get_pcrel_offset(patched_inst); 1894 if (new_off != (dest_pos-inst_pos)) { 1895 tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off); 1896 print_dbg_msg(tty, inst, "<- original instruction: branch patching error", 0); 1897 print_dbg_msg(tty, patched_inst, "<- patched instruction: branch patching error", 0); 1898 #ifdef LUCY_DBG 1899 VM_Version::z_SIGSEGV(); 1900 #endif 1901 ShouldNotReachHere(); 1902 } 1903 return patched_inst; 1904 } 1905 1906 // Only called when binding labels (share/vm/asm/assembler.cpp) 1907 // Pass arguments as intended. Do not pre-calculate distance. 1908 void MacroAssembler::pd_patch_instruction(address branch, address target) { 1909 unsigned long stub_inst; 1910 int inst_len = get_instruction(branch, &stub_inst); 1911 1912 set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len); 1913 } 1914 1915 1916 // Extract relative address (aka offset). 1917 // inv_simm16 works for 4-byte instructions only. 1918 // compare and branch instructions are 6-byte and have a 16bit offset "in the middle". 
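// Illustration: a 6-byte compare-and-branch instruction (e.g. CRJ) carries its
// 16-bit relative offset at bit positions 16..31 of the 48-bit instruction
// (cf. the simm16(..., 16, 48) handling in patched_branch() above), so
// inv_simm16_48() must be used for those, while inv_simm16() serves the
// 4-byte instructions.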
1919 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1920
1921   if (MacroAssembler::is_pcrelative_short(inst)) {
1922     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1923       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1924     } else {
1925       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1926     }
1927   }
1928
1929   if (MacroAssembler::is_pcrelative_long(inst)) {
1930     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1931   }
1932
1933   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1934 #ifdef LUCY_DBG
1935   VM_Version::z_SIGSEGV();
1936 #else
1937   ShouldNotReachHere();
1938 #endif
1939   return -1;
1940 }
1941
1942 long MacroAssembler::get_pcrel_offset(address pc) {
1943   unsigned long inst;
1944   unsigned int len = get_instruction(pc, &inst);
1945
1946 #ifdef ASSERT
1947   long offset;
1948   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1949     offset = get_pcrel_offset(inst);
1950   } else {
1951     offset = -1;
1952   }
1953
1954   if (offset == -1) {
1955     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1956 #ifdef LUCY_DBG
1957     VM_Version::z_SIGSEGV();
1958 #else
1959     ShouldNotReachHere();
1960 #endif
1961   }
1962   return offset;
1963 #else
1964   return get_pcrel_offset(inst);
1965 #endif // ASSERT
1966 }
1967
1968 // Get target address from pc-relative instructions.
1969 address MacroAssembler::get_target_addr_pcrel(address pc) {
1970   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1971   return pc + get_pcrel_offset(pc);
1972 }
1973
1974 // Patch pc relative load address.
1975 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1976   unsigned long inst;
1977   // Offset is +/- 2**32 -> use long.
1978   ptrdiff_t distance = con - pc;
1979
1980   get_instruction(pc, &inst);
1981
1982   if (is_pcrelative_short(inst)) {
1983     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc); // Instructions are at least 2-byte aligned, no test required.
1984
1985     // Some extra safety net.
1986     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1987       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1988       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1989       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1990     }
1991     return;
1992   }
1993
1994   if (is_pcrelative_long(inst)) {
1995     *(int *)(pc+2) = RelAddr::pcrel_off32(con, pc);
1996
1997     // Some extra safety net.
1998     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1999       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
2000       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
2001       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
2002     }
2003     return;
2004   }
2005
2006   guarantee(false, "not a pcrelative instruction to patch!");
2007 }
2008
2009 // "Current PC" here means the address just behind the basr instruction.
2010 address MacroAssembler::get_PC(Register result) {
2011   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2012   return pc();
2013 }
2014
2015 // Get current PC + offset.
2016 // Offset given in bytes, must be even!
2017 // "Current PC" here means the address of the larl instruction plus the given offset.
2018 address MacroAssembler::get_PC(Register result, int64_t offset) {
2019   address here = pc();
2020   z_larl(result, offset/2); // Save target instruction address in result.
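// (LARL takes its immediate operand in halfwords, hence the offset/2 above;
// the byte offset must be even, see the emitter comment.)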
2021 return here + offset; 2022 } 2023 2024 void MacroAssembler::instr_size(Register size, Register pc) { 2025 // Extract 2 most significant bits of current instruction. 2026 z_llgc(size, Address(pc)); 2027 z_srl(size, 6); 2028 // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6. 2029 z_ahi(size, 3); 2030 z_nill(size, 6); 2031 } 2032 2033 // Resize_frame with SP(new) = SP(old) - [offset]. 2034 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp) 2035 { 2036 assert_different_registers(offset, fp, Z_SP); 2037 if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } 2038 2039 z_sgr(Z_SP, offset); 2040 z_stg(fp, _z_abi(callers_sp), Z_SP); 2041 } 2042 2043 // Resize_frame with SP(new) = [newSP] + offset. 2044 // This emitter is useful if we already have calculated a pointer 2045 // into the to-be-allocated stack space, e.g. with special alignment properties, 2046 // but need some additional space, e.g. for spilling. 2047 // newSP is the pre-calculated pointer. It must not be modified. 2048 // fp holds, or is filled with, the frame pointer. 2049 // offset is the additional increment which is added to addr to form the new SP. 2050 // Note: specify a negative value to reserve more space! 2051 // load_fp == true only indicates that fp is not pre-filled with the frame pointer. 2052 // It does not guarantee that fp contains the frame pointer at the end. 2053 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) { 2054 assert_different_registers(newSP, fp, Z_SP); 2055 2056 if (load_fp) { 2057 z_lg(fp, _z_abi(callers_sp), Z_SP); 2058 } 2059 2060 add2reg(Z_SP, offset, newSP); 2061 z_stg(fp, _z_abi(callers_sp), Z_SP); 2062 } 2063 2064 // Resize_frame with SP(new) = [newSP]. 2065 // load_fp == true only indicates that fp is not pre-filled with the frame pointer. 2066 // It does not guarantee that fp contains the frame pointer at the end. 2067 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) { 2068 assert_different_registers(newSP, fp, Z_SP); 2069 2070 if (load_fp) { 2071 z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store. 2072 } 2073 2074 z_lgr(Z_SP, newSP); 2075 if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses. 2076 z_stg(fp, _z_abi(callers_sp), newSP); 2077 } else { 2078 z_stg(fp, _z_abi(callers_sp), Z_SP); 2079 } 2080 } 2081 2082 // Resize_frame with SP(new) = SP(old) + offset. 2083 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) { 2084 assert_different_registers(fp, Z_SP); 2085 2086 if (load_fp) { 2087 z_lg(fp, _z_abi(callers_sp), Z_SP); 2088 } 2089 add64(Z_SP, offset); 2090 z_stg(fp, _z_abi(callers_sp), Z_SP); 2091 } 2092 2093 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) { 2094 #ifdef ASSERT 2095 assert_different_registers(bytes, old_sp, Z_SP); 2096 if (!copy_sp) { 2097 z_cgr(old_sp, Z_SP); 2098 asm_assert_eq("[old_sp]!=[Z_SP]", 0x211); 2099 } 2100 #endif 2101 if (copy_sp) { z_lgr(old_sp, Z_SP); } 2102 if (bytes_with_inverted_sign) { 2103 z_agr(Z_SP, bytes); 2104 } else { 2105 z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster. 
2106 } 2107 z_stg(old_sp, _z_abi(callers_sp), Z_SP); 2108 } 2109 2110 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) { 2111 long offset = Assembler::align(bytes, frame::alignment_in_bytes); 2112 assert(offset > 0, "should push a frame with positive size, size = %ld.", offset); 2113 assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset); 2114 2115 // We must not write outside the current stack bounds (given by Z_SP). 2116 // Thus, we have to first update Z_SP and then store the previous SP as stack linkage. 2117 // We rely on Z_R0 by default to be available as scratch. 2118 z_lgr(scratch, Z_SP); 2119 add2reg(Z_SP, -offset); 2120 z_stg(scratch, _z_abi(callers_sp), Z_SP); 2121 #ifdef ASSERT 2122 // Just make sure nobody uses the value in the default scratch register. 2123 // When another register is used, the caller might rely on it containing the frame pointer. 2124 if (scratch == Z_R0) { 2125 z_iihf(scratch, 0xbaadbabe); 2126 z_iilf(scratch, 0xdeadbeef); 2127 } 2128 #endif 2129 return offset; 2130 } 2131 2132 // Push a frame of size `bytes' plus abi160 on top. 2133 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) { 2134 BLOCK_COMMENT("push_frame_abi160 {"); 2135 unsigned int res = push_frame(bytes + frame::z_abi_160_size); 2136 BLOCK_COMMENT("} push_frame_abi160"); 2137 return res; 2138 } 2139 2140 // Pop current C frame. 2141 void MacroAssembler::pop_frame() { 2142 BLOCK_COMMENT("pop_frame:"); 2143 Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP); 2144 } 2145 2146 // Pop current C frame and restore return PC register (Z_R14). 2147 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) { 2148 BLOCK_COMMENT("pop_frame_restore_retPC:"); 2149 int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes; 2150 // If possible, pop frame by add instead of load (a penny saved is a penny got :-). 2151 if (Displacement::is_validDisp(retPC_offset)) { 2152 z_lg(Z_R14, retPC_offset, Z_SP); 2153 add2reg(Z_SP, frame_size_in_bytes); 2154 } else { 2155 add2reg(Z_SP, frame_size_in_bytes); 2156 restore_return_pc(); 2157 } 2158 } 2159 2160 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) { 2161 if (allow_relocation) { 2162 call_c(entry_point); 2163 } else { 2164 call_c_static(entry_point); 2165 } 2166 } 2167 2168 void MacroAssembler::call_VM_leaf_base(address entry_point) { 2169 bool allow_relocation = true; 2170 call_VM_leaf_base(entry_point, allow_relocation); 2171 } 2172 2173 void MacroAssembler::call_VM_base(Register oop_result, 2174 Register last_java_sp, 2175 address entry_point, 2176 bool allow_relocation, 2177 bool check_exceptions) { // Defaults to true. 2178 // Allow_relocation indicates, if true, that the generated code shall 2179 // be fit for code relocation or referenced data relocation. In other 2180 // words: all addresses must be considered variable. PC-relative addressing 2181 // is not possible then. 2182 // On the other hand, if (allow_relocation == false), addresses and offsets 2183 // may be considered stable, enabling us to take advantage of some PC-relative 2184 // addressing tweaks. These might improve performance and reduce code size. 2185 2186 // Determine last_java_sp register. 2187 if (!last_java_sp->is_valid()) { 2188 last_java_sp = Z_SP; // Load Z_SP as SP. 2189 } 2190 2191 set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation); 2192 2193 // ARG1 must hold thread address. 
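// (The VM entry points reached via call_VM expect the current JavaThread*
// as their first C argument; the Java-visible arguments follow in Z_ARG2 ff.)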
2194 z_lgr(Z_ARG1, Z_thread); 2195 2196 address return_pc = NULL; 2197 if (allow_relocation) { 2198 return_pc = call_c(entry_point); 2199 } else { 2200 return_pc = call_c_static(entry_point); 2201 } 2202 2203 reset_last_Java_frame(allow_relocation); 2204 2205 // C++ interp handles this in the interpreter. 2206 check_and_handle_popframe(Z_thread); 2207 check_and_handle_earlyret(Z_thread); 2208 2209 // Check for pending exceptions. 2210 if (check_exceptions) { 2211 // Check for pending exceptions (java_thread is set upon return). 2212 load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset())); 2213 2214 // This used to conditionally jump to forward_exception however it is 2215 // possible if we relocate that the branch will not reach. So we must jump 2216 // around so we can always reach. 2217 2218 Label ok; 2219 z_bre(ok); // Bcondequal is the same as bcondZero. 2220 call_stub(StubRoutines::forward_exception_entry()); 2221 bind(ok); 2222 } 2223 2224 // Get oop result if there is one and reset the value in the thread. 2225 if (oop_result->is_valid()) { 2226 get_vm_result(oop_result); 2227 } 2228 2229 _last_calls_return_pc = return_pc; // Wipe out other (error handling) calls. 2230 } 2231 2232 void MacroAssembler::call_VM_base(Register oop_result, 2233 Register last_java_sp, 2234 address entry_point, 2235 bool check_exceptions) { // Defaults to true. 2236 bool allow_relocation = true; 2237 call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions); 2238 } 2239 2240 // VM calls without explicit last_java_sp. 2241 2242 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { 2243 // Call takes possible detour via InterpreterMacroAssembler. 2244 call_VM_base(oop_result, noreg, entry_point, true, check_exceptions); 2245 } 2246 2247 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) { 2248 // Z_ARG1 is reserved for the thread. 2249 lgr_if_needed(Z_ARG2, arg_1); 2250 call_VM(oop_result, entry_point, check_exceptions); 2251 } 2252 2253 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { 2254 // Z_ARG1 is reserved for the thread. 2255 lgr_if_needed(Z_ARG2, arg_1); 2256 assert(arg_2 != Z_ARG2, "smashed argument"); 2257 lgr_if_needed(Z_ARG3, arg_2); 2258 call_VM(oop_result, entry_point, check_exceptions); 2259 } 2260 2261 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, 2262 Register arg_3, bool check_exceptions) { 2263 // Z_ARG1 is reserved for the thread. 2264 lgr_if_needed(Z_ARG2, arg_1); 2265 assert(arg_2 != Z_ARG2, "smashed argument"); 2266 lgr_if_needed(Z_ARG3, arg_2); 2267 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2268 lgr_if_needed(Z_ARG4, arg_3); 2269 call_VM(oop_result, entry_point, check_exceptions); 2270 } 2271 2272 // VM static calls without explicit last_java_sp. 2273 2274 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) { 2275 // Call takes possible detour via InterpreterMacroAssembler. 2276 call_VM_base(oop_result, noreg, entry_point, false, check_exceptions); 2277 } 2278 2279 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2, 2280 Register arg_3, bool check_exceptions) { 2281 // Z_ARG1 is reserved for the thread. 
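// As with the other call_VM flavors, the explicit arguments are therefore
// shifted up by one register: arg_1 -> Z_ARG2, arg_2 -> Z_ARG3, arg_3 -> Z_ARG4.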
2282 lgr_if_needed(Z_ARG2, arg_1); 2283 assert(arg_2 != Z_ARG2, "smashed argument"); 2284 lgr_if_needed(Z_ARG3, arg_2); 2285 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2286 lgr_if_needed(Z_ARG4, arg_3); 2287 call_VM_static(oop_result, entry_point, check_exceptions); 2288 } 2289 2290 // VM calls with explicit last_java_sp. 2291 2292 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) { 2293 // Call takes possible detour via InterpreterMacroAssembler. 2294 call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions); 2295 } 2296 2297 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) { 2298 // Z_ARG1 is reserved for the thread. 2299 lgr_if_needed(Z_ARG2, arg_1); 2300 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2301 } 2302 2303 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, 2304 Register arg_2, bool check_exceptions) { 2305 // Z_ARG1 is reserved for the thread. 2306 lgr_if_needed(Z_ARG2, arg_1); 2307 assert(arg_2 != Z_ARG2, "smashed argument"); 2308 lgr_if_needed(Z_ARG3, arg_2); 2309 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2310 } 2311 2312 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, 2313 Register arg_2, Register arg_3, bool check_exceptions) { 2314 // Z_ARG1 is reserved for the thread. 2315 lgr_if_needed(Z_ARG2, arg_1); 2316 assert(arg_2 != Z_ARG2, "smashed argument"); 2317 lgr_if_needed(Z_ARG3, arg_2); 2318 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2319 lgr_if_needed(Z_ARG4, arg_3); 2320 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2321 } 2322 2323 // VM leaf calls. 2324 2325 void MacroAssembler::call_VM_leaf(address entry_point) { 2326 // Call takes possible detour via InterpreterMacroAssembler. 2327 call_VM_leaf_base(entry_point, true); 2328 } 2329 2330 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { 2331 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2332 call_VM_leaf(entry_point); 2333 } 2334 2335 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { 2336 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2337 assert(arg_2 != Z_ARG1, "smashed argument"); 2338 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2339 call_VM_leaf(entry_point); 2340 } 2341 2342 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { 2343 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2344 assert(arg_2 != Z_ARG1, "smashed argument"); 2345 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2346 assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument"); 2347 if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3); 2348 call_VM_leaf(entry_point); 2349 } 2350 2351 // Static VM leaf calls. 2352 // Really static VM leaf calls are never patched. 2353 2354 void MacroAssembler::call_VM_leaf_static(address entry_point) { 2355 // Call takes possible detour via InterpreterMacroAssembler. 
2356   call_VM_leaf_base(entry_point, false);
2357 }
2358
2359 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2360   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2361   call_VM_leaf_static(entry_point);
2362 }
2363
2364 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2365   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2366   assert(arg_2 != Z_ARG1, "smashed argument");
2367   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2368   call_VM_leaf_static(entry_point);
2369 }
2370
2371 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2372   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2373   assert(arg_2 != Z_ARG1, "smashed argument");
2374   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2375   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2376   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2377   call_VM_leaf_static(entry_point);
2378 }
2379
2380 // Don't use detour via call_c(reg).
2381 address MacroAssembler::call_c(address function_entry) {
2382   load_const(Z_R1, function_entry);
2383   return call(Z_R1);
2384 }
2385
2386 // Variant for really static (non-relocatable) calls which are never patched.
2387 address MacroAssembler::call_c_static(address function_entry) {
2388   load_absolute_address(Z_R1, function_entry);
2389 #if 0 // def ASSERT
2390   // Verify that call site did not move.
2391   load_const_optimized(Z_R0, function_entry);
2392   z_cgr(Z_R1, Z_R0);
2393   z_brc(bcondEqual, 3);
2394   z_illtrap(0xba);
2395 #endif
2396   return call(Z_R1);
2397 }
2398
2399 address MacroAssembler::call_c_opt(address function_entry) {
2400   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2401   _last_calls_return_pc = success ? pc() : NULL;
2402   return _last_calls_return_pc;
2403 }
2404
2405 // Identify a call_far_patchable instruction: LARL + LG + BASR
2406 //
2407 //    nop                   ; optionally, if required for alignment
2408 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2409 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2410 //
2411 // Code pattern will eventually get patched into variant2 (see below for detection code).
2412 //
2413 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2414   address iaddr = instruction_addr;
2415
2416   // Check for the actual load instruction.
2417   if (!is_load_const_from_toc(iaddr)) { return false; }
2418   iaddr += load_const_from_toc_size();
2419
2420   // Check for the call (BASR) instruction, finally.
2421   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2422   return is_call_byregister(iaddr);
2423 }
2424
2425 // Identify a call_far_patchable instruction: BRASL
2426 //
2427 // Code pattern to suit atomic patching:
2428 //    nop                   ; Optionally, if required for alignment.
2429 //    nop ...               ; Multiple filler nops to compensate for size difference (variant0 is longer).
2430 //    nop                   ; For code pattern detection: Prepend each BRASL with a nop.
2431 //    brasl Z_R14,<reladdr> ; End of code must be 4-byte aligned!
2432 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2433   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2434
2435   // Check for correct number of leading nops.
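// (Variant2 is shorter than variant0, so call_far_patchable_size() -
// call_far_pcrelative_size() bytes of filler nops are expected before the
// nop/BRASL pair that is checked below.)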
2436   address iaddr;
2437   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2438     if (!is_z_nop(iaddr)) { return false; }
2439   }
2440   assert(iaddr == call_addr, "sanity");
2441
2442   // --> Check for call instruction.
2443   if (is_call_far_pcrelative(call_addr)) {
2444     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2445     return true;
2446   }
2447
2448   return false;
2449 }
2450
2451 // Emit a NOT mt-safely patchable 64 bit absolute call.
2452 // If toc_offset == -2, then the destination of the call (= target) is emitted
2453 //                      to the constant pool and a runtime_call relocation is added
2454 //                      to the code buffer.
2455 // If toc_offset != -2, target must already be in the constant pool at
2456 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2457 //                      from the runtime_call relocation).
2458 // Special handling of emitting to scratch buffer when there is no constant pool.
2459 // Slightly changed code pattern. We emit an additional nop if we would
2460 // not end emitting at a word aligned address. This is to ensure
2461 // an atomically patchable displacement in brasl instructions.
2462 //
2463 // A call_far_patchable comes in different flavors:
2464 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2465 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2466 //  - BRASL                  (relative address of call target coded in instruction)
2467 // All flavors occupy the same amount of space. Length differences are compensated
2468 // by leading nops, such that the instruction sequence always ends at the same
2469 // byte offset. This is required to keep the return offset constant.
2470 // Furthermore, the return address (the end of the instruction sequence) is forced
2471 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2472 // need to patch the call target of the BRASL flavor.
2473 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2474 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2475   // Get current pc and ensure word alignment for end of instr sequence.
2476   const address start_pc = pc();
2477   const intptr_t start_off = offset();
2478   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2479   const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2480   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2481   const bool emit_relative_call  = !emit_target_to_pool &&
2482                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2483                                    ReoptimizeCallSequences &&
2484                                    !code_section()->scratch_emit();
2485
2486   if (emit_relative_call) {
2487     // Add padding to get the same size as below.
2488     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2489     unsigned int current_padding;
2490     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2491     assert(current_padding == padding, "sanity");
2492
2493     // relative call: len = 2(nop) + 6 (brasl)
2494     // CodeBlob resize cannot occur in this case because
2495     // this call is emitted into pre-existing space.
2496     z_nop(); // Prepend each BRASL with a nop.
2497     z_brasl(Z_R14, target);
2498   } else {
2499     // absolute call: Get address from TOC.
2500 // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8} 2501 if (emit_target_to_pool) { 2502 // When emitting the call for the first time, we do not need to use 2503 // the pc-relative version. It will be patched anyway, when the code 2504 // buffer is copied. 2505 // Relocation is not needed when !ReoptimizeCallSequences. 2506 relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none; 2507 AddressLiteral dest(target, rt); 2508 // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills 2509 // inst_mark(). Reset if possible. 2510 bool reset_mark = (inst_mark() == pc()); 2511 tocOffset = store_oop_in_toc(dest); 2512 if (reset_mark) { set_inst_mark(); } 2513 if (tocOffset == -1) { 2514 return false; // Couldn't create constant pool entry. 2515 } 2516 } 2517 assert(offset() == start_off, "emit no code before this point!"); 2518 2519 address tocPos = pc() + tocOffset; 2520 if (emit_target_to_pool) { 2521 tocPos = code()->consts()->start() + tocOffset; 2522 } 2523 load_long_pcrelative(Z_R14, tocPos); 2524 z_basr(Z_R14, Z_R14); 2525 } 2526 2527 #ifdef ASSERT 2528 // Assert that we can identify the emitted call. 2529 assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call"); 2530 assert(offset() == start_off+call_far_patchable_size(), "wrong size"); 2531 2532 if (emit_target_to_pool) { 2533 assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target, 2534 "wrong encoding of dest address"); 2535 } 2536 #endif 2537 return true; // success 2538 } 2539 2540 // Identify a call_far_patchable instruction. 2541 // For more detailed information see header comment of call_far_patchable. 2542 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) { 2543 return is_call_far_patchable_variant2_at(instruction_addr) || // short version: BRASL 2544 is_call_far_patchable_variant0_at(instruction_addr); // long version LARL + LG + BASR 2545 } 2546 2547 // Does the call_far_patchable instruction use a pc-relative encoding 2548 // of the call destination? 2549 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) { 2550 // Variant 2 is pc-relative. 2551 return is_call_far_patchable_variant2_at(instruction_addr); 2552 } 2553 2554 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) { 2555 // Prepend each BRASL with a nop. 2556 return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required. 2557 } 2558 2559 // Set destination address of a call_far_patchable instruction. 2560 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) { 2561 ResourceMark rm; 2562 2563 // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit). 2564 int code_size = MacroAssembler::call_far_patchable_size(); 2565 CodeBuffer buf(instruction_addr, code_size); 2566 MacroAssembler masm(&buf); 2567 masm.call_far_patchable(dest, tocOffset); 2568 ICache::invalidate_range(instruction_addr, code_size); // Empty on z. 2569 } 2570 2571 // Get dest address of a call_far_patchable instruction. 2572 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) { 2573 // Dynamic TOC: absolute address in constant pool. 2574 // Check variant2 first, it is more frequent. 2575 2576 // Relative address encoded in call instruction. 
2577 if (is_call_far_patchable_variant2_at(instruction_addr)) { 2578 return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop. 2579 2580 // Absolute address in constant pool. 2581 } else if (is_call_far_patchable_variant0_at(instruction_addr)) { 2582 address iaddr = instruction_addr; 2583 2584 long tocOffset = get_load_const_from_toc_offset(iaddr); 2585 address tocLoc = iaddr + tocOffset; 2586 return *(address *)(tocLoc); 2587 } else { 2588 fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr); 2589 fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n", 2590 *(unsigned long*)instruction_addr, 2591 *(unsigned long*)(instruction_addr+8), 2592 call_far_patchable_size()); 2593 Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size()); 2594 ShouldNotReachHere(); 2595 return NULL; 2596 } 2597 } 2598 2599 void MacroAssembler::align_call_far_patchable(address pc) { 2600 if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); } 2601 } 2602 2603 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 2604 } 2605 2606 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 2607 } 2608 2609 // Read from the polling page. 2610 // Use TM or TMY instruction, depending on read offset. 2611 // offset = 0: Use TM, safepoint polling. 2612 // offset < 0: Use TMY, profiling safepoint polling. 2613 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) { 2614 if (Immediate::is_uimm12(offset)) { 2615 z_tm(offset, polling_page_address, mask_safepoint); 2616 } else { 2617 z_tmy(offset, polling_page_address, mask_profiling); 2618 } 2619 } 2620 2621 // Check whether z_instruction is a read access to the polling page 2622 // which was emitted by load_from_polling_page(..). 2623 bool MacroAssembler::is_load_from_polling_page(address instr_loc) { 2624 unsigned long z_instruction; 2625 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2626 2627 if (ilen == 2) { return false; } // It's none of the allowed instructions. 2628 2629 if (ilen == 4) { 2630 if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail. 2631 2632 int ms = inv_mask(z_instruction,8,32); // mask 2633 int ra = inv_reg(z_instruction,16,32); // base register 2634 int ds = inv_uimm12(z_instruction); // displacement 2635 2636 if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) { 2637 return false; // It's not a z_tm(0, ra, mask_safepoint). Fail. 2638 } 2639 2640 } else { /* if (ilen == 6) */ 2641 2642 assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y)."); 2643 2644 if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail. 2645 2646 int ms = inv_mask(z_instruction,8,48); // mask 2647 int ra = inv_reg(z_instruction,16,48); // base register 2648 int ds = inv_simm20(z_instruction); // displacement 2649 } 2650 2651 return true; 2652 } 2653 2654 // Extract poll address from instruction and ucontext. 
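// The poll is either a 4-byte TM (uimm12 displacement) or a 6-byte TMY
// (simm20 displacement). The base register content is not known statically;
// it is recovered from the register set saved in the ucontext at the time
// of the poll-triggered fault.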
2655 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) { 2656 assert(ucontext != NULL, "must have ucontext"); 2657 ucontext_t* uc = (ucontext_t*) ucontext; 2658 unsigned long z_instruction; 2659 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2660 2661 if (ilen == 4 && is_z_tm(z_instruction)) { 2662 int ra = inv_reg(z_instruction, 16, 32); // base register 2663 int ds = inv_uimm12(z_instruction); // displacement 2664 address addr = (address)uc->uc_mcontext.gregs[ra]; 2665 return addr + ds; 2666 } else if (ilen == 6 && is_z_tmy(z_instruction)) { 2667 int ra = inv_reg(z_instruction, 16, 48); // base register 2668 int ds = inv_simm20(z_instruction); // displacement 2669 address addr = (address)uc->uc_mcontext.gregs[ra]; 2670 return addr + ds; 2671 } 2672 2673 ShouldNotReachHere(); 2674 return NULL; 2675 } 2676 2677 // Extract poll register from instruction. 2678 uint MacroAssembler::get_poll_register(address instr_loc) { 2679 unsigned long z_instruction; 2680 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2681 2682 if (ilen == 4 && is_z_tm(z_instruction)) { 2683 return (uint)inv_reg(z_instruction, 16, 32); // base register 2684 } else if (ilen == 6 && is_z_tmy(z_instruction)) { 2685 return (uint)inv_reg(z_instruction, 16, 48); // base register 2686 } 2687 2688 ShouldNotReachHere(); 2689 return 0; 2690 } 2691 2692 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) { 2693 ShouldNotCallThis(); 2694 return false; 2695 } 2696 2697 // Write serialization page so VM thread can do a pseudo remote membar 2698 // We use the current thread pointer to calculate a thread specific 2699 // offset to write to within the page. This minimizes bus traffic 2700 // due to cache line collision. 2701 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { 2702 assert_different_registers(tmp1, tmp2); 2703 z_sllg(tmp2, thread, os::get_serialize_page_shift_count()); 2704 load_const_optimized(tmp1, (long) os::get_memory_serialize_page()); 2705 2706 int mask = os::get_serialize_page_mask(); 2707 if (Immediate::is_uimm16(mask)) { 2708 z_nill(tmp2, mask); 2709 z_llghr(tmp2, tmp2); 2710 } else { 2711 z_nilf(tmp2, mask); 2712 z_llgfr(tmp2, tmp2); 2713 } 2714 2715 z_release(); 2716 z_st(Z_R0, 0, tmp2, tmp1); 2717 } 2718 2719 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) { 2720 if (SafepointMechanism::uses_thread_local_poll()) { 2721 const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */); 2722 // Armed page has poll_bit set. 2723 z_tm(poll_byte_addr, SafepointMechanism::poll_bit()); 2724 z_brnaz(slow_path); 2725 } else { 2726 load_const_optimized(temp_reg, SafepointSynchronize::address_of_state()); 2727 z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized); 2728 z_brne(slow_path); 2729 } 2730 } 2731 2732 // Don't rely on register locking, always use Z_R1 as scratch register instead. 2733 void MacroAssembler::bang_stack_with_offset(int offset) { 2734 // Stack grows down, caller passes positive offset. 2735 assert(offset > 0, "must bang with positive offset"); 2736 if (Displacement::is_validDisp(-offset)) { 2737 z_tmy(-offset, Z_SP, mask_stackbang); 2738 } else { 2739 add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!! 2740 z_tm(0, Z_R1, mask_stackbang); // Just banging. 
2741 }
2742
2743
2744 void MacroAssembler::reserved_stack_check(Register return_pc) {
2745   // Test if reserved zone needs to be enabled.
2746   Label no_reserved_zone_enabling;
2747   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2748   BLOCK_COMMENT("reserved_stack_check {");
2749
2750   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2751   z_brl(no_reserved_zone_enabling);
2752
2753   // Enable reserved zone again, throw stack overflow exception.
2754   save_return_pc();
2755   push_frame_abi160(0);
2756   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2757   pop_frame();
2758   restore_return_pc();
2759
2760   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2761   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2762   z_br(Z_R1);
2763
2764   should_not_reach_here();
2765
2766   bind(no_reserved_zone_enabling);
2767   BLOCK_COMMENT("} reserved_stack_check");
2768 }
2769
2770 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
2771 void MacroAssembler::tlab_allocate(Register obj,
2772                                    Register var_size_in_bytes,
2773                                    int con_size_in_bytes,
2774                                    Register t1,
2775                                    Label& slow_case) {
2776   assert_different_registers(obj, var_size_in_bytes, t1);
2777   Register end = t1;
2778   Register thread = Z_thread;
2779
2780   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2781   if (var_size_in_bytes == noreg) {
2782     z_lay(end, Address(obj, con_size_in_bytes));
2783   } else {
2784     z_lay(end, Address(obj, var_size_in_bytes));
2785   }
2786   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2787   branch_optimized(bcondHigh, slow_case);
2788
2789   // Update the tlab top pointer.
2790   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2791
2792   // Recover var_size_in_bytes if necessary.
2793   if (var_size_in_bytes == end) {
2794     z_sgr(var_size_in_bytes, obj);
2795   }
2796 }
2797
2798 // Emitter for interface method lookup.
2799 //   input: recv_klass, intf_klass, itable_index
2800 //   output: method_result
2801 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2802 // TODO: temp2_reg is unused. We may use this emitter also in the itable stubs.
2803 // If the register is still not needed by then, remove it.
2804 void MacroAssembler::lookup_interface_method(Register recv_klass,
2805                                              Register intf_klass,
2806                                              RegisterOrConstant itable_index,
2807                                              Register method_result,
2808                                              Register temp1_reg,
2809                                              Register temp2_reg,
2810                                              Label& no_such_interface) {
2811
2812   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2813   const Register itable_entry_addr = Z_R1_scratch;
2814   const Register itable_interface = Z_R0_scratch;
2815
2816   BLOCK_COMMENT("lookup_interface_method {");
2817
2818   // Load start of itable entries into itable_entry_addr.
2819   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2820   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2821
2822   // Loop over all itable entries until desired interfaceOop(Rinterface) found.
2823   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2824
2825   add2reg_with_index(itable_entry_addr,
2826                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2827                      recv_klass, vtable_len);
2828
2829   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2830   Label search;
2831
2832   bind(search);
2833
2834   // Handle IncompatibleClassChangeError.
2835   // If the entry is NULL then we've reached the end of the table
2836   // without finding the expected interface, so throw an exception.
2837   load_and_test_long(itable_interface, Address(itable_entry_addr));
2838   z_bre(no_such_interface);
2839
2840   add2reg(itable_entry_addr, itable_offset_search_inc);
2841   z_cgr(itable_interface, intf_klass);
2842   z_brne(search);
2843
2844   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2845
2846   const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2847                                     itableOffsetEntry::interface_offset_in_bytes()) -
2848                                    itable_offset_search_inc;
2849
2850   // Compute itableMethodEntry and get method and entry point.
2851   // We use addressing with index and displacement, since the formula
2852   // for computing the entry's offset has a fixed and a dynamic part,
2853   // the latter depending on the matched interface entry and on whether
2854   // the itable index has been passed as a register or as a constant value.
2855   int method_offset = itableMethodEntry::method_offset_in_bytes();
2856                            // Fixed part (displacement), common operand.
2857   Register itable_offset;  // Dynamic part (index register).
2858
2859   if (itable_index.is_register()) {
2860     // Compute the method's offset in that register, for the formula, see the
2861     // else-clause below.
2862     itable_offset = itable_index.as_register();
2863
2864     z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
2865     z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2866   } else {
2867     itable_offset = Z_R1_scratch;
2868     // Displacement increases.
2869     method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2870
2871     // Load index from itable.
2872     z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2873   }
2874
2875   // Finally load the method's oop.
2876   z_lg(method_result, method_offset, itable_offset, recv_klass);
2877   BLOCK_COMMENT("} lookup_interface_method");
2878 }
2879
2880 // Lookup for virtual method invocation.
2881 void MacroAssembler::lookup_virtual_method(Register recv_klass,
2882                                            RegisterOrConstant vtable_index,
2883                                            Register method_result) {
2884   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2885   assert(vtableEntry::size() * wordSize == wordSize,
2886          "else adjust the scaling in the code below");
2887
2888   BLOCK_COMMENT("lookup_virtual_method {");
2889
2890   const int base = in_bytes(Klass::vtable_start_offset());
2891
2892   if (vtable_index.is_constant()) {
2893     // Load with base + disp.
2894     Address vtable_entry_addr(recv_klass,
2895                               vtable_index.as_constant() * wordSize +
2896                               base +
2897                               vtableEntry::method_offset_in_bytes());
2898
2899     z_lg(method_result, vtable_entry_addr);
2900   } else {
2901     // Shift index properly and load with base + index + disp.
2902     Register vindex = vtable_index.as_register();
2903     Address vtable_entry_addr(recv_klass, vindex,
2904                               base + vtableEntry::method_offset_in_bytes());
2905
2906     z_sllg(vindex, vindex, exact_log2(wordSize));
2907     z_lg(method_result, vtable_entry_addr);
2908   }
2909   BLOCK_COMMENT("} lookup_virtual_method");
2910 }
2911
2912 // Factor out code to call ic_miss_handler.
2913 // Generate code to call the inline cache miss handler.
2914 //
2915 // In most cases, this code will be generated out-of-line.
2916 // The method parameters are intended to provide some variability.
2917 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2918 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2919 //                  Any value except 0x00 is supported.
2920 //                  = 0x00 - do not generate illtrap instructions.
2921 //                           use nops to fill unused space.
2922 //   requiredSize - required size of the generated code. If the actually
2923 //                  generated code is smaller, use padding instructions to fill up.
2924 //                  = 0 - no size requirement, no padding.
2925 //   scratch      - scratch register to hold branch target address.
2926 //
2927 // The method returns the code offset of the bound label.
2928 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2929   intptr_t startOffset = offset();
2930
2931   // Prevent entry at content_begin().
2932   if (trapMarker != 0) {
2933     z_illtrap(trapMarker);
2934   }
2935
2936   // Load address of inline cache miss code into scratch register
2937   // and branch to cache miss handler.
2938   BLOCK_COMMENT("IC miss handler {");
2939   BIND(ICM);
2940   unsigned int   labelOffset = offset();
2941   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2942
2943   load_const_optimized(scratch, icmiss);
2944   z_br(scratch);
2945
2946   // Fill unused space.
2947   if (requiredSize > 0) {
2948     while ((offset() - startOffset) < requiredSize) {
2949       if (trapMarker == 0) {
2950         z_nop();
2951       } else {
2952         z_illtrap(trapMarker);
2953       }
2954     }
2955   }
2956   BLOCK_COMMENT("} IC miss handler");
2957   return labelOffset;
2958 }
2959
2960 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2961   Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
2962   int      klass_offset = oopDesc::klass_offset_in_bytes();
2963   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2964     if (VM_Version::has_CompareBranch()) {
2965       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2966     } else {
2967       z_ltgr(Z_ARG1, Z_ARG1);
2968       z_bre(ic_miss);
2969     }
2970   }
2971   // Compare cached class against klass from receiver.
2972   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2973   z_brne(ic_miss);
2974 }
2975
2976 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2977                                                    Register   super_klass,
2978                                                    Register   temp1_reg,
2979                                                    Label*     L_success,
2980                                                    Label*     L_failure,
2981                                                    Label*     L_slow_path,
2982                                                    RegisterOrConstant super_check_offset) {
2983
2984   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2985   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2986
2987   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2988   bool need_slow_path = (must_load_sco ||
2989                          super_check_offset.constant_or_zero() == sc_offset);
2990
2991   // Input registers must not overlap.
2992 assert_different_registers(sub_klass, super_klass, temp1_reg); 2993 if (super_check_offset.is_register()) { 2994 assert_different_registers(sub_klass, super_klass, 2995 super_check_offset.as_register()); 2996 } else if (must_load_sco) { 2997 assert(temp1_reg != noreg, "supply either a temp or a register offset"); 2998 } 2999 3000 const Register Rsuper_check_offset = temp1_reg; 3001 3002 NearLabel L_fallthrough; 3003 int label_nulls = 0; 3004 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 3005 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 3006 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 3007 assert(label_nulls <= 1 || 3008 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 3009 "at most one NULL in the batch, usually"); 3010 3011 BLOCK_COMMENT("check_klass_subtype_fast_path {"); 3012 // If the pointers are equal, we are done (e.g., String[] elements). 3013 // This self-check enables sharing of secondary supertype arrays among 3014 // non-primary types such as array-of-interface. Otherwise, each such 3015 // type would need its own customized SSA. 3016 // We move this check to the front of the fast path because many 3017 // type checks are in fact trivially successful in this manner, 3018 // so we get a nicely predicted branch right at the start of the check. 3019 compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success); 3020 3021 // Check the supertype display, which is uint. 3022 if (must_load_sco) { 3023 z_llgf(Rsuper_check_offset, sco_offset, super_klass); 3024 super_check_offset = RegisterOrConstant(Rsuper_check_offset); 3025 } 3026 Address super_check_addr(sub_klass, super_check_offset, 0); 3027 z_cg(super_klass, super_check_addr); // compare w/ displayed supertype 3028 3029 // This check has worked decisively for primary supers. 3030 // Secondary supers are sought in the super_cache ('super_cache_addr'). 3031 // (Secondary supers are interfaces and very deeply nested subtypes.) 3032 // This works in the same check above because of a tricky aliasing 3033 // between the super_cache and the primary super display elements. 3034 // (The 'super_check_addr' can address either, as the case requires.) 3035 // Note that the cache is updated below if it does not help us find 3036 // what we need immediately. 3037 // So if it was a primary super, we can just fail immediately. 3038 // Otherwise, it's the slow path for us (no success at this point). 3039 3040 // Hacked jmp, which may only be used just before L_fallthrough. 3041 #define final_jmp(label) \ 3042 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3043 else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/ 3044 3045 if (super_check_offset.is_register()) { 3046 branch_optimized(Assembler::bcondEqual, *L_success); 3047 z_cfi(super_check_offset.as_register(), sc_offset); 3048 if (L_failure == &L_fallthrough) { 3049 branch_optimized(Assembler::bcondEqual, *L_slow_path); 3050 } else { 3051 branch_optimized(Assembler::bcondNotEqual, *L_failure); 3052 final_jmp(*L_slow_path); 3053 } 3054 } else if (super_check_offset.as_constant() == sc_offset) { 3055 // Need a slow path; fast failure is impossible. 3056 if (L_slow_path == &L_fallthrough) { 3057 branch_optimized(Assembler::bcondEqual, *L_success); 3058 } else { 3059 branch_optimized(Assembler::bcondNotEqual, *L_slow_path); 3060 final_jmp(*L_success); 3061 } 3062 } else { 3063 // No slow path; it's a fast decision. 
3064 if (L_failure == &L_fallthrough) {
3065 branch_optimized(Assembler::bcondEqual, *L_success);
3066 } else {
3067 branch_optimized(Assembler::bcondNotEqual, *L_failure);
3068 final_jmp(*L_success);
3069 }
3070 }
3071
3072 bind(L_fallthrough);
3074 #undef final_jmp
3075 BLOCK_COMMENT("} check_klass_subtype_fast_path");
3076 // fallthru (to slow path)
3077 }
3078
3079 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3080 Register Rsuperklass,
3081 Register Rarray_ptr, // tmp
3082 Register Rlength, // tmp
3083 Label* L_success,
3084 Label* L_failure) {
3085 // Input registers must not overlap.
3086 // Also check for R1 which is explicitly used here.
3087 assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3088 NearLabel L_fallthrough, L_loop;
3089 int label_nulls = 0;
3090 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3091 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3092 assert(label_nulls <= 1, "at most one NULL in the batch");
3093
3094 const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3095 const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3096
3097 const int length_offset = Array<Klass*>::length_offset_in_bytes();
3098 const int base_offset = Array<Klass*>::base_offset_in_bytes();
3099
3100 // Hacked jmp, which may only be used just before L_fallthrough.
3101 #define final_jmp(label) \
3102 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
3103 else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3104
3105 NearLabel loop_iterate, loop_count, match;
3106
3107 BLOCK_COMMENT("check_klass_subtype_slow_path {");
3108 z_lg(Rarray_ptr, ss_offset, Rsubklass);
3109
3110 load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3111 branch_optimized(Assembler::bcondZero, *L_failure);
3112
3113 // Entries in the table are no longer compressed.
3114 z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3115 z_bre(match); // Shortcut for array length = 1.
3116
3117 // No match yet, so we must walk the array's elements.
3118 z_lngfr(Rlength, Rlength);
3119 z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3120 z_llill(Z_R1, BytesPerWord); // Set increment/end index.
3121 add2reg(Rlength, 2 * BytesPerWord); // start index = -(n-2)*BytesPerWord
3122 z_slgr(Rarray_ptr, Rlength); // start addr: += (n-2)*BytesPerWord
3123 z_bru(loop_count);
3124
3125 BIND(loop_iterate);
3126 z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3127 z_bre(match);
3128 BIND(loop_count);
3129 z_brxlg(Rlength, Z_R1, loop_iterate);
3130
3131 // Rsuperklass not found among secondary super classes -> failure.
3132 branch_optimized(Assembler::bcondAlways, *L_failure);
3133
3134 // Got a hit. Return success (zero result). Set cache.
3135 // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3136
3137 BIND(match);
3138
3139 z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3140
3141 final_jmp(*L_success);
3142
3143 // Exit to the surrounding code.
3144 BIND(L_fallthrough);
3146 #undef final_jmp
3147 BLOCK_COMMENT("} check_klass_subtype_slow_path");
3148 }
3149
3150 // Emitter for combining fast and slow path.
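// Editor's sketch (illustrative, not part of the original source) of the
// control flow combined by check_klass_subtype() below:
//   fast path:  certain hit  -> L_success
//               certain miss -> failure
//               undecided    -> fall through into the slow path
//   slow path:  scan secondary supers; hit -> L_success, miss -> fall through
//   failure:    bound at the end, execution continues after the emitted code.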
3151 void MacroAssembler::check_klass_subtype(Register sub_klass,
3152 Register super_klass,
3153 Register temp1_reg,
3154 Register temp2_reg,
3155 Label& L_success) {
3156 NearLabel failure;
3157 BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3158 check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3159 &L_success, &failure, NULL);
3160 check_klass_subtype_slow_path(sub_klass, super_klass,
3161 temp1_reg, temp2_reg, &L_success, NULL);
3162 BIND(failure);
3163 BLOCK_COMMENT("} check_klass_subtype");
3164 }
3165
3166 // Increment a counter at counter_address when the eq condition code is
3167 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3168 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3169 Label l;
3170 z_brne(l);
3171 load_const(tmp1_reg, counter_address);
3172 add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3173 z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3174 bind(l);
3175 }
3176
3177 // Semantics are dependent on the slow_case label:
3178 // If the slow_case label is not NULL, failure to biased-lock the object
3179 // transfers control to the location of the slow_case label. If the
3180 // object could be biased-locked, control is transferred to the done label.
3181 // The condition code is unpredictable.
3182 //
3183 // If the slow_case label is NULL, failure to biased-lock the object results
3184 // in a transfer of control to the done label with a condition code of not_equal.
3185 // If the biased-lock could be successfully obtained, control is transferred to
3186 // the done label with a condition code of equal.
3187 // It is mandatory to act on the condition code at the done label.
3188 //
3189 void MacroAssembler::biased_locking_enter(Register obj_reg,
3190 Register mark_reg,
3191 Register temp_reg,
3192 Register temp2_reg, // May be Z_R0!
3193 Label &done,
3194 Label *slow_case) {
3195 assert(UseBiasedLocking, "why call this otherwise?");
3196 assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3197
3198 Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3199
3200 BLOCK_COMMENT("biased_locking_enter {");
3201
3202 // Biased locking
3203 // See whether the lock is currently biased toward our thread and
3204 // whether the epoch is still valid.
3205 // Note that the runtime guarantees sufficient alignment of JavaThread
3206 // pointers to allow age to be placed into low bits.
3207 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3208 "biased locking makes assumptions about bit layout");
3209 z_lr(temp_reg, mark_reg);
3210 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3211 z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3212 z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked.
3213
3214 load_prototype_header(temp_reg, obj_reg);
3215 load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3216
3217 z_ogr(temp_reg, Z_thread);
3218 z_xgr(temp_reg, mark_reg);
3219 z_ngr(temp_reg, temp2_reg);
3220 if (PrintBiasedLockingStatistics) {
3221 increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3222 // Restore mark_reg.
3223 z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3224 }
3225 branch_optimized(Assembler::bcondEqual, done); // Biased lock obtained, return success.
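// Editor's sketch (hedged summary, not part of the original source) of the
// bit algebra computed above and decoded by the tests that follow:
//   temp = (prototype_header(obj) | Z_thread) ^ mark;  temp &= ~age_mask;
//   temp == 0                -> biased to us, epoch valid (branch to done above)
//   temp & biased_lock_mask  -> prototype no longer biased: try_revoke_bias
//   temp & epoch_mask        -> bias epoch expired:         try_rebias
//   otherwise                -> owner unset or different: CAS to acquire the bias.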
3226
3227 Label try_revoke_bias;
3228 Label try_rebias;
3229 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3230
3231 //----------------------------------------------------------------------------
3232 // At this point we know that the header has the bias pattern and
3233 // that we are not the bias owner in the current epoch. We need to
3234 // figure out more details about the state of the header in order to
3235 // know what operations can be legally performed on the object's
3236 // header.
3237
3238 // If the low three bits in the xor result aren't clear, that means
3239 // the prototype header is no longer biased and we have to revoke
3240 // the bias on this object.
3241 z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3242 z_brnaz(try_revoke_bias);
3243
3244 // Biasing is still enabled for this data type. See whether the
3245 // epoch of the current bias is still valid, meaning that the epoch
3246 // bits of the mark word are equal to the epoch bits of the
3247 // prototype header. (Note that the prototype header's epoch bits
3248 // only change at a safepoint.) If not, attempt to rebias the object
3249 // toward the current thread. Note that we must be absolutely sure
3250 // that the current epoch is invalid in order to do this because
3251 // otherwise the manipulations it performs on the mark word are
3252 // illegal.
3253 z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3254 z_brnaz(try_rebias);
3255
3256 //----------------------------------------------------------------------------
3257 // The epoch of the current bias is still valid but we know nothing
3258 // about the owner; it might be set or it might be clear. Try to
3259 // acquire the bias of the object using an atomic operation. If this
3260 // fails we will go into the runtime to revoke the object's bias.
3261 // Note that we first construct the presumed unbiased header so we
3262 // don't accidentally blow away another thread's valid bias.
3263 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3264 markOopDesc::epoch_mask_in_place);
3265 z_lgr(temp_reg, Z_thread);
3266 z_llgfr(mark_reg, mark_reg);
3267 z_ogr(temp_reg, mark_reg);
3268
3269 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3270
3271 z_csg(mark_reg, temp_reg, 0, obj_reg);
3272
3273 // If the biasing toward our thread failed, this means that
3274 // another thread succeeded in biasing it toward itself and we
3275 // need to revoke that bias. The revocation will occur in the
3276 // interpreter runtime in the slow case.
3277
3278 if (PrintBiasedLockingStatistics) {
3279 increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3280 temp_reg, temp2_reg);
3281 }
3282 if (slow_case != NULL) {
3283 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3284 }
3285 branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code.
3286
3287 //----------------------------------------------------------------------------
3288 bind(try_rebias);
3289 // At this point we know the epoch has expired, meaning that the
3290 // current "bias owner", if any, is actually invalid. Under these
3291 // circumstances _only_, we are allowed to use the current header's
3292 // value as the comparison value when doing the cas to acquire the
3293 // bias in the current epoch.
In other words, we allow transfer of 3294 // the bias from one thread to another directly in this situation. 3295 3296 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); 3297 load_prototype_header(temp_reg, obj_reg); 3298 z_llgfr(mark_reg, mark_reg); 3299 3300 z_ogr(temp_reg, Z_thread); 3301 3302 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3303 3304 z_csg(mark_reg, temp_reg, 0, obj_reg); 3305 3306 // If the biasing toward our thread failed, this means that 3307 // another thread succeeded in biasing it toward itself and we 3308 // need to revoke that bias. The revocation will occur in the 3309 // interpreter runtime in the slow case. 3310 3311 if (PrintBiasedLockingStatistics) { 3312 increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg); 3313 } 3314 if (slow_case != NULL) { 3315 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way. 3316 } 3317 z_bru(done); // Biased lock status given in condition code. 3318 3319 //---------------------------------------------------------------------------- 3320 bind(try_revoke_bias); 3321 // The prototype mark in the klass doesn't have the bias bit set any 3322 // more, indicating that objects of this data type are not supposed 3323 // to be biased any more. We are going to try to reset the mark of 3324 // this object to the prototype value and fall through to the 3325 // CAS-based locking scheme. Note that if our CAS fails, it means 3326 // that another thread raced us for the privilege of revoking the 3327 // bias of this particular object, so it's okay to continue in the 3328 // normal locking code. 3329 load_prototype_header(temp_reg, obj_reg); 3330 3331 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3332 3333 z_csg(mark_reg, temp_reg, 0, obj_reg); 3334 3335 // Fall through to the normal CAS-based lock, because no matter what 3336 // the result of the above CAS, some thread must have succeeded in 3337 // removing the bias bit from the object's header. 3338 if (PrintBiasedLockingStatistics) { 3339 // z_cgr(mark_reg, temp2_reg); 3340 increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg); 3341 } 3342 3343 bind(cas_label); 3344 BLOCK_COMMENT("} biased_locking_enter"); 3345 } 3346 3347 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) { 3348 // Check for biased locking unlock case, which is a no-op 3349 // Note: we do not have to check the thread ID for two reasons. 3350 // First, the interpreter checks for IllegalMonitorStateException at 3351 // a higher level. Second, if the bias was revoked while we held the 3352 // lock, the object could not be rebiased toward another thread, so 3353 // the bias bit would be clear. 
3354 BLOCK_COMMENT("biased_locking_exit {");
3355
3356 z_lg(temp_reg, 0, mark_addr);
3357 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3358
3359 z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3360 z_bre(done);
3361 BLOCK_COMMENT("} biased_locking_exit");
3362 }
3363
3364 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3365 Register displacedHeader = temp1;
3366 Register currentHeader = temp1;
3367 Register temp = temp2;
3368 NearLabel done, object_has_monitor;
3369
3370 BLOCK_COMMENT("compiler_fast_lock_object {");
3371
3372 // Load markOop from oop into mark.
3373 z_lg(displacedHeader, 0, oop);
3374
3375 if (try_bias) {
3376 biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3377 }
3378
3379 // Handle existing monitor.
3380 if ((EmitSync & 0x01) == 0) {
3381 // The object has an existing monitor iff (mark & monitor_value) != 0.
3382 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3383 z_lr(temp, displacedHeader);
3384 z_nill(temp, markOopDesc::monitor_value);
3385 z_brne(object_has_monitor);
3386 }
3387
3388 // Set mark to markOop | markOopDesc::unlocked_value.
3389 z_oill(displacedHeader, markOopDesc::unlocked_value);
3390
3391 // Load Compare Value application register.
3392
3393 // Initialize the box (must happen before we update the object mark).
3394 z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3395
3396 // Memory fence (implicit in the csg below).
3397 // Compare object markOop with mark; if equal, exchange the box address with the object markOop.
3398
3399 // If the compare-and-swap succeeded, then we found an unlocked object and we
3400 // have now locked it.
3401 z_csg(displacedHeader, box, 0, oop);
3402 assert(currentHeader==displacedHeader, "must be same register"); // currentHeader and displacedHeader alias (both are temp1).
3403 z_bre(done);
3404
3405 // We did not see an unlocked object so try the fast recursive case.
3406
3407 z_sgr(currentHeader, Z_SP);
3408 load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3409
3410 z_ngr(currentHeader, temp);
3411 // z_brne(done);
3412 // z_release();
3413 z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3414
3415 z_bru(done);
3416
3417 if ((EmitSync & 0x01) == 0) {
3418 Register zero = temp;
3419 Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
3420 bind(object_has_monitor);
3421 // The object's monitor m is unlocked iff m->owner == NULL,
3422 // otherwise m->owner may contain a thread or a stack address.
3423 //
3424 // Try to CAS m->owner from NULL to current thread.
3425 z_lghi(zero, 0);
3426 // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3427 z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3428 // Store a non-null value into the box.
3429 z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3430 #ifdef ASSERT
3431 z_brne(done);
3432 // We've acquired the monitor, check some invariants.
3433 // Invariant 1: _recursions should be 0.
3434 asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3435 "monitor->_recursions should be 0", -1);
3436 z_ltgr(zero, zero); // Set CR=EQ.
3437 #endif
3438 }
3439 bind(done);
3440
3441 BLOCK_COMMENT("} compiler_fast_lock_object");
3442 // If locking was successful, CR should indicate 'EQ'.
3443 // The compiler or the native wrapper generates a branch to the runtime call
3444 // _complete_monitor_locking_Java.
3445 }
3446
3447 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3448 Register displacedHeader = temp1;
3449 Register currentHeader = temp2;
3450 Register temp = temp1;
3451 Register monitor = temp2;
3452
3453 Label done, object_has_monitor;
3454
3455 BLOCK_COMMENT("compiler_fast_unlock_object {");
3456
3457 if (try_bias) {
3458 biased_locking_exit(oop, currentHeader, done);
3459 }
3460
3461 // Find the lock address and load the displaced header from the stack.
3462 // If the displaced header is zero, we have a recursive unlock.
3463 load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3464 z_bre(done);
3465
3466 // Handle existing monitor.
3467 if ((EmitSync & 0x02) == 0) {
3468 // The object has an existing monitor iff (mark & monitor_value) != 0.
3469 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3470 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3471 z_nill(currentHeader, markOopDesc::monitor_value);
3472 z_brne(object_has_monitor);
3473 }
3474
3475 // Check if it is still a lightweight lock; this is true if we see
3476 // the stack address of the basicLock in the markOop of the object.
3477 // Copy box to currentHeader so that csg does not kill it.
3478 z_lgr(currentHeader, box);
3479 z_csg(currentHeader, displacedHeader, 0, oop);
3480 z_bru(done); // Csg sets CR as desired.
3481
3482 // Handle existing monitor.
3483 if ((EmitSync & 0x02) == 0) {
3484 bind(object_has_monitor);
3485 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // currentHeader is tagged with monitor_value.
3486 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3487 z_brne(done);
3488 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3489 z_brne(done);
3490 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3491 z_brne(done);
3492 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3493 z_brne(done);
3494 z_release();
3495 z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3496 }
3497
3498 bind(done);
3499
3500 BLOCK_COMMENT("} compiler_fast_unlock_object");
3501 // flag == EQ indicates success
3502 // flag == NE indicates failure
3503 }
3504
3505 // Write to card table for modification at store_addr - register is destroyed afterwards.
3506 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
3507 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
3508 assert(bs->kind() == BarrierSet::CardTableForRS ||
3509 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
3510 assert_different_registers(store_addr, tmp);
3511 z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
3512 load_absolute_address(tmp, (address)bs->byte_map_base);
3513 z_agr(store_addr, tmp);
3514 z_mvi(0, store_addr, 0); // Store byte 0.
3515 }
3516
3517 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3518 NearLabel Ldone;
3519 z_ltgr(tmp1, value);
3520 z_bre(Ldone); // Use NULL result as-is.
3521
3522 z_nill(value, ~JNIHandles::weak_tag_mask);
3523 z_lg(value, 0, value); // Resolve (untagged) jobject.
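// Editor's note (assumption: the usual JNIHandles tagging scheme, where
// weak_tag_mask marks jweak handles in the low bit of the jobject):
//   strong jobject: the plain load above resolves the handle directly.
//   weak jobject:   the tag is cleared by z_nill above; tmp1 still holds
//                   the tagged value and is tested below so the G1
//                   pre-barrier can be applied to the resolved oop.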
3524
3525 #if INCLUDE_ALL_GCS
3526 if (UseG1GC) {
3527 NearLabel Lnot_weak;
3528 z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
3529 z_braz(Lnot_weak);
3530 verify_oop(value);
3531 g1_write_barrier_pre(noreg /* obj */,
3532 noreg /* offset */,
3533 value /* pre_val */,
3534 noreg /* val */,
3535 tmp1 /* tmp1 */,
3536 tmp2 /* tmp2 */,
3537 true /* pre_val_needed */);
3538 bind(Lnot_weak);
3539 }
3540 #endif // INCLUDE_ALL_GCS
3541 verify_oop(value);
3542 bind(Ldone);
3543 }
3544
3545 #if INCLUDE_ALL_GCS
3546
3547 //------------------------------------------------------
3548 // General G1 pre-barrier generator.
3549 // Purpose: record the previous value if it is not null.
3550 // All non-tmps are preserved.
3551 //------------------------------------------------------
3552 // Note: Rpre_val needs special attention.
3553 // The flag pre_val_needed indicates that the caller of this emitter function
3554 // relies on Rpre_val containing the correct value, that is:
3555 // either the value it contained on entry to this code segment
3556 // or the value that was loaded into the register from (Robj+offset).
3557 //
3558 // Independent of this requirement, the contents of Rpre_val must survive
3559 // the push_frame() operation. push_frame() uses Z_R0_scratch by default
3560 // to temporarily remember the frame pointer.
3561 // If Rpre_val is assigned Z_R0_scratch by the caller, code must be emitted to
3562 // save its value.
3563 void MacroAssembler::g1_write_barrier_pre(Register Robj,
3564 RegisterOrConstant offset,
3565 Register Rpre_val, // Ideally, this is a non-volatile register.
3566 Register Rval, // Will be preserved.
3567 Register Rtmp1, // If Rpre_val is volatile, either Rtmp1
3568 Register Rtmp2, // or Rtmp2 has to be non-volatile.
3569 bool pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3570 ) {
3571 Label callRuntime, filtered;
3572 const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3573 const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3574 const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3575 assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> may be Z_R0!
3576 assert_different_registers(Robj, Z_R0_scratch); // Used for addressing. Furthermore, push_frame destroys Z_R0!
3577 assert_different_registers(Rval, Z_R0_scratch); // push_frame destroys Z_R0!
3578
3579 #ifdef ASSERT
3580 // Make sure the offset register is not Z_R0: it is used for addressing and would be destroyed by push_frame.
3581 if (offset.is_register() && offset.as_register()->encoding() == 0) {
3582 tty->print_cr("Roffset(g1_write_barrier_pre) = %%r%d", offset.as_register()->encoding());
3583 assert(false, "bad register for offset");
3584 }
3585 #endif
3586
3587 BLOCK_COMMENT("g1_write_barrier_pre {");
3588
3589 // Is marking active?
3590 // Note: value is loaded for test purposes only. No further use here.
3591 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3592 load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3593 } else {
3594 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3595 load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3596 }
3597 z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
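// Editor's sketch (illustrative pseudocode, not part of the original source)
// of what the remainder of this emitter implements; satb_index/satb_buffer
// stand for the SATB queue fields reached via index_offset/buffer_offset:
//   if (Robj != noreg)       pre_val = *(Robj + offset);  // maybe compressed
//   if (pre_val == NULL)     goto filtered;
//   if (thread->satb_index == 0)  call SharedRuntime::g1_wb_pre(pre_val, thread);
//   else { satb_index -= wordSize; satb_buffer[satb_index] = pre_val; }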
3598
3599 assert(Rpre_val != noreg, "must have a real register");
3600
3601
3602 // If an object is given, we need to load the previous value into Rpre_val.
3603 if (Robj != noreg) {
3604 // Load the previous value...
3605 Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3606 if (UseCompressedOops) {
3607 z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3608 } else {
3609 z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3610 }
3611 }
3612
3613 // Is the previous value NULL?
3614 // If so, we don't need to record it and we're done.
3615 // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
3616 // Register contents are preserved across the runtime call if the caller requests that.
3617 z_ltgr(Rpre_val, Rpre_val);
3618 z_bre(filtered); // previous value is NULL, so we don't need to record it.
3619
3620 // Decode the oop now. We know it's not NULL.
3621 if (Robj != noreg && UseCompressedOops) {
3622 oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3623 }
3624
3625 // OK, it's not filtered, so we'll need to call enqueue.
3626
3627 // We can store the original value in the thread's buffer
3628 // only if index > 0. Otherwise, the runtime has to handle it.
3629 // (The index field is typed as size_t.)
3630 Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3631 assert_different_registers(Rbuffer, Rindex, Rpre_val);
3632
3633 z_lg(Rbuffer, buffer_offset, Z_thread);
3634
3635 load_and_test_long(Rindex, Address(Z_thread, index_offset));
3636 z_bre(callRuntime); // If index == 0, goto runtime.
3637
3638 add2reg(Rindex, -wordSize); // Decrement index.
3639 z_stg(Rindex, index_offset, Z_thread);
3640
3641 // Record the previous value.
3642 z_stg(Rpre_val, 0, Rbuffer, Rindex);
3643 z_bru(filtered); // We are done.
3644
3645 Rbuffer = noreg; // end of life
3646 Rindex = noreg; // end of life
3647
3648 bind(callRuntime);
3649
3650 // Save some registers (inputs and result) over runtime call
3651 // by spilling them into the top frame.
3652 if (Robj != noreg && Robj->is_volatile()) {
3653 z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3654 }
3655 if (offset.is_register() && offset.as_register()->is_volatile()) {
3656 Register Roff = offset.as_register();
3657 z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3658 }
3659 if (Rval != noreg && Rval->is_volatile()) {
3660 z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3661 }
3662
3663 // Save Rpre_val (result) over runtime call.
3664 Register Rpre_save = Rpre_val;
3665 if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
3666 guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
3667 Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
3668 }
3669 lgr_if_needed(Rpre_save, Rpre_val);
3670
3671 // Push frame to protect top frame with return pc and spilled register values.
3672 save_return_pc();
3673 push_frame_abi160(0); // Will use Z_R0 as tmp.
3674
3675 // Rpre_val may be destroyed by push_frame().
3676 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
3677
3678 pop_frame();
3679 restore_return_pc();
3680
3681 // Restore spilled values.
3682 if (Robj != noreg && Robj->is_volatile()) { 3683 z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3684 } 3685 if (offset.is_register() && offset.as_register()->is_volatile()) { 3686 Register Roff = offset.as_register(); 3687 z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3688 } 3689 if (Rval != noreg && Rval->is_volatile()) { 3690 z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3691 } 3692 if (pre_val_needed && Rpre_val->is_volatile()) { 3693 lgr_if_needed(Rpre_val, Rpre_save); 3694 } 3695 3696 bind(filtered); 3697 BLOCK_COMMENT("} g1_write_barrier_pre"); 3698 } 3699 3700 // General G1 post-barrier generator. 3701 // Purpose: Store cross-region card. 3702 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, 3703 Register Rnew_val, 3704 Register Rtmp1, 3705 Register Rtmp2, 3706 Register Rtmp3) { 3707 Label callRuntime, filtered; 3708 3709 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3. 3710 3711 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); 3712 assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); 3713 3714 BLOCK_COMMENT("g1_write_barrier_post {"); 3715 3716 // Does store cross heap regions? 3717 // It does if the two addresses specify different grain addresses. 3718 if (G1RSBarrierRegionFilter) { 3719 if (VM_Version::has_DistinctOpnds()) { 3720 z_xgrk(Rtmp1, Rstore_addr, Rnew_val); 3721 } else { 3722 z_lgr(Rtmp1, Rstore_addr); 3723 z_xgr(Rtmp1, Rnew_val); 3724 } 3725 z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); 3726 z_bre(filtered); 3727 } 3728 3729 // Crosses regions, storing NULL? 3730 #ifdef ASSERT 3731 z_ltgr(Rnew_val, Rnew_val); 3732 asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete: 3733 z_bre(filtered); // Safety net: don't break if we have a NULL oop. 3734 #endif 3735 Rnew_val = noreg; // end of lifetime 3736 3737 // Storing region crossing non-NULL, is card already dirty? 3738 assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code"); 3739 assert_different_registers(Rtmp1, Rtmp2, Rtmp3); 3740 // Make sure not to use Z_R0 for any of these registers. 3741 Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3; 3742 Register Rbase = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3; 3743 3744 // calculate address of card 3745 load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base. 3746 z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table. 3747 z_algr(Rcard_addr, Rbase); // Explicit calculation needed for cli. 3748 Rbase = noreg; // end of lifetime 3749 3750 // Filter young. 3751 assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code"); 3752 z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val()); 3753 z_bre(filtered); 3754 3755 // Check the card value. If dirty, we're done. 3756 // This also avoids false sharing of the (already dirty) card. 3757 z_sync(); // Required to support concurrent cleaning. 3758 assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code"); 3759 z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar. 3760 z_bre(filtered); 3761 3762 // Storing a region crossing, non-NULL oop, card is clean. 3763 // Dirty card and log. 
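// Editor's sketch (illustrative pseudocode, not part of the original source)
// of the card dirtying and enqueueing that follows; dcq_index/dcq_buffer
// stand for the dirty card queue fields reached via qidx_off/qbuf_off:
//   *card_addr = dirty_card_val();
//   if (thread->dcq_index == 0)  call SharedRuntime::g1_wb_post(card_addr, thread);
//   else { dcq_index -= wordSize; dcq_buffer[dcq_index] = card_addr; }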
3764 z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
3765
3766 Register Rcard_addr_x = Rcard_addr;
3767 Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3768 Register Rqueue_buf = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3769 const int qidx_off = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3770 const int qbuf_off = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3771 if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3772 Rcard_addr_x = Z_R0_scratch; // Register shortage. We have to use Z_R0.
3773 }
3774 lgr_if_needed(Rcard_addr_x, Rcard_addr);
3775
3776 load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3777 z_bre(callRuntime); // Index == 0 then jump to runtime.
3778
3779 z_lg(Rqueue_buf, qbuf_off, Z_thread);
3780
3781 add2reg(Rqueue_index, -wordSize); // Decrement index.
3782 z_stg(Rqueue_index, qidx_off, Z_thread);
3783
3784 z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3785 z_bru(filtered);
3786
3787 bind(callRuntime);
3788
3789 // TODO: do we need a frame? Introduced to be on the safe side.
3790 bool needs_frame = true;
3791 lgr_if_needed(Rcard_addr, Rcard_addr_x); // copy back asap. push_frame will destroy Z_R0_scratch!
3792
3793 // The VM call needs a frame to access (write) registers.
3794 if (needs_frame) {
3795 save_return_pc();
3796 push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3797 }
3798
3799 // Save the live input values.
3800 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
3801
3802 if (needs_frame) {
3803 pop_frame();
3804 restore_return_pc();
3805 }
3806
3807 bind(filtered);
3808
3809 BLOCK_COMMENT("} g1_write_barrier_post");
3810 }
3811 #endif // INCLUDE_ALL_GCS
3812
3813 // Last_Java_sp must comply with the rules in frame_s390.hpp.
3814 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3815 BLOCK_COMMENT("set_last_Java_frame {");
3816
3817 // Always set last_Java_pc and flags first because once last_Java_sp
3818 // is visible, has_last_Java_frame is true and users will look at the
3819 // rest of the fields. (Note: flags should always be zero before we
3820 // get here so they don't need to be set.)
3821
3822 // Verify that last_Java_pc was zeroed on return to Java.
3823 if (allow_relocation) {
3824 asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3825 Z_thread,
3826 "last_Java_pc not zeroed before leaving Java",
3827 0x200);
3828 } else {
3829 asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3830 Z_thread,
3831 "last_Java_pc not zeroed before leaving Java",
3832 0x200);
3833 }
3834
3835 // When returning from calling out from Java mode the frame anchor's
3836 // last_Java_pc will always be set to NULL. It is set here so that,
3837 // if we are doing a call to native (not VM), we capture the
3838 // known pc and don't have to rely on the native call having a
3839 // standard frame linkage where we can find the pc.
3840 if (last_Java_pc!=noreg) {
3841 z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3842 }
3843
3844 // This membar release is not required on z/Architecture, since the sequence of stores
3845 // is maintained. Nevertheless, we leave it in to document the required ordering.
3846 // The implementation of z_release() should be empty.
3847 // z_release(); 3848 3849 z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset())); 3850 BLOCK_COMMENT("} set_last_Java_frame"); 3851 } 3852 3853 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) { 3854 BLOCK_COMMENT("reset_last_Java_frame {"); 3855 3856 if (allow_relocation) { 3857 asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()), 3858 Z_thread, 3859 "SP was not set, still zero", 3860 0x202); 3861 } else { 3862 asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()), 3863 Z_thread, 3864 "SP was not set, still zero", 3865 0x202); 3866 } 3867 3868 // _last_Java_sp = 0 3869 // Clearing storage must be atomic here, so don't use clear_mem()! 3870 store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0); 3871 3872 // _last_Java_pc = 0 3873 store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0); 3874 3875 BLOCK_COMMENT("} reset_last_Java_frame"); 3876 return; 3877 } 3878 3879 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) { 3880 assert_different_registers(sp, tmp1); 3881 3882 // We cannot trust that code generated by the C++ compiler saves R14 3883 // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at 3884 // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()). 3885 // Therefore we load the PC into tmp1 and let set_last_Java_frame() save 3886 // it into the frame anchor. 3887 get_PC(tmp1); 3888 set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation); 3889 } 3890 3891 void MacroAssembler::set_thread_state(JavaThreadState new_state) { 3892 z_release(); 3893 3894 assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction"); 3895 assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int"); 3896 store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false); 3897 } 3898 3899 void MacroAssembler::get_vm_result(Register oop_result) { 3900 verify_thread(); 3901 3902 z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); 3903 clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*)); 3904 3905 verify_oop(oop_result); 3906 } 3907 3908 void MacroAssembler::get_vm_result_2(Register result) { 3909 verify_thread(); 3910 3911 z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset())); 3912 clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*)); 3913 } 3914 3915 // We require that C code which does not return a value in vm_result will 3916 // leave it undisturbed. 3917 void MacroAssembler::set_vm_result(Register oop_result) { 3918 z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); 3919 } 3920 3921 // Explicit null checks (used for method handle code). 3922 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) { 3923 if (!ImplicitNullChecks) { 3924 NearLabel ok; 3925 3926 compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok); 3927 3928 // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address). 3929 address exception_entry = Interpreter::throw_NullPointerException_entry(); 3930 load_absolute_address(reg, exception_entry); 3931 z_br(reg); 3932 3933 bind(ok); 3934 } else { 3935 if (needs_explicit_null_check((intptr_t)offset)) { 3936 // Provoke OS NULL exception if reg = NULL by 3937 // accessing M[reg] w/o changing any registers. 
3938 z_lg(tmp, 0, reg);
3939 }
3940 // else
3941 // Nothing to do, (later) access of M[reg + offset]
3942 // will provoke OS NULL exception if reg = NULL.
3943 }
3944 }
3945
3946 //-------------------------------------
3947 // Compressed Klass Pointers
3948 //-------------------------------------
3949
3950 // Klass oop manipulations if compressed.
3951 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3952 Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3953 address base = Universe::narrow_klass_base();
3954 int shift = Universe::narrow_klass_shift();
3955 assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3956
3957 BLOCK_COMMENT("cKlass encoder {");
3958
3959 #ifdef ASSERT
3960 Label ok;
3961 z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3962 z_brc(Assembler::bcondAllZero, ok);
3963 // The plain disassembler does not recognize illtrap. It instead displays
3964 // a 32-bit value. Issuing two illtraps assures the disassembler finds
3965 // the proper beginning of the next instruction.
3966 z_illtrap(0xee);
3967 z_illtrap(0xee);
3968 bind(ok);
3969 #endif
3970
3971 if (base != NULL) {
3972 unsigned int base_h = ((unsigned long)base)>>32;
3973 unsigned int base_l = (unsigned int)((unsigned long)base);
3974 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3975 lgr_if_needed(dst, current);
3976 z_aih(dst, -((int)base_h)); // Base has no set bits in lower half.
3977 } else if ((base_h == 0) && (base_l != 0)) {
3978 lgr_if_needed(dst, current);
3979 z_agfi(dst, -(int)base_l);
3980 } else {
3981 load_const(Z_R0, base);
3982 lgr_if_needed(dst, current);
3983 z_sgr(dst, Z_R0);
3984 }
3985 current = dst;
3986 }
3987 if (shift != 0) {
3988 assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3989 z_srlg(dst, current, shift);
3990 current = dst;
3991 }
3992 lgr_if_needed(dst, current); // Move may be required (if both base and shift are zero).
3993
3994 BLOCK_COMMENT("} cKlass encoder");
3995 }
3996
3997 // This function calculates the size of the code generated by
3998 // decode_klass_not_null(register dst, Register src)
3999 // when (Universe::heap() != NULL). Hence, if the instructions
4000 // it generates change, then this method needs to be updated.
4001 int MacroAssembler::instr_size_for_decode_klass_not_null() {
4002 address base = Universe::narrow_klass_base();
4003 int shift_size = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
4004 int addbase_size = 0;
4005 assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4006
4007 if (base != NULL) {
4008 unsigned int base_h = ((unsigned long)base)>>32;
4009 unsigned int base_l = (unsigned int)((unsigned long)base);
4010 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4011 addbase_size += 6; /* aih */
4012 } else if ((base_h == 0) && (base_l != 0)) {
4013 addbase_size += 6; /* algfi */
4014 } else {
4015 addbase_size += load_const_size();
4016 addbase_size += 4; /* algr */
4017 }
4018 }
4019 #ifdef ASSERT
4020 addbase_size += 10;
4021 addbase_size += 2; // Extra sigill.
4022 #endif
4023 return addbase_size + shift_size;
4024 }
4025
4026 // !!! If the instructions that get generated here change
4027 // then function instr_size_for_decode_klass_not_null()
4028 // needs to get updated.
4029 // This variant of decode_klass_not_null() must generate predictable code!
4030 // The code must only depend on globally known parameters.
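// Editor's note (hedged): with the narrow-klass encoding assumed throughout
// this file, the decoders below compute
//   Klass* k = (Klass*)(((uint64_t)narrow_klass << shift) + base);
// The base addition is chosen by looking at the 32-bit halves of 'base':
//   only high half set -> z_aih, only low half set -> z_algfi,
//   otherwise          -> load_const + z_algr.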
4031 void MacroAssembler::decode_klass_not_null(Register dst) {
4032 address base = Universe::narrow_klass_base();
4033 int shift = Universe::narrow_klass_shift();
4034 int beg_off = offset();
4035 assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4036
4037 BLOCK_COMMENT("cKlass decoder (const size) {");
4038
4039 if (shift != 0) { // Shift required?
4040 z_sllg(dst, dst, shift);
4041 }
4042 if (base != NULL) {
4043 unsigned int base_h = ((unsigned long)base)>>32;
4044 unsigned int base_l = (unsigned int)((unsigned long)base);
4045 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4046 z_aih(dst, base_h); // Base has no set bits in lower half.
4047 } else if ((base_h == 0) && (base_l != 0)) {
4048 z_algfi(dst, base_l); // Base has no set bits in upper half.
4049 } else {
4050 load_const(Z_R0, base); // Base has set bits everywhere.
4051 z_algr(dst, Z_R0);
4052 }
4053 }
4054
4055 #ifdef ASSERT
4056 Label ok;
4057 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4058 z_brc(Assembler::bcondAllZero, ok);
4059 // The plain disassembler does not recognize illtrap. It instead displays
4060 // a 32-bit value. Issuing two illtraps assures the disassembler finds
4061 // the proper beginning of the next instruction.
4062 z_illtrap(0xd1);
4063 z_illtrap(0xd1);
4064 bind(ok);
4065 #endif
4066 assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
4067
4068 BLOCK_COMMENT("} cKlass decoder (const size)");
4069 }
4070
4071 // This variant of decode_klass_not_null() is for cases where
4072 // 1) the size of the generated instructions may vary
4073 // 2) the result is (potentially) stored in a register different from the source.
4074 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4075 address base = Universe::narrow_klass_base();
4076 int shift = Universe::narrow_klass_shift();
4077 assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4078
4079 BLOCK_COMMENT("cKlass decoder {");
4080
4081 if (src == noreg) src = dst;
4082
4083 if (shift != 0) { // Shift or at least move required?
4084 z_sllg(dst, src, shift);
4085 } else {
4086 lgr_if_needed(dst, src);
4087 }
4088
4089 if (base != NULL) {
4090 unsigned int base_h = ((unsigned long)base)>>32;
4091 unsigned int base_l = (unsigned int)((unsigned long)base);
4092 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4093 z_aih(dst, base_h); // Base has no set bits in lower half.
4094 } else if ((base_h == 0) && (base_l != 0)) {
4095 z_algfi(dst, base_l); // Base has no set bits in upper half.
4096 } else {
4097 load_const_optimized(Z_R0, base); // Base has set bits everywhere.
4098 z_algr(dst, Z_R0);
4099 }
4100 }
4101
4102 #ifdef ASSERT
4103 Label ok;
4104 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4105 z_brc(Assembler::bcondAllZero, ok);
4106 // The plain disassembler does not recognize illtrap. It instead displays
4107 // a 32-bit value. Issuing two illtraps assures the disassembler finds
4108 // the proper beginning of the next instruction.
4109 z_illtrap(0xd2);
4110 z_illtrap(0xd2);
4111 bind(ok);
4112 #endif
4113 BLOCK_COMMENT("} cKlass decoder");
4114 }
4115
4116 void MacroAssembler::load_klass(Register klass, Address mem) {
4117 if (UseCompressedClassPointers) {
4118 z_llgf(klass, mem);
4119 // Attention: no null check here!
4120 decode_klass_not_null(klass); 4121 } else { 4122 z_lg(klass, mem); 4123 } 4124 } 4125 4126 void MacroAssembler::load_klass(Register klass, Register src_oop) { 4127 if (UseCompressedClassPointers) { 4128 z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop); 4129 // Attention: no null check here! 4130 decode_klass_not_null(klass); 4131 } else { 4132 z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); 4133 } 4134 } 4135 4136 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) { 4137 assert_different_registers(Rheader, Rsrc_oop); 4138 load_klass(Rheader, Rsrc_oop); 4139 z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset())); 4140 } 4141 4142 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { 4143 if (UseCompressedClassPointers) { 4144 assert_different_registers(dst_oop, klass, Z_R0); 4145 if (ck == noreg) ck = klass; 4146 encode_klass_not_null(ck, klass); 4147 z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 4148 } else { 4149 z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 4150 } 4151 } 4152 4153 void MacroAssembler::store_klass_gap(Register s, Register d) { 4154 if (UseCompressedClassPointers) { 4155 assert(s != d, "not enough registers"); 4156 // Support s = noreg. 4157 if (s != noreg) { 4158 z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); 4159 } else { 4160 z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0); 4161 } 4162 } 4163 } 4164 4165 // Compare klass ptr in memory against klass ptr in register. 4166 // 4167 // Rop1 - klass in register, always uncompressed. 4168 // disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag. 4169 // Rbase - Base address of cKlass in memory. 4170 // maybeNULL - True if Rop1 possibly is a NULL. 4171 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) { 4172 4173 BLOCK_COMMENT("compare klass ptr {"); 4174 4175 if (UseCompressedClassPointers) { 4176 const int shift = Universe::narrow_klass_shift(); 4177 address base = Universe::narrow_klass_base(); 4178 4179 assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift"); 4180 assert_different_registers(Rop1, Z_R0); 4181 assert_different_registers(Rop1, Rbase, Z_R1); 4182 4183 // First encode register oop and then compare with cOop in memory. 4184 // This sequence saves an unnecessary cOop load and decode. 4185 if (base == NULL) { 4186 if (shift == 0) { 4187 z_cl(Rop1, disp, Rbase); // Unscaled 4188 } else { 4189 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4190 z_cl(Z_R0, disp, Rbase); 4191 } 4192 } else { // HeapBased 4193 #ifdef ASSERT 4194 bool used_R0 = true; 4195 bool used_R1 = true; 4196 #endif 4197 Register current = Rop1; 4198 Label done; 4199 4200 if (maybeNULL) { // NULL ptr must be preserved! 4201 z_ltgr(Z_R0, current); 4202 z_bre(done); 4203 current = Z_R0; 4204 } 4205 4206 unsigned int base_h = ((unsigned long)base)>>32; 4207 unsigned int base_l = (unsigned int)((unsigned long)base); 4208 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 4209 lgr_if_needed(Z_R0, current); 4210 z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. 
4211 } else if ((base_h == 0) && (base_l != 0)) { 4212 lgr_if_needed(Z_R0, current); 4213 z_agfi(Z_R0, -(int)base_l); 4214 } else { 4215 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4216 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. 4217 } 4218 4219 if (shift != 0) { 4220 z_srlg(Z_R0, Z_R0, shift); 4221 } 4222 bind(done); 4223 z_cl(Z_R0, disp, Rbase); 4224 #ifdef ASSERT 4225 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4226 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4227 #endif 4228 } 4229 } else { 4230 z_clg(Rop1, disp, Z_R0, Rbase); 4231 } 4232 BLOCK_COMMENT("} compare klass ptr"); 4233 } 4234 4235 //--------------------------- 4236 // Compressed oops 4237 //--------------------------- 4238 4239 void MacroAssembler::encode_heap_oop(Register oop) { 4240 oop_encoder(oop, oop, true /*maybe null*/); 4241 } 4242 4243 void MacroAssembler::encode_heap_oop_not_null(Register oop) { 4244 oop_encoder(oop, oop, false /*not null*/); 4245 } 4246 4247 // Called with something derived from the oop base. e.g. oop_base>>3. 4248 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) { 4249 unsigned int oop_base_ll = ((unsigned int)(oop_base >> 0)) & 0xffff; 4250 unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff; 4251 unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff; 4252 unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff; 4253 unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1) 4254 + (oop_base_lh == 0 ? 0:1) 4255 + (oop_base_hl == 0 ? 0:1) 4256 + (oop_base_hh == 0 ? 0:1); 4257 4258 assert(oop_base != 0, "This is for HeapBased cOops only"); 4259 4260 if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2. 4261 uint64_t pow2_offset = 0x10000 - oop_base_ll; 4262 if (pow2_offset < 0x8000) { // This might not be necessary. 4263 uint64_t oop_base2 = oop_base + pow2_offset; 4264 4265 oop_base_ll = ((unsigned int)(oop_base2 >> 0)) & 0xffff; 4266 oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff; 4267 oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff; 4268 oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff; 4269 n_notzero_parts = (oop_base_ll == 0 ? 0:1) + 4270 (oop_base_lh == 0 ? 0:1) + 4271 (oop_base_hl == 0 ? 0:1) + 4272 (oop_base_hh == 0 ? 0:1); 4273 if (n_notzero_parts == 1) { 4274 assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register"); 4275 return -pow2_offset; 4276 } 4277 } 4278 } 4279 return 0; 4280 } 4281 4282 // If base address is offset from a straight power of two by just a few pages, 4283 // return this offset to the caller for a possible later composite add. 4284 // TODO/FIX: will only work correctly for 4k pages. 4285 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) { 4286 int pow2_offset = get_oop_base_pow2_offset(oop_base); 4287 4288 load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible. 4289 4290 return pow2_offset; 4291 } 4292 4293 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) { 4294 int offset = get_oop_base(Rbase, oop_base); 4295 z_lcgr(Rbase, Rbase); 4296 return -offset; 4297 } 4298 4299 // Compare compressed oop in memory against oop in register. 4300 // Rop1 - Oop in register. 4301 // disp - Offset of cOop in memory. 4302 // Rbase - Base address of cOop in memory. 4303 // maybeNULL - True if Rop1 possibly is a NULL. 
4305 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
4306 Register Rbase = mem.baseOrR0();
4307 Register Rindex = mem.indexOrR0();
4308 int64_t disp = mem.disp();
4309
4310 const int shift = Universe::narrow_oop_shift();
4311 address base = Universe::narrow_oop_base();
4312
4313 assert(UseCompressedOops, "must be on to call this method");
4314 assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4315 assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4316 assert_different_registers(Rop1, Z_R0);
4317 assert_different_registers(Rop1, Rbase, Z_R1);
4318 assert_different_registers(Rop1, Rindex, Z_R1);
4319
4320 BLOCK_COMMENT("compare heap oop {");
4321
4322 // First encode register oop and then compare with cOop in memory.
4323 // This sequence saves an unnecessary cOop load and decode.
4324 if (base == NULL) {
4325 if (shift == 0) {
4326 z_cl(Rop1, disp, Rindex, Rbase); // Unscaled
4327 } else {
4328 z_srlg(Z_R0, Rop1, shift); // ZeroBased
4329 z_cl(Z_R0, disp, Rindex, Rbase);
4330 }
4331 } else { // HeapBased
4332 #ifdef ASSERT
4333 bool used_R0 = true;
4334 bool used_R1 = true;
4335 #endif
4336 Label done;
4337 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4338
4339 if (maybeNULL) { // NULL ptr must be preserved!
4340 z_ltgr(Z_R0, Rop1);
4341 z_bre(done);
4342 }
4343
4344 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4345 z_srlg(Z_R0, Z_R0, shift);
4346
4347 bind(done);
4348 z_cl(Z_R0, disp, Rindex, Rbase);
4349 #ifdef ASSERT
4350 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4351 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4352 #endif
4353 }
4354 BLOCK_COMMENT("} compare heap oop");
4355 }
4356
4357 // Load heap oop and decompress, if necessary.
4358 void MacroAssembler::load_heap_oop(Register dest, const Address &a) {
4359 if (UseCompressedOops) {
4360 z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4361 oop_decoder(dest, dest, true);
4362 } else {
4363 z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4364 }
4365 }
4366
4367 // Load heap oop and decompress, if necessary.
4368 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) {
4369 if (UseCompressedOops) {
4370 z_llgf(dest, disp, base);
4371 oop_decoder(dest, dest, true);
4372 } else {
4373 z_lg(dest, disp, base);
4374 }
4375 }
4376
4377 // Load heap oop and decompress, if necessary.
4378 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) {
4379 if (UseCompressedOops) {
4380 z_llgf(dest, disp, base);
4381 oop_decoder(dest, dest, false);
4382 } else {
4383 z_lg(dest, disp, base);
4384 }
4385 }
4386
4387 // Compress, if necessary, and store oop to heap.
4388 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) {
4389 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4390 if (UseCompressedOops) {
4391 assert_different_registers(Roop, offset.register_or_noreg(), base);
4392 encode_heap_oop(Roop);
4393 z_st(Roop, offset.constant_or_zero(), Ridx, base);
4394 } else {
4395 z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4396 }
4397 }
4398
4399 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL.
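// Editor's note: with UseCompressedOops, the encode below compresses Roop
// in place, so the caller's oop register is clobbered. A hypothetical call
// (register and offset names illustrative only):
//   store_heap_oop_not_null(Rval, RegisterOrConstant(field_offset), Rbase);
// Rval must not be reused as an uncompressed oop afterwards.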
void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) {
  Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
  if (UseCompressedOops) {
    assert_different_registers(Roop, offset.register_or_noreg(), base);
    encode_heap_oop_not_null(Roop);
    z_st(Roop, offset.constant_or_zero(), Ridx, base);
  } else {
    z_stg(Roop, offset.constant_or_zero(), Ridx, base);
  }
}

// Store NULL oop to heap.
void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) {
  Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
  if (UseCompressedOops) {
    z_st(zero, offset.constant_or_zero(), Ridx, base);
  } else {
    z_stg(zero, offset.constant_or_zero(), Ridx, base);
  }
}

//-------------------------------------------------
// Encode compressed oop. Generally usable encoder.
//-------------------------------------------------
// Rsrc - contains regular oop on entry. It remains unchanged.
// Rdst - contains compressed oop on exit.
// Rdst and Rsrc may indicate the same register, in which case Rsrc is clobbered.
//
// Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
// Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
//
// only32bitValid is set if later code uses only the lower 32 bits. In this
// case we do not need to fix the upper 32 bits.
void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                                 Register Rbase, int pow2_offset, bool only32bitValid) {

  const address oop_base  = Universe::narrow_oop_base();
  const int     oop_shift = Universe::narrow_oop_shift();
  const bool    disjoint  = Universe::narrow_oop_base_disjoint();

  assert(UseCompressedOops, "must be on to call this method");
  assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
  assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");

  if (disjoint || (oop_base == NULL)) {
    BLOCK_COMMENT("cOop encoder zeroBase {");
    if (oop_shift == 0) {
      if (oop_base != NULL && !only32bitValid) {
        z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
      } else {
        lgr_if_needed(Rdst, Rsrc);
      }
    } else {
      z_srlg(Rdst, Rsrc, oop_shift);
      if (oop_base != NULL && !only32bitValid) {
        z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
      }
    }
    BLOCK_COMMENT("} cOop encoder zeroBase");
    return;
  }

  bool used_R0 = false;
  bool used_R1 = false;

  BLOCK_COMMENT("cOop encoder general {");
  assert_different_registers(Rdst, Z_R1);
  assert_different_registers(Rsrc, Rbase);
  if (maybeNULL) {
    Label done;
    // We reorder shifting and subtracting, so that we can compare
    // and shift in parallel:
    //
    // cycle 0: potential LoadN, base = <const>
    // cycle 1: base = !base     dst = src >> 3,    cmp cr = (src != 0)
    // cycle 2: if (cr) br,      dst = dst + base + offset

    // Get oop_base components.
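    // Note (editorial gloss on the convention used here): pow2_offset == -1 is the
    // caller's way of saying "base not preloaded". In that case the (complemented)
    // base is materialized below, possibly after retargeting Rbase or Rdst to a
    // scratch register to avoid the register clashes handled next.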
    if (pow2_offset == -1) {
      if (Rdst == Rbase) {
        if (Rdst == Z_R1 || Rsrc == Z_R1) {
          Rbase = Z_R0;
          used_R0 = true;
        } else {
          Rdst = Z_R1;
          used_R1 = true;
        }
      }
      if (Rbase == Z_R1) {
        used_R1 = true;
      }
      pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
    }
    assert_different_registers(Rdst, Rbase);

    // Check for NULL oop (must be left alone) and shift.
    if (oop_shift != 0) { // Shift out alignment bits.
      if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
        z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code.
      } else {
        z_srlg(Rdst, Rsrc, oop_shift);
        z_ltgr(Rsrc, Rsrc); // This is the recommended way of testing for zero.
        // z_cghi(Rsrc, 0); might seem faster, as it does not write a register. It is not!
      }
    } else {
      z_ltgr(Rdst, Rsrc); // Move NULL to result register.
    }
    z_bre(done);

    // Subtract oop_base components.
    if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
      z_algr(Rdst, Rbase);
      if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
    } else {
      add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
    }
    if (!only32bitValid) {
      z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
    }
    bind(done);

  } else { // not null
    // Get oop_base components.
    if (pow2_offset == -1) {
      pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
    }

    // Subtract oop_base components and shift.
    if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
      // Don't use lay instruction.
      if (Rdst == Rsrc) {
        z_algr(Rdst, Rbase);
      } else {
        lgr_if_needed(Rdst, Rbase);
        z_algr(Rdst, Rsrc);
      }
      if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
    } else {
      add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
    }
    if (oop_shift != 0) { // Shift out alignment bits.
      z_srlg(Rdst, Rdst, oop_shift);
    }
    if (!only32bitValid) {
      z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
    }
  }
#ifdef ASSERT
  if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
  if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
#endif
  BLOCK_COMMENT("} cOop encoder general");
}

//-------------------------------------------------
// Decode compressed oop. Generally usable decoder.
//-------------------------------------------------
// Rsrc - contains compressed oop on entry.
// Rdst - contains regular oop on exit.
// Rdst and Rsrc may indicate the same register.
// Rdst must not be the same register as Rbase if Rbase was preloaded (before the call).
// Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.
// Rbase - register to use for the base.
// pow2_offset - offset of base to nice value. If -1, base must be loaded.
// For performance, it is good to
//  - avoid Z_R0 for any of the argument registers.
//  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
//  - avoid Z_R1 for Rdst if Rdst == Rbase.
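//
// Decoding inverts the encoder above: oop = (cOop << shift) + base. With the same
// illustrative values as used there, cOop = 0x2468 becomes (0x2468 << 3) + base
// = base + 0x12340. When base and shifted offset share no bits (disjoint mode),
// the add degenerates to OR-ing in the base's high halfwords, which is what the
// disjointBase path below exploits.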
void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {

  const address oop_base  = Universe::narrow_oop_base();
  const int     oop_shift = Universe::narrow_oop_shift();
  const bool    disjoint  = Universe::narrow_oop_base_disjoint();

  assert(UseCompressedOops, "must be on to call this method");
  assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
  assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
         "cOop encoder detected bad shift");

  // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.

  if (oop_base != NULL) {
    unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
    unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
    unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
    if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
      BLOCK_COMMENT("cOop decoder disjointBase {");
      // We do not need to load the base. Instead, we can install the upper bits
      // with an OR instead of an ADD.
      Label done;

      // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
      if (maybeNULL) {  // NULL ptr must be preserved!
        z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
        z_bre(done);
      } else {
        z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
      }
      if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
        z_oihf(Rdst, oop_base_hf);
      } else if (oop_base_hl != 0) {
        z_oihl(Rdst, oop_base_hl);
      } else {
        assert(oop_base_hh != 0, "not heapbased mode");
        z_oihh(Rdst, oop_base_hh);
      }
      bind(done);
      BLOCK_COMMENT("} cOop decoder disjointBase");
    } else {
      BLOCK_COMMENT("cOop decoder general {");
      // There are three decode steps:
      //   scale oop offset (shift left)
      //   get base (in reg) and pow2_offset (constant)
      //   add base, pow2_offset, and oop offset
      // The following register overlap situations may exist:
      // Rdst == Rsrc,  Rbase any other
      //   not a problem. Scaling in-place leaves Rbase undisturbed.
      //   Loading Rbase does not impact the scaled offset.
      // Rdst == Rbase, Rsrc  any other
      //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
      //   would destroy the scaled offset.
      //   Remedy: use Rdst_tmp if Rbase has been preloaded,
      //           use Rbase_tmp if base has to be loaded.
      // Rsrc == Rbase, Rdst  any other
      //   only possible without preloaded Rbase.
      //   Loading Rbase does not destroy the compressed oop because it was scaled into Rdst before.
      // Rsrc == Rbase, Rdst == Rbase
      //   only possible without preloaded Rbase.
      //   Loading Rbase would destroy the compressed oop. Scaling in-place is ok.
      //   Remedy: use Rbase_tmp.
      //
      Label done;
      Register Rdst_tmp  = Rdst;
      Register Rbase_tmp = Rbase;
      bool     used_R0 = false;
      bool     used_R1 = false;
      bool     base_preloaded = pow2_offset >= 0;
      guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
      assert(oop_shift != 0, "room for optimization");

      // Check if we need to use scratch registers.
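      // Example of the Rdst == Rbase clash listed above: with a preloaded base in
      // Rbase == Rdst, scaling in-place would destroy the base, so the scaled
      // offset is built in Rdst_tmp (Z_R0 or Z_R1) and copied back to Rdst at the end.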
      if (Rdst == Rbase) {
        assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
        if (Rdst != Rsrc) {
          if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
          else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
        } else {
          Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
        }
      }
      if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);

      // Scale oop and check for NULL.
      // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
      if (maybeNULL) {  // NULL ptr must be preserved!
        z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
        z_bre(done);
      } else {
        z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
      }

      // Get oop_base components.
      if (!base_preloaded) {
        pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
      }

      // Add up all components.
      if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
        z_algr(Rdst_tmp, Rbase_tmp);
        if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
      } else {
        add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
      }

      bind(done);
      lgr_if_needed(Rdst, Rdst_tmp);
#ifdef ASSERT
      if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
      if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
#endif
      BLOCK_COMMENT("} cOop decoder general");
    }
  } else {
    BLOCK_COMMENT("cOop decoder zeroBase {");
    if (oop_shift == 0) {
      lgr_if_needed(Rdst, Rsrc);
    } else {
      z_sllg(Rdst, Rsrc, oop_shift);
    }
    BLOCK_COMMENT("} cOop decoder zeroBase");
  }
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result) {
  // OopHandle::resolve is an indirection.
  z_lg(result, 0, result);
}

void MacroAssembler::load_mirror(Register mirror, Register method) {
  mem2reg_opt(mirror, Address(method, Method::const_offset()));
  mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
  mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
  mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
  resolve_oop_handle(mirror);
}

//---------------------------------------------------------------
//---  Operations on arrays.
//---------------------------------------------------------------

// Compiler ensures base is doubleword aligned and cnt is #doublewords.
// Emitter does not KILL cnt and base arguments, since they need to be copied to
// work registers anyway.
// Actually, only r0, r1, and r5 are killed.
unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
  // Src_addr is evenReg.
  // Src_len is oddReg.

  int      block_start = offset();
  Register tmp_reg  = src_len; // Holds target instr addr for EX.
  Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
  Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.

  Label doXC, doMVCLE, done;

  BLOCK_COMMENT("Clear_Array {");

  // Check for zero len and convert to long.
  z_ltgfr(src_len, cnt_arg); // Remember cast value for doSTG case.
  z_bre(done);               // Nothing to do if len == 0.

  // Prefetch data to be cleared.
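  // PFD access code 2 = prefetch for store. The two prefetches below cover the
  // first two 256-byte cache lines of the area about to be cleared.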
  if (VM_Version::has_Prefetch()) {
    z_pfd(0x02,   0, Z_R0, base_pointer_arg);
    z_pfd(0x02, 256, Z_R0, base_pointer_arg);
  }

  z_sllg(dst_len, src_len, 3); // #bytes to clear.
  z_cghi(src_len, 32);         // Check for len <= 256 bytes (<= 32 DW).
  z_brnh(doXC);                // If so, use executed XC to clear.

  // MVCLE: initialize long arrays (general case).
  bind(doMVCLE);
  z_lgr(dst_addr, base_pointer_arg);
  clear_reg(src_len, true, false); // Src len of MVCLE is zero.

  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
  z_bru(done);

  // XC: initialize short arrays.
  Label XC_template; // Instr template, never exec directly!
  bind(XC_template);
  z_xc(0, 0, base_pointer_arg, 0, base_pointer_arg);

  bind(doXC);
  add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
  if (VM_Version::has_ExecuteExtensions()) {
    z_exrl(dst_len, XC_template);   // Execute XC with var. len.
  } else {
    z_larl(tmp_reg, XC_template);
    z_ex(dst_len, 0, Z_R0, tmp_reg); // Execute XC with var. len.
  }
  // z_bru(done); // fallthru

  bind(done);

  BLOCK_COMMENT("} Clear_Array");

  int block_end = offset();
  return block_end - block_start;
}

// Compiler ensures base is doubleword aligned and cnt is count of doublewords.
// Emitter does not KILL any arguments nor work registers.
// Emitter generates up to 16 XC instructions, depending on the array length.
unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
  int  block_start    = offset();
  int  off;
  int  lineSize_Bytes = AllocatePrefetchStepSize;
  int  lineSize_DW    = AllocatePrefetchStepSize >> LogBytesPerWord;
  bool doPrefetch     = VM_Version::has_Prefetch();
  int  XC_maxlen      = 256;
  int  numXCInstr     = cnt > 0 ? (cnt * BytesPerWord - 1) / XC_maxlen + 1 : 0;

  BLOCK_COMMENT("Clear_Array_Const {");
  assert(cnt * BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");

  // Do less prefetching for very short arrays.
  if (numXCInstr > 0) {
    // Prefetch only some cache lines, then begin clearing.
    if (doPrefetch) {
      if (cnt * BytesPerWord <= lineSize_Bytes / 4) { // If less than 1/4 of a cache line to clear,
        z_pfd(0x02, 0, Z_R0, base);                   // prefetch just the first cache line.
      } else {
        assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
        for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off++) {
          z_pfd(0x02, off * lineSize_Bytes, Z_R0, base);
        }
      }
    }

    for (off = 0; off < (numXCInstr - 1); off++) {
      z_xc(off * XC_maxlen, XC_maxlen - 1, base, off * XC_maxlen, base);

      // Prefetch some cache lines in advance.
      if (doPrefetch && (off <= numXCInstr - AllocatePrefetchLines)) {
        z_pfd(0x02, (off + AllocatePrefetchLines) * lineSize_Bytes, Z_R0, base);
      }
    }
    if (off * XC_maxlen < cnt * BytesPerWord) {
      z_xc(off * XC_maxlen, (cnt * BytesPerWord - off * XC_maxlen) - 1, base, off * XC_maxlen, base);
    }
  }
  BLOCK_COMMENT("} Clear_Array_Const");

  int block_end = offset();
  return block_end - block_start;
}

// Compiler ensures base is doubleword aligned and cnt is #doublewords.
// Emitter does not KILL cnt and base arguments, since they need to be copied to
// work registers anyway.
// Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
//
// For very large arrays, exploit MVCLE H/W support.
// MVCLE instruction automatically exploits H/W-optimized page mover.
// - Bytes up to next page boundary are cleared with a series of XC to self.
// - All full pages are cleared with the page mover H/W assist.
// - Remaining bytes are again cleared by a series of XC to self.
//
unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
  // Src_addr is evenReg.
  // Src_len is oddReg.

  int      block_start = offset();
  Register dst_len  = Z_R1; // Holds dst len  for MVCLE.
  Register dst_addr = Z_R0; // Holds dst addr for MVCLE.

  BLOCK_COMMENT("Clear_Array_Const_Big {");

  // Get len to clear.
  load_const_optimized(dst_len, (long)cnt * 8L); // in Bytes = #DW*8

  // Prepare other args to MVCLE.
  z_lgr(dst_addr, base_pointer_arg);
  // Indicate unused result.
  (void) clear_reg(src_len, true, false); // Src len of MVCLE is zero.

  // Clear.
  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
  BLOCK_COMMENT("} Clear_Array_Const_Big");

  int block_end = offset();
  return block_end - block_start;
}

// Allocator.
unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                                           Register cnt_reg,
                                                           Register tmp1_reg, Register tmp2_reg) {
  // Tmp1 is oddReg.
  // Tmp2 is evenReg.

  int block_start = offset();
  Label doMVC, doMVCLE, done, MVC_template;

  BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");

  // Check for zero len and convert to long.
  z_ltgfr(cnt_reg, cnt_reg); // Remember cast value for doSTG case.
  z_bre(done);               // Nothing to do if len == 0.

  z_sllg(Z_R1, cnt_reg, 3);  // Dst len in bytes. Calculated early to have the result ready.

  z_cghi(cnt_reg, 32);       // Check for len <= 256 bytes (<= 32 DW).
  z_brnh(doMVC);             // If so, use executed MVC to copy.

  bind(doMVCLE);             // A lot of data (more than 256 bytes).
  // Prep dest reg pair.
  z_lgr(Z_R0, dst_reg);      // dst addr
  // Dst len already in Z_R1.
  // Prep src reg pair.
  z_lgr(tmp2_reg, src_reg);  // src addr
  z_lgr(tmp1_reg, Z_R1);     // Src len same as dst len.

  // Do the copy.
  move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
  z_bru(done);                         // All done.

  bind(MVC_template);        // Just some data (not more than 256 bytes).
  z_mvc(0, 0, dst_reg, 0, src_reg);

  bind(doMVC);

  if (VM_Version::has_ExecuteExtensions()) {
    add2reg(Z_R1, -1);
  } else {
    add2reg(tmp1_reg, -1, Z_R1);
    z_larl(Z_R1, MVC_template);
  }

  if (VM_Version::has_Prefetch()) {
    z_pfd(1, 0, Z_R0, src_reg);
    z_pfd(2, 0, Z_R0, dst_reg);
    // z_pfd(1, 256, Z_R0, src_reg); // Assume very short copy.
    // z_pfd(2, 256, Z_R0, dst_reg);
  }

  if (VM_Version::has_ExecuteExtensions()) {
    z_exrl(Z_R1, MVC_template);
  } else {
    z_ex(tmp1_reg, 0, Z_R0, Z_R1);
  }

  bind(done);

  BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");

  int block_end = offset();
  return block_end - block_start;
}

//------------------------------------------------------
//   Special String Intrinsics. Implementation
//------------------------------------------------------

// Intrinsics for CompactStrings

// Compress char[] to byte[].
//   Restores: src, dst
//   Uses:     cnt
//   Kills:    tmp, Z_R0, Z_R1.
//   Early clobber: result.
// Note:
//   cnt is a signed int. Do not rely on the high word!
//   It counts # characters, not bytes.
// The result is the number of characters copied before the first incompatible character was found.
// If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
// by a few bytes. The result always indicates the number of copied characters.
//
// Note: Does not behave exactly like the package-private StringUTF16 compress Java implementation in case of failure:
// - A different number of characters may have been written to the dead array (if precise is false).
// - Returns a number < cnt instead of 0. (The result gets compared with cnt.)
unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
                                             Register tmp, bool precise) {
  assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);

  if (precise) {
    BLOCK_COMMENT("encode_iso_array {");
  } else {
    BLOCK_COMMENT("string_compress {");
  }
  int block_start = offset();

  Register Rsrc  = src;
  Register Rdst  = dst;
  Register Rix   = tmp;
  Register Rcnt  = cnt;
  Register Rmask = result; // holds incompatibility check mask until result value is stored.
  Label ScalarShortcut, AllDone;

  z_iilf(Rmask, 0xFF00FF00);
  z_iihf(Rmask, 0xFF00FF00);

#if 0  // Sacrifice shortcuts for code compactness
  {
    //---<  shortcuts for short strings (very frequent)  >---
    //   Strings with 4 and 8 characters were found to occur very frequently.
    //   Therefore, we handle them right away with minimal overhead.
    Label    skipShortcut, skip4Shortcut, skip8Shortcut;
    Register Rout = Z_R0;
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);       // 4 characters are very frequent
      z_lg(Z_R0, 0, Rsrc);       // Treat exactly 4 characters specially.
      if (VM_Version::has_DistinctOpnds()) {
        Rout = Z_R0;
        z_ngrk(Rix, Z_R0, Rmask);
      } else {
        Rout = Rix;
        z_lgr(Rix, Z_R0);
        z_ngr(Z_R0, Rmask);
      }
      z_brnz(skipShortcut);
      z_stcmh(Rout, 5, 0, Rdst);
      z_stcm(Rout,  5, 2, Rdst);
      z_lgfr(result, Rcnt);
      z_bru(AllDone);
    bind(skip4Shortcut);

    z_chi(Rcnt, 8);
    z_brne(skip8Shortcut);       // There's more to do...
      z_lmg(Z_R0, Z_R1, 0, Rsrc); // Treat exactly 8 characters specially.
      if (VM_Version::has_DistinctOpnds()) {
        Rout = Z_R0;
        z_ogrk(Rix, Z_R0, Z_R1);
        z_ngr(Rix, Rmask);
      } else {
        Rout = Rix;
        z_lgr(Rix, Z_R0);
        z_ogr(Z_R0, Z_R1);
        z_ngr(Z_R0, Rmask);
      }
      z_brnz(skipShortcut);
      z_stcmh(Rout, 5, 0, Rdst);
      z_stcm(Rout,  5, 2, Rdst);
      z_stcmh(Z_R1, 5, 4, Rdst);
      z_stcm(Z_R1,  5, 6, Rdst);
      z_lgfr(result, Rcnt);
      z_bru(AllDone);

    bind(skip8Shortcut);
    clear_reg(Z_R0, true, false); // #characters already processed (none). Precond for scalar loop.
    z_brl(ScalarShortcut);        // Just a few characters.

    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);               // make sure register is properly initialized.

  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt = 32;    // Minimum #characters required to use vector instructions.
                                 // Otherwise just do nothing in vector mode.
                                 // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
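    // How the incompatibility check below works: VGMH puts 0xff00 into every
    // halfword element of Vmask. OR-ing all four source registers and AND-ing
    // with the mask leaves a non-zero element iff some character has a non-zero
    // high byte, i.e. cannot be compressed to a single latin1 byte.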
    const int  log_min_vcnt = exact_log2(min_vcnt);
    Label      VectorLoop, VectorDone, VectorBreak;

    VectorRegister Vtmp1      = Z_V16;
    VectorRegister Vtmp2      = Z_V17;
    VectorRegister Vmask      = Z_V18;
    VectorRegister Vzero      = Z_V19;
    VectorRegister Vsrc_first = Z_V20;
    VectorRegister Vsrc_last  = Z_V23;

    assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
    assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
    z_srak(Rix, Rcnt, log_min_vcnt);  // # vector loop iterations
    z_brz(VectorDone);                // not enough data for vector loop

    z_vzero(Vzero);                   // all zeroes
    z_vgmh(Vmask, 0, 7);              // generate 0xff00 mask for all 2-byte elements
    z_sllg(Z_R0, Rix, log_min_vcnt);  // remember #chars that will be processed by vector loop

    bind(VectorLoop);
    z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
    add2reg(Rsrc, min_vcnt*2);

    //---<  check for incompatible character  >---
    z_vo(Vtmp1, Z_V20, Z_V21);
    z_vo(Vtmp2, Z_V22, Z_V23);
    z_vo(Vtmp1, Vtmp1, Vtmp2);
    z_vn(Vtmp1, Vtmp1, Vmask);
    z_vceqhs(Vtmp1, Vtmp1, Vzero);    // high half of all chars must be zero for successful compress.
    z_brne(VectorBreak);              // break vector loop, incompatible character found.
                                      // re-process data from current iteration in break handler.

    //---<  pack & store characters  >---
    z_vpkh(Vtmp1, Z_V20, Z_V21);      // pack (src1, src2) -> tmp1
    z_vpkh(Vtmp2, Z_V22, Z_V23);      // pack (src3, src4) -> tmp2
    z_vstm(Vtmp1, Vtmp2, 0, Rdst);    // store packed string
    add2reg(Rdst, min_vcnt);

    z_brct(Rix, VectorLoop);

    z_bru(VectorDone);

    bind(VectorBreak);
    add2reg(Rsrc, -min_vcnt*2);       // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
    z_sll(Rix, log_min_vcnt);         // # chars processed so far in VectorLoop, excl. current iteration.
    z_sr(Z_R0, Rix);                  // correct # chars processed in total.

    bind(VectorDone);
  }

  {
    const int  min_cnt = 8;           // Minimum #characters required to use unrolled loop.
                                      // Otherwise just do nothing in unrolled loop.
                                      // Must be multiple of 8.
    const int  log_min_cnt = exact_log2(min_cnt);
    Label      UnrolledLoop, UnrolledDone, UnrolledBreak;

    if (VM_Version::has_DistinctOpnds()) {
      z_srk(Rix, Rcnt, Z_R0);         // remaining # chars to compress in unrolled loop
    } else {
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_sra(Rix, log_min_cnt);          // unrolled loop count
    z_brz(UnrolledDone);

    bind(UnrolledLoop);
    z_lmg(Z_R0, Z_R1, 0, Rsrc);
    if (precise) {
      z_ogr(Z_R1, Z_R0);              // check all 8 chars for incompatibility
      z_ngr(Z_R1, Rmask);
      z_brnz(UnrolledBreak);

      z_lg(Z_R1, 8, Rsrc);            // reload destroyed register
      z_stcmh(Z_R0, 5, 0, Rdst);
      z_stcm(Z_R0,  5, 2, Rdst);
    } else {
      z_stcmh(Z_R0, 5, 0, Rdst);
      z_stcm(Z_R0,  5, 2, Rdst);

      z_ogr(Z_R0, Z_R1);
      z_ngr(Z_R0, Rmask);
      z_brnz(UnrolledBreak);
    }
    z_stcmh(Z_R1, 5, 4, Rdst);
    z_stcm(Z_R1,  5, 6, Rdst);

    add2reg(Rsrc, min_cnt*2);
    add2reg(Rdst, min_cnt);
    z_brct(Rix, UnrolledLoop);

    z_lgfr(Z_R0, Rcnt);               // # chars processed in total after unrolled loop.
    z_nilf(Z_R0, ~(min_cnt-1));
    z_tmll(Rcnt, min_cnt-1);
    z_brnaz(ScalarShortcut);          // if all bits zero, there is nothing left to do for scalar loop.
                                      // Rix == 0 in all cases.
    z_lgfr(result, Rcnt);             // all characters processed.
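    // The loops above advanced Rdst by one byte and Rsrc by two bytes per
    // character; the three subtractions below undo exactly that advancement
    // to restore the input pointers.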
    z_sgfr(Rdst, Rcnt);               // restore ptr
    z_sgfr(Rsrc, Rcnt);               // restore ptr, double the element count for Rsrc restore
    z_sgfr(Rsrc, Rcnt);
    z_bru(AllDone);

    bind(UnrolledBreak);
    z_lgfr(Z_R0, Rcnt);               // # chars processed in total after unrolled loop
    z_nilf(Z_R0, ~(min_cnt-1));
    z_sll(Rix, log_min_cnt);          // # chars processed so far in UnrolledLoop, excl. current iteration.
    z_sr(Z_R0, Rix);                  // correct # chars processed in total.
    if (!precise) {
      z_lgfr(result, Z_R0);
      z_aghi(result, min_cnt/2);      // min_cnt/2 characters have already been written
                                      // but ptrs were not updated yet.
      z_sgfr(Rdst, Z_R0);             // restore ptr
      z_sgfr(Rsrc, Z_R0);             // restore ptr, double the element count for Rsrc restore
      z_sgfr(Rsrc, Z_R0);
      z_bru(AllDone);
    }
    bind(UnrolledDone);
  }

  {
    Label ScalarLoop, ScalarDone, ScalarBreak;

    bind(ScalarShortcut);
    z_ltgfr(result, Rcnt);
    z_brz(AllDone);

#if 0  // Sacrifice shortcuts for code compactness
    {
      //---<  Special treatment for very short strings (one or two characters)  >---
      //   For these strings, we are sure that the above code was skipped.
      //   Thus, no registers were modified, register restore is not required.
      Label ScalarDoit, Scalar2Char;
      z_chi(Rcnt, 2);
      z_brh(ScalarDoit);
      z_llh(Z_R1, 0, Z_R0, Rsrc);
      z_bre(Scalar2Char);
      z_tmll(Z_R1, 0xff00);
      z_lghi(result, 0);              // cnt == 1, first char invalid, no chars successfully processed
      z_brnaz(AllDone);
      z_stc(Z_R1, 0, Z_R0, Rdst);
      z_lghi(result, 1);
      z_bru(AllDone);

      bind(Scalar2Char);
      z_llh(Z_R0, 2, Z_R0, Rsrc);
      z_tmll(Z_R1, 0xff00);
      z_lghi(result, 0);              // cnt == 2, first char invalid, no chars successfully processed
      z_brnaz(AllDone);
      z_stc(Z_R1, 0, Z_R0, Rdst);
      z_tmll(Z_R0, 0xff00);
      z_lghi(result, 1);              // cnt == 2, second char invalid, one char successfully processed
      z_brnaz(AllDone);
      z_stc(Z_R0, 1, Z_R0, Rdst);
      z_lghi(result, 2);
      z_bru(AllDone);

      bind(ScalarDoit);
    }
#endif

    if (VM_Version::has_DistinctOpnds()) {
      z_srk(Rix, Rcnt, Z_R0);         // remaining # chars to compress in scalar loop
    } else {
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_lgfr(result, Rcnt);             // # processed characters (if all runs ok).
    z_brz(ScalarDone);

    bind(ScalarLoop);
    z_llh(Z_R1, 0, Z_R0, Rsrc);
    z_tmll(Z_R1, 0xff00);
    z_brnaz(ScalarBreak);
    z_stc(Z_R1, 0, Z_R0, Rdst);
    add2reg(Rsrc, 2);
    add2reg(Rdst, 1);
    z_brct(Rix, ScalarLoop);

    z_bru(ScalarDone);

    bind(ScalarBreak);
    z_sr(result, Rix);

    bind(ScalarDone);
    z_sgfr(Rdst, result);             // restore ptr
    z_sgfr(Rsrc, result);             // restore ptr, double the element count for Rsrc restore
    z_sgfr(Rsrc, result);
  }
  bind(AllDone);

  if (precise) {
    BLOCK_COMMENT("} encode_iso_array");
  } else {
    BLOCK_COMMENT("} string_compress");
  }
  return offset() - block_start;
}

// Inflate byte[] to char[].
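// This variant uses the TROT instruction (translate one to two) via translate_ot(),
// with the translation table provided by the stub routines; cnt is consumed as the
// (positive) number of characters to inflate.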
unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
  int block_start = offset();

  BLOCK_COMMENT("string_inflate {");

  Register stop_char = Z_R0;
  Register table     = Z_R1;
  Register src_addr  = tmp;

  assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
  assert(dst->encoding()%2 == 0, "must be even reg");
  assert(cnt->encoding()%2 == 1, "must be odd reg");
  assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");

  StubRoutines::zarch::generate_load_trot_table_addr(this, table); // kills Z_R0 (if ASSERT)
  clear_reg(stop_char); // Stop character. Not used here, but initialized to have a defined value.
  lgr_if_needed(src_addr, src);
  z_llgfr(cnt, cnt);    // # src characters, must be a positive simm32.

  translate_ot(dst, src_addr, /* mask = */ 0x0001);

  BLOCK_COMMENT("} string_inflate");

  return offset() - block_start;
}

// Inflate byte[] to char[].
//   Restores: src, dst
//   Uses:     cnt
//   Kills:    tmp, Z_R0, Z_R1.
// Note:
//   cnt is a signed int. Do not rely on the high word!
//   It counts # characters, not bytes.
unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
  assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);

  BLOCK_COMMENT("string_inflate {");
  int block_start = offset();

  Register Rcnt = cnt; // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
  Register Rix  = tmp; // loop index
  Register Rsrc = src; // addr(src array)
  Register Rdst = dst; // addr(dst array)
  Label ScalarShortcut, AllDone;

#if 0  // Sacrifice shortcuts for code compactness
  {
    //---<  shortcuts for short strings (very frequent)  >---
    Label skipShortcut, skip4Shortcut;
    z_ltr(Rcnt, Rcnt);           // absolutely nothing to do for strings of len == 0.
    z_brz(AllDone);
    clear_reg(Z_R0);             // make sure registers are properly initialized.
    clear_reg(Z_R1);
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);       // 4 characters are very frequent
      z_icm(Z_R0, 5, 0, Rsrc);   // Treat exactly 4 characters specially.
      z_icm(Z_R1, 5, 2, Rsrc);
      z_stm(Z_R0, Z_R1, 0, Rdst);
      z_bru(AllDone);
    bind(skip4Shortcut);

    z_chi(Rcnt, 8);
    z_brh(skipShortcut);         // There's a lot to do...
    z_lgfr(Z_R0, Rcnt);          // remaining #characters (<= 8). Precond for scalar loop.
                                 // This does not destroy the "register cleared" state of Z_R0.
    z_brl(ScalarShortcut);       // Just a few characters.
      z_icmh(Z_R0, 5, 0, Rsrc);  // Treat exactly 8 characters specially.
      z_icmh(Z_R1, 5, 4, Rsrc);
      z_icm(Z_R0,  5, 2, Rsrc);
      z_icm(Z_R1,  5, 6, Rsrc);
      z_stmg(Z_R0, Z_R1, 0, Rdst);
      z_bru(AllDone);
    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);               // make sure register is properly initialized.

  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt = 32;    // Minimum #characters required to use vector instructions.
                                 // Otherwise just do nothing in vector mode.
                                 // Must be multiple of vector register length (16 bytes = 128 bits).
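    // VUPLHB/VUPLLB below zero-extend each byte into a halfword: e.g. latin1
    // input bytes 0x61 0x62 ('a' 'b') inflate to the UTF-16 values 0x0061 0x0062.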
    const int  log_min_vcnt = exact_log2(min_vcnt);
    Label      VectorLoop, VectorDone;

    assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
    z_srak(Rix, Rcnt, log_min_vcnt);  // calculate # vector loop iterations
    z_brz(VectorDone);                // skip if none

    z_sllg(Z_R0, Rix, log_min_vcnt);  // remember #chars that will be processed by vector loop

    bind(VectorLoop);
    z_vlm(Z_V20, Z_V21, 0, Rsrc);     // get next 32 characters (single-byte)
    add2reg(Rsrc, min_vcnt);

    z_vuplhb(Z_V22, Z_V20);           // V2 <- (expand) V0(high)
    z_vupllb(Z_V23, Z_V20);           // V3 <- (expand) V0(low)
    z_vuplhb(Z_V24, Z_V21);           // V4 <- (expand) V1(high)
    z_vupllb(Z_V25, Z_V21);           // V5 <- (expand) V1(low)
    z_vstm(Z_V22, Z_V25, 0, Rdst);    // store next 32 bytes
    add2reg(Rdst, min_vcnt*2);

    z_brct(Rix, VectorLoop);

    bind(VectorDone);
  }

  const int min_cnt = 8;              // Minimum #characters required to use unrolled scalar loop.
                                      // Otherwise just do nothing in unrolled scalar mode.
                                      // Must be multiple of 8.
  {
    const int  log_min_cnt = exact_log2(min_cnt);
    Label      UnrolledLoop, UnrolledDone;

    if (VM_Version::has_DistinctOpnds()) {
      z_srk(Rix, Rcnt, Z_R0);         // remaining # chars to process in unrolled loop
    } else {
      z_lr(Rix, Rcnt);
      z_sr(Rix, Z_R0);
    }
    z_sra(Rix, log_min_cnt);          // unrolled loop count
    z_brz(UnrolledDone);

    clear_reg(Z_R0);
    clear_reg(Z_R1);

    bind(UnrolledLoop);
    z_icmh(Z_R0, 5, 0, Rsrc);
    z_icmh(Z_R1, 5, 4, Rsrc);
    z_icm(Z_R0,  5, 2, Rsrc);
    z_icm(Z_R1,  5, 6, Rsrc);
    add2reg(Rsrc, min_cnt);

    z_stmg(Z_R0, Z_R1, 0, Rdst);

    add2reg(Rdst, min_cnt*2);
    z_brct(Rix, UnrolledLoop);

    bind(UnrolledDone);
    z_lgfr(Z_R0, Rcnt);               // # chars left over after unrolled loop.
    z_nilf(Z_R0, min_cnt-1);
    z_brnz(ScalarShortcut);           // if zero, there is nothing left to do for scalar loop.
                                      // Rix == 0 in all cases.
    z_sgfr(Z_R0, Rcnt);               // negative # characters the ptrs have been advanced previously.
    z_agr(Rdst, Z_R0);                // restore ptr, double the element count for Rdst restore.
    z_agr(Rdst, Z_R0);
    z_agr(Rsrc, Z_R0);                // restore ptr.
    z_bru(AllDone);
  }

  {
    bind(ScalarShortcut);
    // Z_R0 must contain the remaining # characters as a 64-bit signed int here.
    // The register contents are preserved over scalar processing (for register fixup).

#if 0  // Sacrifice shortcuts for code compactness
    {
      Label ScalarDefault;
      z_chi(Rcnt, 2);
      z_brh(ScalarDefault);
      z_llc(Z_R0, 0, Z_R0, Rsrc);     // 6 bytes
      z_sth(Z_R0, 0, Z_R0, Rdst);     // 4 bytes
      z_brl(AllDone);
      z_llc(Z_R0, 1, Z_R0, Rsrc);     // 6 bytes
      z_sth(Z_R0, 2, Z_R0, Rdst);     // 4 bytes
      z_bru(AllDone);
      bind(ScalarDefault);
    }
#endif

    Label CodeTable;
    // Some comments on Rix calculation:
    //  - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
    //  - high word of both Rix and Rcnt may contain garbage
    //  - the final lngfr takes care of that garbage, extending the sign to high word
    z_sllg(Rix, Z_R0, 2);             // calculate 10*Rix = (4*Rix + Rix)*2
    z_ar(Rix, Z_R0);
    z_larl(Z_R1, CodeTable);
    z_sll(Rix, 1);
    z_lngfr(Rix, Rix);                // ix range: [0..7], after inversion & mult: [-(7*10)..(0*10)].
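    // Computed jump into the llc/sth sequence following the branch: each load/store
    // pair is 10 bytes (6-byte LLC + 4-byte STH), so landing 10*ix bytes before
    // CodeTable executes exactly the last ix pairs, i.e. characters [0..ix-1].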
    z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);

    z_llc(Z_R1,  6, Z_R0, Rsrc);      // 6 bytes
    z_sth(Z_R1, 12, Z_R0, Rdst);      // 4 bytes

    z_llc(Z_R1,  5, Z_R0, Rsrc);
    z_sth(Z_R1, 10, Z_R0, Rdst);

    z_llc(Z_R1,  4, Z_R0, Rsrc);
    z_sth(Z_R1,  8, Z_R0, Rdst);

    z_llc(Z_R1,  3, Z_R0, Rsrc);
    z_sth(Z_R1,  6, Z_R0, Rdst);

    z_llc(Z_R1,  2, Z_R0, Rsrc);
    z_sth(Z_R1,  4, Z_R0, Rdst);

    z_llc(Z_R1,  1, Z_R0, Rsrc);
    z_sth(Z_R1,  2, Z_R0, Rdst);

    z_llc(Z_R1,  0, Z_R0, Rsrc);
    z_sth(Z_R1,  0, Z_R0, Rdst);
    bind(CodeTable);

    z_chi(Rcnt, 8);                   // no fixup for small strings. Rdst, Rsrc were not modified.
    z_brl(AllDone);

    z_sgfr(Z_R0, Rcnt);               // # characters the ptrs have been advanced previously.
    z_agr(Rdst, Z_R0);                // restore ptr, double the element count for Rdst restore.
    z_agr(Rdst, Z_R0);
    z_agr(Rsrc, Z_R0);                // restore ptr.
  }
  bind(AllDone);

  BLOCK_COMMENT("} string_inflate");
  return offset() - block_start;
}

// Inflate byte[] to char[], length known at compile time.
//   Restores: src, dst
//   Kills:    tmp, Z_R0, Z_R1.
// Note:
//   len is a signed int. It counts # characters, not bytes.
unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
  assert_different_registers(Z_R0, Z_R1, src, dst, tmp);

  BLOCK_COMMENT("string_inflate_const {");
  int block_start = offset();

  Register Rix  = tmp; // loop index
  Register Rsrc = src; // addr(src array)
  Register Rdst = dst; // addr(dst array)
  Label ScalarShortcut, AllDone;
  int nprocessed = 0;
  int src_off    = 0;  // compensates for saved (optimized away) ptr advancement.
  int dst_off    = 0;  // compensates for saved (optimized away) ptr advancement.
  bool restore_inputs = false;
  bool workreg_clear  = false;

  if ((len >= 32) && VM_Version::has_VectorFacility()) {
    const int  min_vcnt = 32;    // Minimum #characters required to use vector instructions.
                                 // Otherwise just do nothing in vector mode.
                                 // Must be multiple of vector register length (16 bytes = 128 bits).
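    // len is a compile-time constant, so the iteration count is fixed at code
    // emission; e.g. len = 80 yields two 32-char vector iterations here, and the
    // remaining 16 characters are handled by the paths further down.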
    const int  log_min_vcnt = exact_log2(min_vcnt);
    const int  iterations   = (len - nprocessed) >> log_min_vcnt;
    nprocessed             += iterations << log_min_vcnt;
    Label      VectorLoop;

    if (iterations == 1) {
      z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);  // get next 32 characters (single-byte)
      z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
      z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
      z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
      z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 bytes

      src_off += min_vcnt;
      dst_off += min_vcnt*2;
    } else {
      restore_inputs = true;

      z_lgfi(Rix, len>>log_min_vcnt);
      bind(VectorLoop);
      z_vlm(Z_V20, Z_V21, 0, Rsrc);          // get next 32 characters (single-byte)
      add2reg(Rsrc, min_vcnt);

      z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
      z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
      z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
      z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0, Rdst);         // store next 32 bytes
      add2reg(Rdst, min_vcnt*2);

      z_brct(Rix, VectorLoop);
    }
  }

  if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 16;            // Minimum #characters required to use vector instructions.
                                             // Otherwise just do nothing in vector mode.
                                             // Must be multiple of vector register length (16 bytes = 128 bits).
    const int  log_min_vcnt = exact_log2(min_vcnt);
    const int  iterations   = (len - nprocessed) >> log_min_vcnt;
    nprocessed             += iterations << log_min_vcnt;
    assert(iterations == 1, "must be!");

    z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);      // get next 16 characters (single-byte)
    z_vuplhb(Z_V22, Z_V20);                  // V2 <- (expand) V0(high)
    z_vupllb(Z_V23, Z_V20);                  // V3 <- (expand) V0(low)
    z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst);   // store next 32 bytes

    src_off += min_vcnt;
    dst_off += min_vcnt*2;
  }

  if ((len-nprocessed) > 8) {
    const int  min_cnt = 8;                  // Minimum #characters required to use unrolled scalar loop.
                                             // Otherwise just do nothing in unrolled scalar mode.
                                             // Must be multiple of 8.
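    // One or two iterations are emitted straight-line below, folding the pointer
    // advancement into the instruction offsets (src_off/dst_off); only for more
    // than two iterations is an actual loop with pointer updates generated.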
    const int  log_min_cnt = exact_log2(min_cnt);
    const int  iterations  = (len - nprocessed) >> log_min_cnt;
    nprocessed            += iterations << log_min_cnt;

    //---<  avoid loop overhead/ptr increment for small # iterations  >---
    if (iterations <= 2) {
      clear_reg(Z_R0);
      clear_reg(Z_R1);
      workreg_clear = true;

      z_icmh(Z_R0, 5, 0+src_off, Rsrc);
      z_icmh(Z_R1, 5, 4+src_off, Rsrc);
      z_icm(Z_R0,  5, 2+src_off, Rsrc);
      z_icm(Z_R1,  5, 6+src_off, Rsrc);
      z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);

      src_off += min_cnt;
      dst_off += min_cnt*2;
    }

    if (iterations == 2) {
      z_icmh(Z_R0, 5, 0+src_off, Rsrc);
      z_icmh(Z_R1, 5, 4+src_off, Rsrc);
      z_icm(Z_R0,  5, 2+src_off, Rsrc);
      z_icm(Z_R1,  5, 6+src_off, Rsrc);
      z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);

      src_off += min_cnt;
      dst_off += min_cnt*2;
    }

    if (iterations > 2) {
      Label UnrolledLoop;
      restore_inputs = true;

      clear_reg(Z_R0);
      clear_reg(Z_R1);
      workreg_clear = true;

      z_lgfi(Rix, iterations);
      bind(UnrolledLoop);
      z_icmh(Z_R0, 5, 0, Rsrc);
      z_icmh(Z_R1, 5, 4, Rsrc);
      z_icm(Z_R0,  5, 2, Rsrc);
      z_icm(Z_R1,  5, 6, Rsrc);
      add2reg(Rsrc, min_cnt);

      z_stmg(Z_R0, Z_R1, 0, Rdst);
      add2reg(Rdst, min_cnt*2);

      z_brct(Rix, UnrolledLoop);
    }
  }

  if ((len-nprocessed) > 0) {
    switch (len-nprocessed) {
      case 8:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        z_icmh(Z_R0, 5, 0+src_off, Rsrc);
        z_icmh(Z_R1, 5, 4+src_off, Rsrc);
        z_icm(Z_R0,  5, 2+src_off, Rsrc);
        z_icm(Z_R1,  5, 6+src_off, Rsrc);
        z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
        break;
      case 7:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        clear_reg(Rix);
        z_icm(Z_R0,  5, 0+src_off, Rsrc);
        z_icm(Z_R1,  5, 2+src_off, Rsrc);
        z_icm(Rix,   5, 4+src_off, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        z_llc(Z_R0,  6+src_off, Z_R0, Rsrc);
        z_st(Rix,    8+dst_off, Z_R0, Rdst);
        z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
        break;
      case 6:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        clear_reg(Rix);
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_icm(Rix,  5, 4+src_off, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        z_st(Rix,   8+dst_off, Z_R0, Rdst);
        break;
      case 5:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_llc(Rix,  4+src_off, Z_R0, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        z_sth(Rix,  8+dst_off, Z_R0, Rdst);
        break;
      case 4:
        if (!workreg_clear) {
          clear_reg(Z_R0);
          clear_reg(Z_R1);
        }
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_icm(Z_R1, 5, 2+src_off, Rsrc);
        z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
        break;
      case 3:
        if (!workreg_clear) {
          clear_reg(Z_R0);
        }
        z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
        z_icm(Z_R0, 5, 0+src_off, Rsrc);
        z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
        z_st(Z_R0,  0+dst_off, Rdst);
        break;
      case 2:
        z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
        z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
        z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
        z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
        break;
      case 1:
        z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
        z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
        break;
      default:
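        // Unreachable: the residue (len - nprocessed) is always within [1..8] here.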
        guarantee(false, "Impossible");
        break;
    }
    src_off   += len-nprocessed;
    dst_off   += (len-nprocessed)*2;
    nprocessed = len;
  }

  //---<  restore modified input registers  >---
  if ((nprocessed > 0) && restore_inputs) {
    z_agfi(Rsrc, -(nprocessed-src_off));
    if (nprocessed < 1000000000) { // avoid int overflow
      z_agfi(Rdst, -(nprocessed*2-dst_off));
    } else {
      z_agfi(Rdst, -(nprocessed-dst_off));
      z_agfi(Rdst, -nprocessed);
    }
  }

  BLOCK_COMMENT("} string_inflate_const");
  return offset() - block_start;
}

// Kills src.
unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
                                           Register odd_reg, Register even_reg, Register tmp) {
  int block_start = offset();
  Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
  const Register addr = src, mask = tmp;

  BLOCK_COMMENT("has_negatives {");

  z_llgfr(Z_R1, cnt);        // Number of bytes to read. (Must be a positive simm32.)
  z_llilf(mask, 0x80808080);
  z_lhi(result, 1);          // Assume true.
  // Last possible addr for fast loop.
  z_lay(odd_reg, -16, Z_R1, src);
  z_chi(cnt, 16);
  z_brl(Lslow);

  // ind1: index, even_reg: index increment, odd_reg: index limit
  z_iihf(mask, 0x80808080);
  z_lghi(even_reg, 16);

  bind(Lloop1); // 16 bytes per iteration.
  z_lg(Z_R0, Address(addr));
  z_lg(Z_R1, Address(addr, 8));
  z_ogr(Z_R0, Z_R1);
  z_ngr(Z_R0, mask);
  z_brne(Ldone);             // If found, return 1.
  z_brxlg(addr, even_reg, Lloop1);

  bind(Lslow);
  z_aghi(odd_reg, 16-1);     // Last possible addr for slow loop.
  z_lghi(even_reg, 1);
  z_cgr(addr, odd_reg);
  z_brh(Lnotfound);

  bind(Lloop2); // 1 byte per iteration.
  z_cli(Address(addr), 0x80);
  z_brnl(Ldone);             // If found, return 1.
  z_brxlg(addr, even_reg, Lloop2);

  bind(Lnotfound);
  z_lhi(result, 0);

  bind(Ldone);

  BLOCK_COMMENT("} has_negatives");

  return offset() - block_start;
}

// kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
unsigned int MacroAssembler::string_compare(Register str1, Register str2,
                                            Register cnt1, Register cnt2,
                                            Register odd_reg, Register even_reg, Register result, int ae) {
  int block_start = offset();

  assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
  assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);

  // If strings are equal up to min length, return the length difference.
  const Register diff = result, // Pre-set result with length difference.
                 min  = cnt1,   // min number of bytes
                 tmp  = cnt2;

  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
  // we interchange str1 and str2 in the UL case and negate the result.
  // Like this, str1 is always latin1 encoded, except for the UU case.
  // In addition, we need to zero-extend (the sign is 0 anyway) when using a 64-bit register.
  const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);

  BLOCK_COMMENT("string_compare {");

  if (used_as_LU) {
    z_srl(cnt2, 1);
  }

  // See if the lengths are different, and calculate min in cnt1.
  // Save diff in case we need it for a tie-breaker.
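  // E.g. "abcd" vs. "abc": equal over min = 3 characters, so the saved length
  // difference (+1) decides. "abd" vs. "abc": decided by the first mismatch, 'd'-'c'.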
  // diff = cnt1 - cnt2
  if (VM_Version::has_DistinctOpnds()) {
    z_srk(diff, cnt1, cnt2);
  } else {
    z_lr(diff, cnt1);
    z_sr(diff, cnt2);
  }
  if (str1 != str2) {
    if (VM_Version::has_LoadStoreConditional()) {
      z_locr(min, cnt2, Assembler::bcondHigh);
    } else {
      Label Lskip;
      z_brl(Lskip);    // min ok if cnt1 < cnt2
      z_lr(min, cnt2); // min = cnt2
      bind(Lskip);
    }
  }

  if (ae == StrIntrinsicNode::UU) {
    z_sra(diff, 1);
  }
  if (str1 != str2) {
    Label Ldone;
    if (used_as_LU) {
      // Loop which searches the first difference character by character.
      Label Lloop;
      const Register ind1 = Z_R1,
                     ind2 = min;
      int stride1 = 1, stride2 = 2; // See comment above.

      // ind1: index, even_reg: index increment, odd_reg: index limit
      z_llilf(ind1, (unsigned int)(-stride1));
      z_lhi(even_reg, stride1);
      add2reg(odd_reg, -stride1, min);
      clear_reg(ind2); // kills min

      bind(Lloop);
      z_brxh(ind1, even_reg, Ldone);
      z_llc(tmp, Address(str1, ind1));
      z_llh(Z_R0, Address(str2, ind2));
      z_ahi(ind2, stride2);
      z_sr(tmp, Z_R0);
      z_bre(Lloop);

      z_lr(result, tmp);

    } else {
      // Use clcle in fast loop (only for same encoding).
      z_lgr(Z_R0, str1);
      z_lgr(even_reg, str2);
      z_llgfr(Z_R1, min);
      z_llgfr(odd_reg, min);

      if (ae == StrIntrinsicNode::LL) {
        compare_long_ext(Z_R0, even_reg, 0);
      } else {
        compare_long_uni(Z_R0, even_reg, 0);
      }
      z_bre(Ldone);
      z_lgr(Z_R1, Z_R0);
      if (ae == StrIntrinsicNode::LL) {
        z_llc(Z_R0, Address(even_reg));
        z_llc(result, Address(Z_R1));
      } else {
        z_llh(Z_R0, Address(even_reg));
        z_llh(result, Address(Z_R1));
      }
      z_sr(result, Z_R0);
    }

    // Otherwise, return the difference between the first mismatched chars.
    bind(Ldone);
  }

  if (ae == StrIntrinsicNode::UL) {
    z_lcr(result, result); // Negate result (see note above).
  }

  BLOCK_COMMENT("} string_compare");

  return offset() - block_start;
}

unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
                                          Register odd_reg, Register even_reg, Register result, bool is_byte) {
  int block_start = offset();

  BLOCK_COMMENT("array_equals {");

  assert_different_registers(ary1, limit, odd_reg, even_reg);
  assert_different_registers(ary2, limit, odd_reg, even_reg);

  Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
  int base_offset = 0;

  if (ary1 != ary2) {
    if (is_array_equ) {
      base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);

      // Return true if the same array.
      compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);

      // Return false if one of them is NULL.
      compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
      compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);

      // Load the lengths of arrays.
      z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));

      // Return false if the two arrays are not equal length.
      z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
      z_brne(Ldone_false);

      // string len in bytes (right operand)
      if (!is_byte) {
        z_chi(odd_reg, 128);
        z_sll(odd_reg, 1); // preserves flags
        z_brh(Lclcle);
      } else {
        compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
      }
    } else {
      z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
      compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
    }

    // Use clc instruction for up to 256 bytes.
    {
      Register str1_reg = ary1,
               str2_reg = ary2;
      if (is_array_equ) {
        str1_reg = Z_R1;
        str2_reg = even_reg;
        add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
        add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
      }
      z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
      z_brl(Ldone_true);
      // Note: We could jump to the template if equal.

      assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
      z_exrl(odd_reg, CLC_template);
      z_bre(Ldone_true);
      // fall through

      bind(Ldone_false);
      clear_reg(result);
      z_bru(Ldone);

      bind(CLC_template);
      z_clc(0, 0, str1_reg, 0, str2_reg);
    }

    // Use clcle instruction.
    {
      bind(Lclcle);
      add2reg(even_reg, base_offset, ary2); // string addr (right operand)
      add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)

      z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
      if (is_byte) {
        compare_long_ext(Z_R0, even_reg, 0);
      } else {
        compare_long_uni(Z_R0, even_reg, 0);
      }
      z_lghi(result, 0); // Preserve flags.
      z_brne(Ldone);
    }
  }
  // fall through

  bind(Ldone_true);
  z_lghi(result, 1); // All characters are equal.
  bind(Ldone);

  BLOCK_COMMENT("} array_equals");

  return offset() - block_start;
}

// kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
                                            Register needle, Register needlecnt, int needlecntval,
                                            Register odd_reg, Register even_reg, int ae) {
  int block_start = offset();

  // Ensure 0 < needlecnt <= haycnt in ideal graph as prerequisite!
  assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
  const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
  const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
  Label L_needle1, L_Found, L_NotFound;

  BLOCK_COMMENT("string_indexof {");

  if (needle == haystack) {
    z_lhi(result, 0);
  } else {

    // Load first character of needle (R0 used by search_string instructions).
    if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }

    // Compute last haystack addr to use if no match gets found.
    if (needlecnt != noreg) {  // variable needlecnt
      z_ahi(needlecnt, -1);    // Remaining characters after first one.
      z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
      if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
    } else {                   // constant needlecnt
      assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
      // Compute index succeeding last element to compare.
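      // E.g. haycnt = 10, needlecntval = 3: a match can start at indices 0..7 only,
      // so the scan range ends 10 - 3 + 1 = 8 elements into the haystack.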
      if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
    }

    z_llgfr(haycnt, haycnt); // Clear high half.
    z_lgr(result, haystack); // Final result will be computed from needle start pointer.
    if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
    z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).

    if (h_csize != n_csize) {
      assert(ae == StrIntrinsicNode::UL, "Invalid encoding");

      if (needlecnt != noreg || needlecntval != 1) {
        if (needlecnt != noreg) {
          compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
        }

        // Main Loop: UL version (now we have at least 2 characters).
        Label L_OuterLoop, L_InnerLoop, L_Skip;
        bind(L_OuterLoop); // Search for 1st 2 characters.
        z_lgr(Z_R1, haycnt);
        MacroAssembler::search_string_uni(Z_R1, result);
        z_brc(Assembler::bcondNotFound, L_NotFound);
        z_lgr(result, Z_R1);

        z_lghi(Z_R1, n_csize);
        z_lghi(even_reg, h_csize);
        bind(L_InnerLoop);
        z_llgc(odd_reg, Address(needle, Z_R1));
        z_ch(odd_reg, Address(result, even_reg));
        z_brne(L_Skip);
        if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
        z_brnl(L_Found);
        z_aghi(Z_R1, n_csize);
        z_aghi(even_reg, h_csize);
        z_bru(L_InnerLoop);

        bind(L_Skip);
        z_aghi(result, h_csize); // This is the new address we want to use for comparing.
        z_bru(L_OuterLoop);
      }

    } else {
      const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
      Label L_clcle;

      if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
        if (needlecnt != noreg) {
          compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
          z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
          z_brl(L_needle1);
        }

        // Main Loop: clc version (now we have at least 2 characters).
        Label L_OuterLoop, CLC_template;
        bind(L_OuterLoop); // Search for 1st 2 characters.
6054 z_lgr(Z_R1, haycnt); 6055 if (h_csize == 1) { 6056 MacroAssembler::search_string(Z_R1, result); 6057 } else { 6058 MacroAssembler::search_string_uni(Z_R1, result); 6059 } 6060 z_brc(Assembler::bcondNotFound, L_NotFound); 6061 6062 add2reg(Z_R0, n_csize, needle); 6063 add2reg(even_reg, h_csize, Z_R1); 6064 z_lgr(result, Z_R1); 6065 if (needlecnt != noreg) { 6066 z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand) 6067 z_llgfr(odd_reg, needlecnt); 6068 } else { 6069 load_const_optimized(Z_R1, needle_bytes); 6070 if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); } 6071 } 6072 if (h_csize == 1) { 6073 compare_long_ext(Z_R0, even_reg, 0); 6074 } else { 6075 compare_long_uni(Z_R0, even_reg, 0); 6076 } 6077 z_bre(L_Found); 6078 6079 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload. 6080 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 6081 z_bru(L_OuterLoop); 6082 } 6083 } 6084 6085 if (needlecnt != noreg || needlecntval == 1) { 6086 bind(L_needle1); 6087 6088 // Single needle character version. 6089 if (h_csize == 1) { 6090 MacroAssembler::search_string(haycnt, result); 6091 } else { 6092 MacroAssembler::search_string_uni(haycnt, result); 6093 } 6094 z_lgr(result, haycnt); 6095 z_brc(Assembler::bcondFound, L_Found); 6096 } 6097 6098 bind(L_NotFound); 6099 add2reg(result, -1, haystack); // Return -1. 6100 6101 bind(L_Found); // Return index (or -1 in fallthrough case). 6102 z_sgr(result, haystack); 6103 if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); } 6104 } 6105 BLOCK_COMMENT("} string_indexof"); 6106 6107 return offset() - block_start; 6108 } 6109 6110 // early clobber: result 6111 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt, 6112 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) { 6113 int block_start = offset(); 6114 6115 BLOCK_COMMENT("string_indexof_char {"); 6116 6117 if (needle == haystack) { 6118 z_lhi(result, 0); 6119 } else { 6120 6121 Label Ldone; 6122 6123 z_llgfr(odd_reg, haycnt); // Preset loop ctr/searchrange end. 6124 if (needle == noreg) { 6125 load_const_optimized(Z_R0, (unsigned long)needleChar); 6126 } else { 6127 if (is_byte) { 6128 z_llgcr(Z_R0, needle); // First (and only) needle char. 6129 } else { 6130 z_llghr(Z_R0, needle); // First (and only) needle char. 6131 } 6132 } 6133 6134 if (!is_byte) { 6135 z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU. 6136 } 6137 6138 z_lgr(even_reg, haystack); // haystack addr 6139 z_agr(odd_reg, haystack); // First char after range end. 
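    // SRST/SRSTU take the search character in Z_R0, the end address in odd_reg,
    // and the start address in even_reg; on a hit, odd_reg points at the match.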
    z_lghi(result, -1);

    if (is_byte) {
      MacroAssembler::search_string(odd_reg, even_reg);
    } else {
      MacroAssembler::search_string_uni(odd_reg, even_reg);
    }
    z_brc(Assembler::bcondNotFound, Ldone);
    if (is_byte) {
      if (VM_Version::has_DistinctOpnds()) {
        z_sgrk(result, odd_reg, haystack);
      } else {
        z_sgr(odd_reg, haystack);
        z_lgr(result, odd_reg);
      }
    } else {
      z_slgr(odd_reg, haystack);
      z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
    }

    bind(Ldone);
  }
  BLOCK_COMMENT("} string_indexof_char");

  return offset() - block_start;
}


//-------------------------------------------------
//   Constants (scalar and oop) in constant pool
//-------------------------------------------------

// Add a non-relocated constant to the CP.
int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
  long    value  = val.value();
  address tocPos = long_constant(value);

  if (tocPos != NULL) {
    int tocOffset = (int)(tocPos - code()->consts()->start());
    return tocOffset;
  }
  // long_constant() returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently
  // generated access code gets executed.
  return -1;
}

// Add a relocated constant to the CP.
// Returns the TOC offset where the address is stored.
int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
  // Use RelocationHolder::none for the constant pool entry.
  // Otherwise we will end up with a failing NativeCall::verify(x),
  // where x is the address of the constant pool entry.
  address tocPos = address_constant((address)oop.value(), RelocationHolder::none);

  if (tocPos != NULL) {
    int tocOffset = (int)(tocPos - code()->consts()->start());
    RelocationHolder rsp = oop.rspec();
    Relocation *rel = rsp.reloc();

    // Store toc_offset in relocation, used by call_far_patchable.
    if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
      ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
    }
    // Relocate at the load's pc.
    relocate(rsp);

    return tocOffset;
  }
  // address_constant() returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently
  // generated access code gets executed.
  return -1;
}

bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
  int     tocOffset = store_const_in_toc(a);
  if (tocOffset == -1) return false;
  address tocPos    = tocOffset + code()->consts()->start();
  assert((address)code()->consts()->start() != NULL, "Please add CP address");

  load_long_pcrelative(dst, tocPos);
  return true;
}

bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
  int     tocOffset = store_oop_in_toc(a);
  if (tocOffset == -1) return false;
  address tocPos    = tocOffset + code()->consts()->start();
  assert((address)code()->consts()->start() != NULL, "Please add CP address");

  load_addr_pcrelative(dst, tocPos);
  return true;
}

// If the instruction sequence at the given pc is a load_const_from_toc
// sequence, return the value currently stored at the referenced position
// in the TOC.
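// Two patterns are handled: a pc-relative load (dataLoc = pc + offset) and a
// TOC-register based load, where the offset is relative to the nmethod's
// constant table (dataLoc = ctable_begin() + offset).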
6238 intptr_t MacroAssembler::get_const_from_toc(address pc) { 6239 6240 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 6241 6242 long offset = get_load_const_from_toc_offset(pc); 6243 address dataLoc = NULL; 6244 if (is_load_const_from_toc_pcrelative(pc)) { 6245 dataLoc = pc + offset; 6246 } else { 6247 CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie. 6248 assert(cb && cb->is_nmethod(), "sanity"); 6249 nmethod* nm = (nmethod*)cb; 6250 dataLoc = nm->ctable_begin() + offset; 6251 } 6252 return *(intptr_t *)dataLoc; 6253 } 6254 6255 // If the instruction sequence at the given pc is a load_const_from_toc 6256 // sequence, copy the passed-in new_data value into the referenced 6257 // position in the TOC. 6258 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) { 6259 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 6260 6261 long offset = MacroAssembler::get_load_const_from_toc_offset(pc); 6262 address dataLoc = NULL; 6263 if (is_load_const_from_toc_pcrelative(pc)) { 6264 dataLoc = pc+offset; 6265 } else { 6266 nmethod* nm = CodeCache::find_nmethod(pc); 6267 assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob"); 6268 dataLoc = nm->ctable_begin() + offset; 6269 } 6270 if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary. 6271 *(unsigned long *)dataLoc = new_data; 6272 } 6273 } 6274 6275 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc 6276 // site. Verify by calling is_load_const_from_toc() before!! 6277 // Offset is +/- 2**32 -> use long. 6278 long MacroAssembler::get_load_const_from_toc_offset(address a) { 6279 assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load"); 6280 // expected code sequence: 6281 // z_lgrl(t, simm32); len = 6 6282 unsigned long inst; 6283 unsigned int len = get_instruction(a, &inst); 6284 return get_pcrel_offset(inst); 6285 } 6286 6287 //********************************************************************************** 6288 // inspection of generated instruction sequences for a particular pattern 6289 //********************************************************************************** 6290 6291 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) { 6292 #ifdef ASSERT 6293 unsigned long inst; 6294 unsigned int len = get_instruction(a+2, &inst); 6295 if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) { 6296 const int range = 128; 6297 Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl"); 6298 VM_Version::z_SIGSEGV(); 6299 } 6300 #endif 6301 // expected code sequence: 6302 // z_lgrl(t, relAddr32); len = 6 6303 //TODO: verify accessed data is in CP, if possible. 6304 return is_load_pcrelative_long(a); // TODO: might be too general. Currently, only lgrl is used. 
}

bool MacroAssembler::is_load_const_from_toc_call(address a) {
  return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
}

bool MacroAssembler::is_load_const_call(address a) {
  return is_load_const(a) && is_call_byregister(a + load_const_size());
}

//-------------------------------------------------
// Emitters for some really CISC instructions
//-------------------------------------------------

void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
  assert(dst->encoding()%2==0, "must be an even/odd register pair");
  assert(src->encoding()%2==0, "must be an even/odd register pair");
  assert(pad<256, "must be a padding BYTE");

  Label retry;
  bind(retry);
  Assembler::z_mvcle(dst, src, pad);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad<256, "must be a padding BYTE");

  Label retry;
  bind(retry);
  Assembler::z_clcle(left, right, pad, Z_R0);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad<=0xfff, "must be a padding HALFWORD");
  assert(VM_Version::has_ETF2(), "instruction must be available");

  Label retry;
  bind(retry);
  Assembler::z_clclu(left, right, pad, Z_R0);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::search_string(Register end, Register start) {
  assert(end->encoding() != 0, "end address must not be in R0");
  assert(start->encoding() != 0, "start address must not be in R0");

  Label retry;
  bind(retry);
  Assembler::z_srst(end, start);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::search_string_uni(Register end, Register start) {
  assert(end->encoding() != 0, "end address must not be in R0");
  assert(start->encoding() != 0, "start address must not be in R0");
  assert(VM_Version::has_ETF3(), "instruction must be available");

  Label retry;
  bind(retry);
  Assembler::z_srstu(end, start);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kmac(Register srcBuff) {
  assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kmac(Z_R0, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kimd(Register srcBuff) {
  assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kimd(Z_R0, srcBuff);
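  // KIMD may end with CC==3 after processing a CPU-determined amount of data;
  // in that case, branch back and resume where it left off.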
Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6392 } 6393 6394 void MacroAssembler::klmd(Register srcBuff) { 6395 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 6396 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 6397 6398 Label retry; 6399 bind(retry); 6400 Assembler::z_klmd(Z_R0, srcBuff); 6401 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6402 } 6403 6404 void MacroAssembler::km(Register dstBuff, Register srcBuff) { 6405 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 6406 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 6407 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 6408 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 6409 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 6410 6411 Label retry; 6412 bind(retry); 6413 Assembler::z_km(dstBuff, srcBuff); 6414 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6415 } 6416 6417 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) { 6418 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 6419 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 6420 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 6421 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 6422 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 6423 6424 Label retry; 6425 bind(retry); 6426 Assembler::z_kmc(dstBuff, srcBuff); 6427 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6428 } 6429 6430 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) { 6431 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 6432 6433 Label retry; 6434 bind(retry); 6435 Assembler::z_cksm(crcBuff, srcBuff); 6436 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6437 } 6438 6439 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) { 6440 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 6441 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6442 6443 Label retry; 6444 bind(retry); 6445 Assembler::z_troo(r1, r2, m3); 6446 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6447 } 6448 6449 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) { 6450 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 6451 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6452 6453 Label retry; 6454 bind(retry); 6455 Assembler::z_trot(r1, r2, m3); 6456 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6457 } 6458 6459 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) { 6460 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 6461 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6462 6463 Label retry; 6464 bind(retry); 6465 Assembler::z_trto(r1, r2, m3); 6466 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6467 } 6468 6469 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) { 6470 assert(r1->encoding() % 2 == 0, "dst 
addr/src len must be an even/odd register pair");
  assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");

  Label retry;
  bind(retry);
  Assembler::z_trtt(r1, r2, m3);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}


void MacroAssembler::generate_type_profiling(const Register Rdata,
                                             const Register Rreceiver_klass,
                                             const Register Rwanted_receiver_klass,
                                             const Register Rmatching_row,
                                             bool is_virtual_call) {
  const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
                       in_bytes(ReceiverTypeData::receiver_offset(0));
  const int num_rows = ReceiverTypeData::row_limit();
  NearLabel found_free_row;
  NearLabel do_increment;
  NearLabel found_no_slot;

  BLOCK_COMMENT("type profiling {");

  // Search for:
  //    a) The type given in Rwanted_receiver_klass.
  //    b) The *first* empty row.

  // First search for a) only, paying no regard to b).
  // This is possible because
  //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
  // is never true (receiver_class can't be zero).
  for (int row_num = 0; row_num < num_rows; row_num++) {
    // row_offset should be a well-behaved positive number. The generated code relies
    // on that with respect to constant code size. add2reg can handle all row_offset
    // values, but would have to vary the generated code size.
    int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
    assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");

    // Is Rwanted_receiver_klass in this row?
    if (VM_Version::has_CompareBranch()) {
      z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
      // Rmatching_row = Rdata + row_offset;
      add2reg(Rmatching_row, row_offset, Rdata);
      // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
      compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
    } else {
      add2reg(Rmatching_row, row_offset, Rdata);
      z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
      z_bre(do_increment);
    }
  }

  // Now that we did not find a match, let's search for b).

  // We could save the first calculation of Rmatching_row if we would search for a) in reverse order.
  // We would then end up here with Rmatching_row containing the value for row_num == 0.
  // We would not see much benefit, if any at all, because the CPU can schedule
  // two instructions together with a branch anyway.
  for (int row_num = 0; row_num < num_rows; row_num++) {
    int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));

    // Does this row have a zero receiver_klass, i.e. is it empty?
    if (VM_Version::has_CompareBranch()) {
      z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
      // Rmatching_row = Rdata + row_offset
      add2reg(Rmatching_row, row_offset, Rdata);
      // if (*row_recv == (intptr_t) 0) goto found_free_row
      compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
    } else {
      add2reg(Rmatching_row, row_offset, Rdata);
      load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
      z_bre(found_free_row);  // zero -> Found a free row.
    }
  }

  // No match, no empty row found.
  // Increment total counter to indicate polymorphic case.
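  // (Done below only for virtual call sites; the counter lives in the CounterData
  //  part of the profiling entry, see CounterData::count_offset().)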
6548 if (is_virtual_call) { 6549 add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row); 6550 } 6551 z_bru(found_no_slot); 6552 6553 // Here we found an empty row, but we have not found Rwanted_receiver_klass. 6554 // Rmatching_row holds the address to the first empty row. 6555 bind(found_free_row); 6556 // Store receiver_klass into empty slot. 6557 z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row); 6558 6559 // Increment the counter of Rmatching_row. 6560 bind(do_increment); 6561 ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0); 6562 add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata); 6563 6564 bind(found_no_slot); 6565 6566 BLOCK_COMMENT("} type profiling"); 6567 } 6568 6569 //--------------------------------------- 6570 // Helpers for Intrinsic Emitters 6571 //--------------------------------------- 6572 6573 /** 6574 * uint32_t crc; 6575 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 6576 */ 6577 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) { 6578 assert_different_registers(crc, table, tmp); 6579 assert_different_registers(val, table); 6580 if (crc == val) { // Must rotate first to use the unmodified value. 6581 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 6582 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 6583 } else { 6584 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 6585 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 6586 } 6587 z_x(crc, Address(table, tmp, 0)); 6588 } 6589 6590 /** 6591 * uint32_t crc; 6592 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 6593 */ 6594 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 6595 fold_byte_crc32(crc, crc, table, tmp); 6596 } 6597 6598 /** 6599 * Emits code to update CRC-32 with a byte value according to constants in table. 6600 * 6601 * @param [in,out]crc Register containing the crc. 6602 * @param [in]val Register containing the byte to fold into the CRC. 6603 * @param [in]table Register containing the table of crc constants. 6604 * 6605 * uint32_t crc; 6606 * val = crc_table[(val ^ crc) & 0xFF]; 6607 * crc = val ^ (crc >> 8); 6608 */ 6609 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 6610 z_xr(val, crc); 6611 fold_byte_crc32(crc, val, table, val); 6612 } 6613 6614 6615 /** 6616 * @param crc register containing existing CRC (32-bit) 6617 * @param buf register pointing to input byte buffer (byte*) 6618 * @param len register containing number of bytes 6619 * @param table register pointing to CRC table 6620 */ 6621 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) { 6622 assert_different_registers(crc, buf, len, table, data); 6623 6624 Label L_mainLoop, L_done; 6625 const int mainLoop_stepping = 1; 6626 6627 // Process all bytes in a single-byte loop. 6628 z_ltr(len, len); 6629 z_brnh(L_done); 6630 6631 bind(L_mainLoop); 6632 z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 6633 add2reg(buf, mainLoop_stepping); // Advance buffer position. 6634 update_byte_crc32(crc, data, table); 6635 z_brct(len, L_mainLoop); // Iterate. 
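  // (BRCT decrements len and branches while the result is non-zero,
  //  so the loop body runs once per byte.)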

  bind(L_done);
}

/**
 * Emits code to update CRC-32 with a 4-byte value according to constants in table.
 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
 */
void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                                        Register t0,  Register t1,  Register t2,    Register t3) {
  // This is what we implement (the DOBIG4 part):
  //
  // #define DOBIG4 c ^= *++buf4; \
  //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
  //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
  // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
  // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
  const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
  const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
  const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
  const int ix3 = 7*(4*CRC32_COLUMN_SIZE);

  // XOR crc with next four bytes of buffer.
  lgr_if_needed(t0, crc);
  z_x(t0, Address(buf, bufDisp));
  if (bufInc != 0) {
    add2reg(buf, bufInc);
  }

  // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
  rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
  rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
  rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
  rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2

  // XOR indexed table values to calculate updated crc.
  z_ly(t2, Address(table, t2, (intptr_t)ix1));
  z_ly(t0, Address(table, t0, (intptr_t)ix3));
  z_xy(t2, Address(table, t3, (intptr_t)ix0));
  z_xy(t0, Address(table, t1, (intptr_t)ix2));
  z_xr(t0, t2);            // Now t0 contains the updated CRC value.
  lgr_if_needed(crc, t0);
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 *
 * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
 */
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                                        Register t0,  Register t1,  Register t2,  Register t3,
                                        bool invertCRC) {
  assert_different_registers(crc, buf, len, table);

  Label L_mainLoop, L_tail;
  Register  data = t0;
  Register  ctr  = Z_R0;
  const int mainLoop_stepping = 8;
  const int tailLoop_stepping = 1;
  const int log_stepping      = exact_log2(mainLoop_stepping);

  // Don't test for len <= 0 here. This pathological case should not occur anyway.
  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the short-buffer check
  // (z_srag/z_brnh) below and by the len test in update_byteLoop_crc32.

  if (invertCRC) {
    not_(crc, noreg, false);  // 1s complement of crc
  }

#if 0
  {
    // Pre-mainLoop alignment did not show any positive effect on performance.
    // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.

    z_cghi(len, mainLoop_stepping);  // Alignment is useless for short data streams.
    z_brnh(L_tail);

    // Align buf to word (4-byte) boundary.
    z_lcr(ctr, buf);
    rotate_then_insert(ctr, ctr, 62, 63, 0, true);  // TODO: should set cc
    z_sgfr(len, ctr);                               // Remaining len after alignment.

    update_byteLoop_crc32(crc, buf, ctr, table, data);
  }
#endif

  // Check for short (<mainLoop_stepping bytes) buffer.
  z_srag(ctr, len, log_stepping);
  z_brnh(L_tail);

  z_lrvr(crc, crc);         // Reverse byte order, because we are dealing with big-endian data.
  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true);  // #bytes for tailLoop

  BIND(L_mainLoop);
  update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
  update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
  z_brct(ctr, L_mainLoop);  // Iterate.

  z_lrvr(crc, crc);         // Restore original byte order.

  // Process last few (<8) bytes of buffer.
  BIND(L_tail);
  update_byteLoop_crc32(crc, buf, len, table, data);

  if (invertCRC) {
    not_(crc, noreg, false);  // 1s complement of crc
  }
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 *
 * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
 */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                                        Register t0,  Register t1,  Register t2,  Register t3,
                                        bool invertCRC) {
  assert_different_registers(crc, buf, len, table);

  Label L_mainLoop, L_tail;
  Register  data = t0;
  Register  ctr  = Z_R0;
  const int mainLoop_stepping = 4;
  const int log_stepping      = exact_log2(mainLoop_stepping);

  // Don't test for len <= 0 here. This pathological case should not occur anyway.
  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the short-buffer check
  // (z_srag/z_brnh) below and by the len test in update_byteLoop_crc32.

  if (invertCRC) {
    not_(crc, noreg, false);  // 1s complement of crc
  }

  // Check for short (<4 bytes) buffer.
  z_srag(ctr, len, log_stepping);
  z_brnh(L_tail);

  z_lrvr(crc, crc);         // Reverse byte order, because we are dealing with big-endian data.
  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true);  // #bytes for tailLoop

  BIND(L_mainLoop);
  update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
  z_brct(ctr, L_mainLoop);  // Iterate.

  z_lrvr(crc, crc);         // Restore original byte order.

  // Process last few (<4) bytes of buffer.
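  // (len was masked above to len % mainLoop_stepping; the tail loop consumes
  //  those remaining bytes one at a time.)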
6792 BIND(L_tail); 6793 update_byteLoop_crc32(crc, buf, len, table, data); 6794 6795 if (invertCRC) { 6796 not_(crc, noreg, false); // 1s complement of crc 6797 } 6798 } 6799 6800 /** 6801 * @param crc register containing existing CRC (32-bit) 6802 * @param buf register pointing to input byte buffer (byte*) 6803 * @param len register containing number of bytes 6804 * @param table register pointing to CRC table 6805 */ 6806 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, 6807 Register t0, Register t1, Register t2, Register t3, 6808 bool invertCRC) { 6809 assert_different_registers(crc, buf, len, table); 6810 Register data = t0; 6811 6812 if (invertCRC) { 6813 not_(crc, noreg, false); // 1s complement of crc 6814 } 6815 6816 update_byteLoop_crc32(crc, buf, len, table, data); 6817 6818 if (invertCRC) { 6819 not_(crc, noreg, false); // 1s complement of crc 6820 } 6821 } 6822 6823 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, 6824 bool invertCRC) { 6825 assert_different_registers(crc, buf, len, table, tmp); 6826 6827 if (invertCRC) { 6828 not_(crc, noreg, false); // 1s complement of crc 6829 } 6830 6831 z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 6832 update_byte_crc32(crc, tmp, table); 6833 6834 if (invertCRC) { 6835 not_(crc, noreg, false); // 1s complement of crc 6836 } 6837 } 6838 6839 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, 6840 bool invertCRC) { 6841 assert_different_registers(crc, val, table); 6842 6843 if (invertCRC) { 6844 not_(crc, noreg, false); // 1s complement of crc 6845 } 6846 6847 update_byte_crc32(crc, val, table); 6848 6849 if (invertCRC) { 6850 not_(crc, noreg, false); // 1s complement of crc 6851 } 6852 } 6853 6854 // 6855 // Code for BigInteger::multiplyToLen() intrinsic. 6856 // 6857 6858 // dest_lo += src1 + src2 6859 // dest_hi += carry1 + carry2 6860 // Z_R7 is destroyed ! 6861 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, 6862 Register src1, Register src2) { 6863 clear_reg(Z_R7); 6864 z_algr(dest_lo, src1); 6865 z_alcgr(dest_hi, Z_R7); 6866 z_algr(dest_lo, src2); 6867 z_alcgr(dest_hi, Z_R7); 6868 } 6869 6870 // Multiply 64 bit by 64 bit first loop. 6871 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, 6872 Register x_xstart, 6873 Register y, Register y_idx, 6874 Register z, 6875 Register carry, 6876 Register product, 6877 Register idx, Register kdx) { 6878 // jlong carry, x[], y[], z[]; 6879 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 6880 // huge_128 product = y[idx] * x[xstart] + carry; 6881 // z[kdx] = (jlong)product; 6882 // carry = (jlong)(product >>> 64); 6883 // } 6884 // z[xstart] = carry; 6885 6886 Label L_first_loop, L_first_loop_exit; 6887 Label L_one_x, L_one_y, L_multiply; 6888 6889 z_aghi(xstart, -1); 6890 z_brl(L_one_x); // Special case: length of x is 1. 6891 6892 // Load next two integers of x. 6893 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6894 mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0)); 6895 6896 6897 bind(L_first_loop); 6898 6899 z_aghi(idx, -1); 6900 z_brl(L_first_loop_exit); 6901 z_aghi(idx, -1); 6902 z_brl(L_one_y); 6903 6904 // Load next two integers of y. 
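  // (idx was decremented twice above, so the 8-byte load below fetches
  //  y[idx] and y[idx+1] in one go.)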
6905 z_sllg(Z_R1_scratch, idx, LogBytesPerInt); 6906 mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0)); 6907 6908 6909 bind(L_multiply); 6910 6911 Register multiplicand = product->successor(); 6912 Register product_low = multiplicand; 6913 6914 lgr_if_needed(multiplicand, x_xstart); 6915 z_mlgr(product, y_idx); // multiplicand * y_idx -> product::multiplicand 6916 clear_reg(Z_R7); 6917 z_algr(product_low, carry); // Add carry to result. 6918 z_alcgr(product, Z_R7); // Add carry of the last addition. 6919 add2reg(kdx, -2); 6920 6921 // Store result. 6922 z_sllg(Z_R7, kdx, LogBytesPerInt); 6923 reg2mem_opt(product_low, Address(z, Z_R7, 0)); 6924 lgr_if_needed(carry, product); 6925 z_bru(L_first_loop); 6926 6927 6928 bind(L_one_y); // Load one 32 bit portion of y as (0,value). 6929 6930 clear_reg(y_idx); 6931 mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false); 6932 z_bru(L_multiply); 6933 6934 6935 bind(L_one_x); // Load one 32 bit portion of x as (0,value). 6936 6937 clear_reg(x_xstart); 6938 mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false); 6939 z_bru(L_first_loop); 6940 6941 bind(L_first_loop_exit); 6942 } 6943 6944 // Multiply 64 bit by 64 bit and add 128 bit. 6945 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, 6946 Register z, 6947 Register yz_idx, Register idx, 6948 Register carry, Register product, 6949 int offset) { 6950 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6951 // z[kdx] = (jlong)product; 6952 6953 Register multiplicand = product->successor(); 6954 Register product_low = multiplicand; 6955 6956 z_sllg(Z_R7, idx, LogBytesPerInt); 6957 mem2reg_opt(yz_idx, Address(y, Z_R7, offset)); 6958 6959 lgr_if_needed(multiplicand, x_xstart); 6960 z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand 6961 mem2reg_opt(yz_idx, Address(z, Z_R7, offset)); 6962 6963 add2_with_carry(product, product_low, carry, yz_idx); 6964 6965 z_sllg(Z_R7, idx, LogBytesPerInt); 6966 reg2mem_opt(product_low, Address(z, Z_R7, offset)); 6967 6968 } 6969 6970 // Multiply 128 bit by 128 bit. Unrolled inner loop. 
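// The 64x64->128-bit products are formed with MLGR, which writes into an
// even/odd register pair: product (even) receives the high half, its
// successor (odd) the low half.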
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
                                             Register y, Register z,
                                             Register yz_idx, Register idx,
                                             Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  // jlong carry, x[], y[], z[];
  // int kdx = ystart+1;
  // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //   z[kdx+idx+1] = (jlong)product;
  //   jlong carry2 = (jlong)(product >>> 64);
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }
  // idx += 2;
  // if (idx > 0) {
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  // Scale the index.
  lgr_if_needed(jdx, idx);
  and_imm(jdx, 0xfffffffffffffffcL);
  rshift(jdx, 2);


  bind(L_third_loop);

  z_aghi(jdx, -1);
  z_brl(L_third_loop_exit);
  add2reg(idx, -4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  lgr_if_needed(carry2, product);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  lgr_if_needed(carry, product);
  z_bru(L_third_loop);


  bind(L_third_loop_exit);  // Handle any left-over operand parts.

  and_imm(idx, 0x3);
  z_brz(L_post_third_loop_done);

  Label L_check_1;

  z_aghi(idx, -2);
  z_brl(L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  lgr_if_needed(carry, product);


  bind(L_check_1);

  add2reg(idx, 0x2);
  and_imm(idx, 0x1);
  z_aghi(idx, -1);
  z_brl(L_post_third_loop_done);

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);  // multiplicand * yz_idx -> product::multiplicand
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);

  add2_with_carry(product, product_low, yz_idx, carry);

  z_sllg(Z_R7, idx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
  rshift(product_low, 32);

  lshift(product, 32);
  z_ogr(product_low, product);
  lgr_if_needed(carry, product_low);

  bind(L_post_third_loop_done);
}

void MacroAssembler::multiply_to_len(Register x, Register xlen,
                                     Register y, Register ylen,
                                     Register z,
                                     Register tmp1, Register tmp2,
                                     Register tmp3, Register tmp4,
                                     Register tmp5) {
  ShortBranchVerifier sbv(this);

  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);

  z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);

  // In OpenJDK, the zlen argument is passed as a 32-bit value in its stack slot.
  Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.

  const Register idx    = tmp1;
  const Register kdx    = tmp2;
  const Register xstart = tmp3;

  const Register y_idx  = tmp4;
  const Register carry  = tmp5;
  const Register product  = Z_R0_scratch;
  const Register x_xstart = Z_R8;

  // First Loop.
  //
  //   final static long LONG_MASK = 0xffffffffL;
  //   int xstart = xlen - 1;
  //   int ystart = ylen - 1;
  //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //     z[kdx] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[xstart] = (int)carry;
  //

  lgr_if_needed(idx, ylen);  // idx = ylen
  z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
  clear_reg(carry);          // carry = 0

  Label L_done;

  lgr_if_needed(xstart, xlen);
  z_aghi(xstart, -1);
  z_brl(L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  NearLabel L_second_loop;
  compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);

  NearLabel L_carry;
  z_aghi(kdx, -1);
  z_brz(L_carry);

  // Store lower 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  rshift(carry, 32);
  z_aghi(kdx, -1);


  bind(L_carry);

  // Store upper 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);

  // Second and third (nested) loops.
  //
  //   for (int i = xstart-1; i >= 0; i--) { // Second loop
  //     carry = 0;
  //     for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //       long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                      (z[k] & LONG_MASK) + carry;
  //       z[k] = (int)product;
  //       carry = product >>> 32;
  //     }
  //     z[i] = (int)carry;
  //   }
  //
  //  i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart

  const Register jdx = tmp1;

  bind(L_second_loop);

  clear_reg(carry);          // carry = 0;
  lgr_if_needed(jdx, ylen);  // j = ystart+1

  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_done);

  // Use free slots in the current stackframe instead of push/pop.
  Address zsave(Z_SP, _z_abi(carg_1));
  reg2mem_opt(z, zsave);


  Label L_last_x;

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  load_address(z, Address(z, Z_R1_scratch, 4));  // z = z + k - j
  z_aghi(xstart, -1);                            // i = xstart-1;
  z_brl(L_last_x);

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));


  Label L_third_loop_prologue;

  bind(L_third_loop_prologue);

  Address xsave(Z_SP, _z_abi(carg_2));
  Address xlensave(Z_SP, _z_abi(carg_3));
  Address ylensave(Z_SP, _z_abi(carg_4));

  reg2mem_opt(x, xsave);
  reg2mem_opt(xstart, xlensave);
  reg2mem_opt(ylen, ylensave);


  multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);

  mem2reg_opt(z, zsave);
  mem2reg_opt(x, xsave);
  mem2reg_opt(xlen, xlensave);  // This is the decrement of the loop counter!
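  // (xstart was saved to xlensave after being decremented; reloading it into
  //  xlen is what steps the outer loop variable down.)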
  mem2reg_opt(ylen, ylensave);

  add2reg(tmp3, 1, xlen);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_aghi(tmp3, -1);
  z_brl(L_done);

  rshift(carry, 32);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_bru(L_second_loop);

  // Infrequently executed code is moved outside the loops.
  bind(L_last_x);

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
  z_bru(L_third_loop_prologue);

  bind(L_done);

  z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
}

#ifndef PRODUCT
// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) {
    z_bre(ok);
  } else {
    z_brne(ok);
  }
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "low".
void MacroAssembler::asm_assert_low(const char *msg, int id) {
  Label ok;
  z_brnl(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "high".
void MacroAssembler::asm_assert_high(const char *msg, int id) {
  Label ok;
  z_brnh(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
// Generates non-relocatable code.
void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) { z_bre(ok); }
  else             { z_brne(ok); }
  stop_static(msg, id);
  bind(ok);
}

void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                                          Register mem_base, const char* msg, int id) {
  switch (size) {
    case 4:
      load_and_test_int(Z_R0, Address(mem_base, mem_offset));
      break;
    case 8:
      load_and_test_long(Z_R0, Address(mem_base, mem_offset));
      break;
    default:
      ShouldNotReachHere();
  }
  if (allow_relocation) { asm_assert(check_equal, msg, id); }
  else                  { asm_assert_static(check_equal, msg, id); }
}

// Check the condition
//   expected_size == FP - SP
// after transformation:
//   expected_size - FP + SP == 0
// Destroys Register expected_size if no tmp register is passed.
void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;
  } else if (tmp != expected_size) {
    z_lgr(tmp, expected_size);
  }
  z_algr(tmp, Z_SP);
  z_slg(tmp, 0, Z_R0, Z_SP);
  asm_assert_eq(msg, id);
}
#endif // !PRODUCT

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    unimplemented("", 117);
  }
}

// Plausibility check for oops.
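// Calls the verify_oop subroutine (via StubRoutines) with the oop in Z_ARG2 and
// the message in Z_ARG1; Z_R1..Z_R5 are preserved around the call (see below).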
void MacroAssembler::verify_oop(Register oop, const char* msg) {
  if (!VerifyOops) return;

  BLOCK_COMMENT("verify_oop {");
  Register tmp = Z_R0;
  unsigned int nbytes_save = 5*BytesPerWord;
  address entry = StubRoutines::verify_oop_subroutine_entry_address();

  save_return_pc();
  push_frame_abi160(nbytes_save);
  z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);

  z_lgr(Z_ARG2, oop);
  load_const(Z_ARG1, (address) msg);
  load_const(Z_R1, entry);
  z_lg(Z_R1, 0, Z_R1);
  call_c(Z_R1);

  z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
  pop_frame();
  restore_return_pc();

  BLOCK_COMMENT("} verify_oop ");
}

const char* MacroAssembler::stop_types[] = {
  "stop",
  "untested",
  "unimplemented",
  "shouldnotreachhere"
};

static void stop_on_request(const char* tp, const char* msg) {
  tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
  guarantee(false, "Z assembly code requires stop: %s", msg);
}

void MacroAssembler::stop(int type, const char* msg, int id) {
  BLOCK_COMMENT(err_msg("stop: %s {", msg));

  // Setup arguments.
  load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
  load_const(Z_ARG2, (void*) msg);
  get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
  save_return_pc();  // Saves return pc Z_R14.
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps assures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap();  // Illegal instruction.
  z_illtrap();  // Illegal instruction.

  BLOCK_COMMENT(" } stop");
}

// Special version of stop() for code size reduction.
// Reuses the previously generated call sequence, if any.
// Generates the call sequence on its own, if necessary.
// Note: This code will work only in non-relocatable code!
//       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
//       should be given for "hand-written" code, if all chain calls are in the same code blob.
//       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
  BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));

  // Setup arguments.
  if (allow_relocation) {
    // Relocatable version (for comparison purposes). Remove after some time.
    load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
    load_const(Z_ARG2, (void*) msg);
  } else {
    load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
    load_absolute_address(Z_ARG2, (address)msg);
  }
  if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
    BLOCK_COMMENT("branch to reentry point:");
    z_brc(bcondAlways, reentry);
  } else {
    BLOCK_COMMENT("reentry point:");
    reentry = pc();      // Re-entry point for subsequent stop calls.
    save_return_pc();    // Saves return pc Z_R14.
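    // This sequence (from the reentry point on) is shared by all stop calls
    // chained to this reentry point.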
    push_frame_abi160(0);
    if (allow_relocation) {
      reentry = NULL;  // Prevent reentry if code relocation is allowed.
      call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    } else {
      call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    }
    z_illtrap();  // Illegal instruction as emergency stop, should the above call return.
  }
  BLOCK_COMMENT(" } stop_chain");

  return reentry;
}

// Special version of stop() for code size reduction.
// Assumes constant relative addresses for data and runtime call.
void MacroAssembler::stop_static(int type, const char* msg, int id) {
  stop_chain(NULL, type, msg, id, false);
}

void MacroAssembler::stop_subroutine() {
  unimplemented("stop_subroutine", 710);
}

// Prints msg to stdout from within generated code.
void MacroAssembler::warn(const char* msg) {
  RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
  load_absolute_address(Z_R1, (address) warning);
  load_absolute_address(Z_ARG1, (address) msg);
  (void) call(Z_R1);
  RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
}

#ifndef PRODUCT

// Write pattern 0x0101010101010101 in region [low-before, high+after].
void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
  if (!ZapEmptyStackFields) return;
  BLOCK_COMMENT("zap memory region {");
  load_const_optimized(val, 0x0101010101010101);
  int size = before + after;
  if (low == high && size < 5 && size > 0) {
    int offset = -before*BytesPerWord;
    for (int i = 0; i < size; ++i) {
      z_stg(val, Address(low, offset));
      offset += (1*BytesPerWord);
    }
  } else {
    add2reg(addr, -before*BytesPerWord, low);
    if (after) {
#ifdef ASSERT
      jlong check = after * BytesPerWord;
      assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
#endif
      add2reg(high, after * BytesPerWord);
    }
    NearLabel loop;
    bind(loop);
    z_stg(val, Address(addr));
    add2reg(addr, 8);
    compare64_and_branch(addr, high, bcondNotHigh, loop);
    if (after) {
      add2reg(high, -after * BytesPerWord);
    }
  }
  BLOCK_COMMENT("} zap memory region");
}
#endif // !PRODUCT

SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
  _masm = masm;
  _masm->load_absolute_address(_rscratch, (address)flag_addr);
  _masm->load_and_test_int(_rscratch, Address(_rscratch));
  if (value) {
    _masm->z_brne(_label);  // Skip if true, i.e. != 0.
  } else {
    _masm->z_bre(_label);   // Skip if false, i.e. == 0.
  }
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
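// Typical use of SkipIfEqual, as a sketch (the flag name is illustrative only):
//
//   {
//     SkipIfEqual skip(_masm, &SomeBoolFlag, false, Z_R1_scratch);
//     // Code emitted here executes only when SomeBoolFlag is true:
//     // the constructor emits the runtime test and conditional branch,
//     // and the destructor binds the skip label.
//   }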