/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"

#include <ucontext.h>

#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Move 32-bit register if destination and source are different.
void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lr(rd, rs); }
}

// Move register if destination and source are different.
void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lgr(rd, rs); }
}

// Zero-extend 32-bit register into 64-bit register if destination and source are different.
void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_llgfr(rd, rs); }
}

// Move float register if destination and source are different.
void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) { z_ldr(rd, rs); }
}

// Move integer register if destination and source are different.
// It is assumed that shorter-than-int types are already
// appropriately sign-extended.
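// Example (illustrative): move_reg_if_needed(Z_R3, T_LONG, Z_R2, T_INT)
// emits LGFR Z_R3,Z_R2 (sign extension), while identical src and dst types
// degenerate to a plain LGR, or to no instruction at all if dst == src.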
void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
                                        BasicType src_type) {
  assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");

  if (dst_type == src_type) {
    lgr_if_needed(dst, src); // Just move all 64 bits.
    return;
  }

  switch (dst_type) {
    // Do not support these types for now.
    //  case T_BOOLEAN:
    case T_BYTE:  // signed byte
      switch (src_type) {
        case T_INT:
          z_lgbr(dst, src);
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_CHAR:
    case T_SHORT:
      switch (src_type) {
        case T_INT:
          if (dst_type == T_CHAR) {
            z_llghr(dst, src);
          } else {
            z_lghr(dst, src);
          }
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_INT:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lr_if_needed(dst, src);
          //  llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
    case T_LONG:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
          z_lgfr(dst, src); // sign extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    case T_OBJECT:
    case T_ARRAY:
    case T_VOID:
    case T_ADDRESS:
      switch (src_type) {
        // These types don't make sense to be converted to pointers:
        //  case T_BOOLEAN:
        //  case T_BYTE:
        //  case T_CHAR:
        //  case T_SHORT:

        case T_INT:
          z_llgfr(dst, src); // zero extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    default:
      assert(false, "non-integer dst type");
      return;
  }
}

// Move float register if destination and source are different.
void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
                                         FloatRegister src, BasicType src_type) {
  assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
  assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
  if (dst_type == src_type) {
    ldr_if_needed(dst, src); // Just move all 64 bits.
  } else {
    switch (dst_type) {
      case T_FLOAT:
        assert(src_type == T_DOUBLE, "invalid float type combination");
        z_ledbr(dst, src);
        return;
      case T_DOUBLE:
        assert(src_type == T_FLOAT, "invalid float type combination");
        z_ldebr(dst, src);
        return;
      default:
        assert(false, "non-float dst type");
        return;
    }
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
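// For reference: a "short" displacement is 12-bit unsigned (0..4095, classic
// instruction formats), a "valid" displacement is 20-bit signed (long-displacement
// *Y-type formats). Anything outside that range requires the scratch register
// workaround implemented below.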
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::freg2mem_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) {   // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch);  // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
  } else {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
  }
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::mem2freg_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) {   // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch);  // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
  } else {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register
// (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::reg2mem_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                                 Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) {   // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp);  // Restore base.
            }
          }
        } else {   // scratch == Z_R0
          if ((scratch == reg) || (scratch == base) || (reg == base)) {
            (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
          } else {
            z_lgr(scratch, base);
            add2reg(base, disp);
            (this->*classic)(reg, 0, index, base);
            z_lgr(base, scratch);  // Restore base.
          }
        }
      }
    }
  }
}

int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
  int store_offset = offset();
  if (is_double) {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
  } else {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
  }
  return store_offset;
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) will be used as work register where possible.
void MacroAssembler::mem2reg_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if ((reg == index) && (reg == base)) {
        z_sllg(reg, reg, 1);
        add2reg(reg, disp);
        (this->*classic)(reg, 0, noreg, reg);
      } else if ((reg == index) && (reg != Z_R0)) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, reg, base);
      } else if (reg == base) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, index, reg);
      } else if (reg != Z_R0) {
        add2reg(reg, disp, base);
        (this->*classic)(reg, 0, index, reg);
      } else { // reg == Z_R0 && reg != base here
        add2reg(base, disp);
        (this->*classic)(reg, 0, index, base);
        add2reg(base, -disp);
      }
    }
  }
}

void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
  if (is_double) {
    z_lg(reg, a);
  } else {
    mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
  }
}

void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
  mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
}

void MacroAssembler::and_imm(Register r, long mask,
                             Register tmp /* = Z_R0 */,
                             bool wide /* = false */) {
  assert(wide || Immediate::is_simm32(mask), "mask value too large");

  if (!wide) {
    z_nilf(r, mask);
    return;
  }

  assert(r != tmp, "need a different temporary register!");
  load_const_optimized(tmp, mask);
  z_ngr(r, tmp);
}

// Calculate the 1's complement.
// Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as optimization hint for 32-bit results.
void MacroAssembler::not_(Register r1, Register r2, bool wide) {

  if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
    z_xilf(r1, -1);
    if (wide) {
      z_xihf(r1, -1);
    }
  } else { // Distinct src and dst registers.
    if (VM_Version::has_DistinctOpnds()) {
      load_const_optimized(r1, -1);
      z_xgrk(r1, r2, r1);
    } else {
      if (wide) {
        z_lgr(r1, r2);
        z_xilf(r1, -1);
        z_xihf(r1, -1);
      } else {
        z_lr(r1, r2);
        z_xilf(r1, -1);
      }
    }
  }
}

unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
  assert(lBitPos >= 0,       "zero is leftmost bit position");
  assert(rBitPos <= 63,      "63 is rightmost bit position");
  assert(lBitPos <= rBitPos, "inverted selection interval");
  return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
}

// Helper function for the "Rotate_then_<logicalOP>" emitters.
// Rotate src, then mask register contents such that only bits in range survive.
// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
// For oneBits == true, all bits not in range are set to 1. Useful for preserving all bits outside range.
// The caller must ensure that the selected range only contains bits with defined value.
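// Example (illustrative, IBM bit numbering with bit 0 = MSB):
// rotate_then_mask(dst, src, 48, 63, -16, false, false, false) with
// src == 0x0000000012340000 rotates right by 16 (bits 32..47 move to 48..63)
// and applies create_mask(48, 63) == 0x000000000000ffff, leaving dst == 0x1234.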
void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                                      int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
  assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
  bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
  bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
  // Pre-determine which parts of dst will be zero after shift/rotate.
  bool llZero  =  sll4rll && (nRotate >= 16);
  bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
  bool lfZero  = llZero && lhZero;
  bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
  bool hhZero  =                                 (srl4rll && (nRotate <= -16));
  bool hfZero  = hlZero && hhZero;

  // Rotate, then mask src operand.
  // If oneBits == true,  all bits outside selected range are 1s.
  // If oneBits == false, all bits outside selected range are 0s.
  if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
    if (dst32bit) {
      z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
    } else {
      if      (sll4rll) { z_sllg(dst, src,  nRotate); }
      else if (srl4rll) { z_srlg(dst, src, -nRotate); }
      else              { z_rllg(dst, src,  nRotate); }
    }
  } else {
    if      (sll4rll) { z_sllg(dst, src,  nRotate); }
    else if (srl4rll) { z_srlg(dst, src, -nRotate); }
    else              { z_rllg(dst, src,  nRotate); }
  }

  unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
  unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
  unsigned int   range_mask_l  = (unsigned int)range_mask;
  unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
  unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
  unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
  unsigned short range_mask_ll = (unsigned short)range_mask;
  // Works for z9 and newer H/W.
  if (oneBits) {
    if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
    if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
  } else {
    // All bits outside range become 0s.
    if (((~range_mask_l) != 0) && !lfZero) {
      z_nilf(dst, range_mask_l);
    }
    if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
      z_nihf(dst, range_mask_h);
    }
  }
}

// Rotate src, then insert selected range from rotated src into dst.
// Clear dst before, if requested.
void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
                                        int nRotate, bool clear_dst) {
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
}

// Rotate src, then and selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
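  // Example: nRotate == -8 is masked to 56 below; rotating left by 56 bit
  // positions is equivalent to rotating right by 8, which is what was requested.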
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
}

// Rotate src, then or selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos,
                                    int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
}

// Rotate src, then xor selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
  if (inc.is_register()) {
    z_agr(r1, inc.as_register());
  } else { // constant
    intptr_t imm = inc.as_constant();
    add2reg(r1, imm);
  }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
// The optimization tries to avoid the mghi instruction, since it uses the FPU for
// calculation and is thus rather slow.
//
// There is no handling for special cases, e.g. cval==0 or cval==1.
//
// Returns len of generated code block.
unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
  int block_start = offset();

  bool sign_flip = cval < 0;
  cval = sign_flip ? -cval : cval;

  BLOCK_COMMENT("Reg64*Con16 {");

  int bit1 = cval & -cval;
  if (bit1 == cval) {
    z_sllg(rval, rval, exact_log2(bit1));
    if (sign_flip) { z_lcgr(rval, rval); }
  } else {
    int bit2 = (cval-bit1) & -(cval-bit1);
    if ((bit1+bit2) == cval) {
      z_sllg(work, rval, exact_log2(bit1));
      z_sllg(rval, rval, exact_log2(bit2));
      z_agr(rval, work);
      if (sign_flip) { z_lcgr(rval, rval); }
    } else {
      if (sign_flip) { z_mghi(rval, -cval); }
      else           { z_mghi(rval,  cval); }
    }
  }
  BLOCK_COMMENT("} Reg64*Con16");

  int block_end = offset();
  return block_end - block_start;
}

// Generic operation r1 := r2 + imm.
//
// Should produce the best code for each supported CPU version.
// r2 == noreg yields r1 := r1 + imm
// imm == 0 emits either no instruction or r1 := r2 !
// NOTES: 1) Don't use this function where fixed sized
//           instruction sequences are required!!!
//        2) Don't use this function if condition code
//           setting is required!
//        3) Despite being declared as int64_t, the parameter imm
//           must be a simm_32 value (= signed 32-bit integer).
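// Example (illustrative): add2reg(Z_R3, 8, Z_R4) typically materializes as
// LA Z_R3,8(Z_R4) when PreferLAoverADD is set, and as AGHIK (or LGR + AGHI
// on hardware without distinct operands) otherwise.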
void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
  assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");

  if (r2 == noreg) { r2 = r1; }

  // Handle special case imm == 0.
  if (imm == 0) {
    lgr_if_needed(r1, r2);
    // Nothing else to do.
    return;
  }

  if (!PreferLAoverADD || (r2 == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();

    // Can we encode imm in 16 bits signed?
    if (Immediate::is_simm16(imm)) {
      if (r1 == r2) {
        z_aghi(r1, imm);
        return;
      }
      if (distinctOpnds) {
        z_aghik(r1, r2, imm);
        return;
      }
      z_lgr(r1, r2);
      z_aghi(r1, imm);
      return;
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(imm)) {
      z_la(r1, imm, r2);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(imm)) {
      // Always use LAY instruction, so we don't need the tmp register.
      z_lay(r1, imm, r2);
      return;
    }
  }

  // Can handle it (all possible values) with long immediates.
  lgr_if_needed(r1, r2);
  z_agfi(r1, imm);
}

// Generic operation r := b + x + d
//
// Addition of several operands with address generation semantics - sort of:
//  - no restriction on the registers. Any register will do for any operand.
//  - x == noreg: operand will be disregarded.
//  - b == noreg: will use (contents of) result reg as operand (r := r + d).
//  - x == Z_R0:  just disregard
//  - b == Z_R0:  use as operand. This is not address generation semantics!!!
//
// The same restrictions as on add2reg() are valid!!!
void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
  assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");

  if (x == noreg) { x = Z_R0; }
  if (b == noreg) { b = r; }

  // Handle special case x == R0.
  if (x == Z_R0) {
    // Can simply add the immediate value to the base register.
    add2reg(r, d, b);
    return;
  }

  if (!PreferLAoverADD || (b == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();
    // Handle special case d == 0.
    if (d == 0) {
      if (b == x)        { z_sllg(r, b, 1); return; }
      if (r == x)        { z_agr(r, b);     return; }
      if (r == b)        { z_agr(r, x);     return; }
      if (distinctOpnds) { z_agrk(r, x, b); return; }
      z_lgr(r, b);
      z_agr(r, x);
    } else {
      if (x == b)             { z_sllg(r, x, 1); }
      else if (r == x)        { z_agr(r, b); }
      else if (r == b)        { z_agr(r, x); }
      else if (distinctOpnds) { z_agrk(r, x, b); }
      else {
        z_lgr(r, b);
        z_agr(r, x);
      }
      add2reg(r, d);
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(d)) {
      z_la(r, d, x, b);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(d)) {
      z_lay(r, d, x, b);
      return;
    }
    z_la(r, 0, x, b);
    add2reg(r, d);
  }
}

// Generic emitter (32bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
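// Example (illustrative): add2mem_32(Address(Z_R2, 16), 1, Z_R1) emits a single
// ASI 16(Z_R2),1 on hardware with memory-with-immediate ALU ops (cf.
// has_MemWithImmALUOps()), and a load/add/store sequence (LGF, AGHI or LA, ST)
// otherwise.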
void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_asi(a, imm);
  } else {
    z_lgf(tmp, a);
    add2reg(tmp, imm);
    z_st(tmp, a);
  }
}

void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_agsi(a, imm);
  } else {
    z_lg(tmp, a);
    add2reg(tmp, imm);
    z_stg(tmp, a);
  }
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case  8: z_lg(dst, src); break;
    case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
    case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
    case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
    default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
    case  8: z_stg(src, dst); break;
    case  4: z_st(src, dst); break;
    case  2: z_sth(src, dst); break;
    case  1: z_stc(src, dst); break;
    default: ShouldNotReachHere();
  }
}

// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
// a high-order summand in register tmp.
//
// return value: < 0:  No split required, si20 actually has property uimm12.
//               >= 0: Split performed. Use return value as uimm12 displacement and
//                     tmp as index register.
int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
  assert(Immediate::is_simm20(si20_offset), "sanity");
  int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
  int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
  assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
         !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
  assert((lg_off+ll_off) == si20_offset, "offset splitup error");

  Register work = accumulate ? Z_R0 : tmp;

  if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
    z_lghi(work, ll_off>>12);   // Implicit sign extension.
    z_slag(work, work, 12);
  } else {                      // Len of code = 0..10.
    if (ll_off == 0) { return -1; }
    // ll_off has 8 significant bits (at most) plus sign.
    if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
      z_llilh(work, ll_off >> 16);
      if (ll_off < 0) {                  // Sign-extension required.
        z_lgfr(work, work);
      }
    } else {
      if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
        z_llill(work, ll_off);
      } else {                           // Non-zero bits in both halfbytes.
        z_lghi(work, ll_off>>12);        // Implicit sign extension.
        z_slag(work, work, 12);
      }
    }
  }
  if (accumulate) { z_algr(tmp, work); } // len of code += 4
  return lg_off;
}

void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ley(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
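    // With fixed_codelen == true the emitted sequence is always
    // LGR, LGHI, SLAG, ALGR, LE -- constant size, independent of si20.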
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_le(t, si20, work);
    } else {
      if (accumulate) {
        z_le(t, disp12, work);
      } else {
        z_le(t, disp12, work, a);
      }
    }
  }
}

void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ldy(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_ld(t, si20, work);
    } else {
      if (accumulate) {
        z_ld(t, disp12, work);
      } else {
        z_ld(t, disp12, work, a);
      }
    }
  }
}

// PCrelative TOC access.
// Returns distance (in bytes) from current position to start of consts section.
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
  CodeSection* cs = code()->consts();
  return (long)((cs != NULL) ? cs->start() - pc() : 0);
}

// Implementation on x86/sparc assumes that constant and instruction section are
// adjacent, but this doesn't hold. Two special situations may occur, which we must
// be able to handle:
//   1. const section may be located apart from the inst section.
//   2. const section may be empty.
// In both cases, we use the const section's start address to compute the "TOC";
// this seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// PC-relative offset could be +/-2**32 -> use long for disp.
// Furthermore: makes no sense to have special code for
// adjacent const and inst sections.
void MacroAssembler::load_toc(Register Rtoc) {
  // Simply use distance from start of const section (should be patched in the end).
  long disp = toc_distance();

  RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
  relocate(rspec);
  z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
}

// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr).
void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = dataLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
  assert(total_distance != 0, "sanity");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr).
// Loaded addr has to be relocated when added to constant pool.
void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = addrLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// Generic operation: load a value from memory and test.
// CondCode indicates the sign (<0, ==0, >0) of the loaded value.
void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
  z_lb(dst, a);
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
  int64_t disp = a.disp20();
  if (Displacement::is_shortDisp(disp)) {
    z_lh(dst, a);
  } else if (Displacement::is_longDisp(disp)) {
    z_lhy(dst, a);
  } else {
    guarantee(false, "displacement out of range");
  }
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
  z_lt(dst, a);
}

void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
  z_ltgf(dst, a);
}

void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
  z_ltg(dst, a);
}

// Test a bit in memory.
void MacroAssembler::testbit(const Address &a, unsigned int bit) {
  assert(a.index() == noreg, "no index reg allowed in testbit");
  if (bit <= 7) {
    z_tm(a.disp() + 3, a.base(), 1 << bit);
  } else if (bit <= 15) {
    z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
  } else if (bit <= 23) {
    z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
  } else if (bit <= 31) {
    z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
  } else {
    ShouldNotReachHere();
  }
}

// Test a bit in a register. Result is reflected in CC.
void MacroAssembler::testbit(Register r, unsigned int bitPos) {
  if (bitPos < 16) {
    z_tmll(r, 1U<<bitPos);
  } else if (bitPos < 32) {
    z_tmlh(r, 1U<<(bitPos-16));
  } else if (bitPos < 48) {
    z_tmhl(r, 1U<<(bitPos-32));
  } else if (bitPos < 64) {
    z_tmhh(r, 1U<<(bitPos-48));
  } else {
    ShouldNotReachHere();
  }
}

void MacroAssembler::prefetch_read(Address a) {
  z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
}
void MacroAssembler::prefetch_update(Address a) {
  z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
}

// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
// set_cc:    Use instruction that sets the condition code, if true.
int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
  unsigned int start_off = offset();
  if (whole_reg) {
    set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
  } else {  // Only 32bit register.
    set_cc ? z_xr(r, r) : z_lhi(r, 0);
  }
  return offset() - start_off;
}

#ifdef ASSERT
int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
  switch (pattern_len) {
    case 1:
      pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
      // fall through: replicate to next-wider unit.
    case 2:
      pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
      // fall through
    case 4:
      pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
      // fall through
    case 8:
      return load_const_optimized_rtn_len(r, pattern, true);
      break;
    default:
      guarantee(false, "preset_reg: bad len");
  }
  return 0;
}
#endif

// addr: Address descriptor of memory to clear. The index register will not be used!
// size: Number of bytes to clear.
//    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
//    !!! Use store_const() instead                  !!!
void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
  guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");

  if (size == 1) {
    z_mvi(addr, 0);
    return;
  }

  switch (size) {
    case 2: z_mvhhi(addr, 0);
      return;
    case 4: z_mvhi(addr, 0);
      return;
    case 8: z_mvghi(addr, 0);
      return;
    default: ; // Fallthru to xc.
  }

  z_xc(addr, size, addr);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) z_nop();
}

// Special version for non-relocatable code if required alignment
// is larger than CodeEntryAlignment.
void MacroAssembler::align_address(int modulus) {
  while ((uintptr_t)pc() % modulus != 0) z_nop();
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register           temp_reg,
                                         int64_t            extra_slot_offset) {
  // On Z, we can have index and disp in an Address. So don't call argument_offset,
  // which issues an unnecessary add instruction.
  int stackElementSize = Interpreter::stackElementSize;
  int64_t offset = extra_slot_offset * stackElementSize;
  const Register argbase = Z_esp;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return Address(argbase, offset);
  }
  // else
  assert(temp_reg != noreg, "must specify");
  assert(temp_reg != Z_ARG1, "base and index are conflicting");
  z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
  return Address(argbase, temp_reg, offset);
}


//===================================================================
//===   START  C O N S T A N T S   I N   C O D E   S T R E A M   ===
//===================================================================
//===          P A T C H A B L E   C O N S T A N T S             ===
//===================================================================


//---------------------------------------------------
//  Load (patchable) constant into register
//---------------------------------------------------


// Load absolute address (and try to optimize).
// Note: This method is usable only for position-fixed code,
//       referring to a position-fixed target location.
//       If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
  assert(addr != NULL, "should not happen");
  BLOCK_COMMENT("load_absolute_address:");
  if (addr == NULL) {
    z_larl(d, pc()); // Dummy emit for size calc.
    return;
  }

  if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
    z_larl(d, addr);
    return;
  }

  load_const_optimized(d, (long)addr);
}

// Load a 64bit constant.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const(Register t, long x) {
  Assembler::z_iihf(t, (int)(x >> 32));
  Assembler::z_iilf(t, (int)(x & 0xffffffff));
}

// Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
  if (sign_extend) { Assembler::z_lgfi(t, x); }
  else             { Assembler::z_llilf(t, x); }
}

// Load narrow oop constant, no decompression.
void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
  assert(UseCompressedOops, "must be on to call this method");
  load_const_32to64(t, a, false /*sign_extend*/);
}

// Load narrow klass constant, compression required.
void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(k);
  load_const_32to64(t, encoded_k, false /*sign_extend*/);
}

//------------------------------------------------------
//  Compare (patchable) constant with register.
//------------------------------------------------------

// Compare narrow oop in reg with narrow oop constant, no decompression.
void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
  assert(UseCompressedOops, "must be on to call this method");

  Assembler::z_clfi(oop1, oop2);
}

// Compare narrow klass in reg with narrow klass constant, no decompression.
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(klass2);

  Assembler::z_clfi(klass1, encoded_k);
}

//----------------------------------------------------------
//  Check which kind of load_constant we have here.
//----------------------------------------------------------

// Detection of CPU version dependent load_const sequence.
// The detection is valid only for code sequences generated by load_const,
// not load_const_optimized.
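// load_const always emits the fixed pair IIHF (insert immediate, high word)
// followed by IILF (insert immediate, low word), so matching those two
// opcodes back to back is sufficient to identify the sequence.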
bool MacroAssembler::is_load_const(address a) {
  unsigned long inst1, inst2;
  unsigned int  len1,  len2;

  len1 = get_instruction(a, &inst1);
  len2 = get_instruction(a + len1, &inst2);

  return is_z_iihf(inst1) && is_z_iilf(inst2);
}

// Detection of CPU version dependent load_const_32to64 sequence.
// Mostly used for narrow oops and narrow Klass pointers.
// The detection is valid only for code sequences generated by load_const_32to64.
bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1;

  get_instruction(pos, &inst1);
  return is_z_llilf(inst1);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate32(address pos) {
  return is_equal(pos, CLFI_ZOPC, RIL_MASK);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
  return is_compare_immediate32(pos);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
  return is_compare_immediate32(pos);
}

//-----------------------------------
//  Patch the load_constant.
//-----------------------------------

// CPU-version dependent patching of load_const.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const(a), "not a load of a constant");
  set_imm32((address)a, (int)((x >> 32) & 0xffffffff));
  set_imm32((address)(a + 6), (int)(x & 0xffffffff));
}

// Patching the value of CPU version dependent load_const_32to64 sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
  assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");

  set_imm32(pos, np);
  return 6;
}

// Patching the value of CPU version dependent compare_immediate_narrow sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
  assert(is_compare_immediate32(pos), "not a compressed ptr compare");

  set_imm32(pos, np);
  return 6;
}

// Patching the immediate value of CPU version dependent load_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = CompressedOops::encode(o);
  return patch_load_const_32to64(pos, no);
}

// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
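// Note: patching works the other way round than loading: the uncompressed
// oop/klass handed in is first compressed (CompressedOops::encode resp.
// Klass::encode_klass), then the 32-bit immediate of the emitted sequence
// is overwritten in place.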
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_load_const_32to64(pos, nk);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = CompressedOops::encode(o);
  return patch_compare_immediate_32(pos, no);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_compare_immediate_32(pos, nk);
}

//------------------------------------------------------------------------
//  Extract the constant from a load_constant instruction stream.
//------------------------------------------------------------------------

// Get constant from a load_const sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const(a), "not a load of a constant");
  unsigned long x;
  x  = (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
  x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
  return (long) x;
}

//--------------------------------------
//  Store a constant in memory.
//--------------------------------------

// General emitter to move a constant to memory.
// The store is atomic.
//  o Address must be given in RS format (no index register)
//  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
//  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot must be at least as wide as constant, will assert otherwise.
//  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
int MacroAssembler::store_const(const Address &dest, long imm,
                                unsigned int lm, unsigned int lc,
                                Register scratch) {
  int64_t  disp = dest.disp();
  Register base = dest.base();
  assert(!dest.has_index(), "not supported");
  assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
  assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
  assert(lm>=lc, "memory slot too small");
  assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
  assert(Displacement::is_validDisp(disp), "displacement out of range");

  bool is_shortDisp = Displacement::is_shortDisp(disp);
  int store_offset = -1;

  // For target len == 1 it's easy.
  if (lm == 1) {
    store_offset = offset();
    if (is_shortDisp) {
      z_mvi(disp, base, imm);
      return store_offset;
    } else {
      z_mviy(disp, base, imm);
      return store_offset;
    }
  }

  // All the "good stuff" takes an unsigned displacement.
  if (is_shortDisp) {
    // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
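    // MVHHI/MVHI/MVGHI below store a 16-bit immediate, sign-extended to the
    // slot width, in a single (atomic) instruction.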

    store_offset = offset();
    switch (lm) {
      case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
        z_mvhhi(disp, base, imm);
        return store_offset;
      case 4:
        if (Immediate::is_simm16(imm)) {
          z_mvhi(disp, base, imm);
          return store_offset;
        }
        break;
      case 8:
        if (Immediate::is_simm16(imm)) {
          z_mvghi(disp, base, imm);
          return store_offset;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Can't optimize, so load value and store it.
  guarantee(scratch != noreg, "need a scratch register here!");
  if (imm != 0) {
    load_const_optimized(scratch, imm);  // Preserves CC anyway.
  } else {
    // Leave CC alone!!
    (void) clear_reg(scratch, true, false); // Indicate unused result.
  }

  store_offset = offset();
  if (is_shortDisp) {
    switch (lm) {
      case 2:
        z_sth(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_st(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
    switch (lm) {
      case 2:
        z_sthy(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_sty(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return -1; // should not reach here
}

//===================================================================
//===       N O T   P A T C H A B L E   C O N S T A N T S        ===
//===================================================================

// Load constant x into register t with a fast instruction sequence
// depending on the bits in x. Preserves CC under all circumstances.
int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
  if (x == 0) {
    int len;
    if (emit) {
      len = clear_reg(t, true, false);
    } else {
      len = 4;
    }
    return len;
  }

  if (Immediate::is_simm16(x)) {
    if (emit) { z_lghi(t, x); }
    return 4;
  }

  // 64 bit value: | part1 | part2 | part3 | part4 |
  // At least one part is not zero!
  int part1 = ((x >> 32) & 0xffff0000) >> 16;
  int part2 = (x >> 32) & 0x0000ffff;
  int part3 = (x & 0xffff0000) >> 16;
  int part4 = (x & 0x0000ffff);

  // Lower word only (unsigned).
  if ((part1 == 0) && (part2 == 0)) {
    if (part3 == 0) {
      if (emit) z_llill(t, part4);
      return 4;
    }
    if (part4 == 0) {
      if (emit) z_llilh(t, part3);
      return 4;
    }
    if (emit) z_llilf(t, (int)(x & 0xffffffff));
    return 6;
  }

  // Upper word only.
  if ((part3 == 0) && (part4 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      return 4;
    }
    if (part2 == 0) {
      if (emit) z_llihh(t, part1);
      return 4;
    }
    if (emit) z_llihf(t, (int)(x >> 32));
    return 6;
  }

  // Lower word only (signed).
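  // (I.e., x is a negative 32-bit value, sign-extended to 64 bits: the entire
  // upper word and the sign bit of the lower word are 1s. Example: x == -5
  // emits LGFI t,-5 in 6 bytes.)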
  if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
    if (emit) z_lgfi(t, (int)(x & 0xffffffff));
    return 6;
  }

  int len = 0;

  if ((part1 == 0) || (part2 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      len += 4;
    } else {
      if (emit) z_llihh(t, part1);
      len += 4;
    }
  } else {
    if (emit) z_llihf(t, (int)(x >> 32));
    len += 6;
  }

  if ((part3 == 0) || (part4 == 0)) {
    if (part3 == 0) {
      if (emit) z_iill(t, part4);
      len += 4;
    } else {
      if (emit) z_iilh(t, part3);
      len += 4;
    }
  } else {
    if (emit) z_iilf(t, (int)(x & 0xffffffff));
    len += 6;
  }
  return len;
}

//=====================================================================
//===   H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
//=====================================================================

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
}

void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
}

// Generate an optimal branch to the branch target.
// Optimal means that a relative branch (brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Used registers:
//   Z_R1 - work reg. Holds branch target address.

//=====================================================================
//===    H I G H E R   L E V E L   B R A N C H   E M I T T E R S    ===
//=====================================================================

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
}

void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
}

// Generate an optimal branch to the branch target.
// Optimal means that a relative branch (brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Used registers:
//   Z_R1 - work reg. Holds branch target address.
//          Used in fallback case only.
//
// This version of branch_optimized is good for cases where the target address is known
// and constant, i.e. is never changed (no relocation, no patching).
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
  address branch_origin = pc();

  if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    z_brc(cond, branch_addr);
  } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
    z_brcl(cond, branch_addr);
  } else {
    load_const_optimized(Z_R1, branch_addr); // CC must not get killed by load_const_optimized.
    z_bcr(cond, Z_R1);
  }
}

// This version of branch_optimized is good for cases where the target address
// is potentially not yet known at the time the code is emitted.
//
// One very common case is a branch to an unbound label, which is handled here.
// The caller might know (or hope) that the branch distance is short enough
// to be encoded in a 16-bit relative address. In that case, the caller passes a
// NearLabel branch_target.
// Care must be taken with unbound labels. Each call to target(label) creates
// an entry in the patch queue for that label to patch all references of the label
// once it gets bound. Those recorded patch locations must be patchable. Otherwise,
// an assertion fires at patch time.
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    branch_optimized(cond, branch_addr);
  } else if (branch_target.is_near()) {
    z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
  } else {
    z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
  }
}

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  address  branch_addr,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  address branch_origin = pc();
  if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    switch (casenum) {
      case 0: z_crj(  r1, r2, cond, branch_addr); break;
      case 1: z_clrj( r1, r2, cond, branch_addr); break;
      case 2: z_cgrj( r1, r2, cond, branch_addr); break;
      case 3: z_clgrj(r1, r2, cond, branch_addr); break;
      default: ShouldNotReachHere(); break;
    }
  } else {
    switch (casenum) {
      case 0: z_cr( r1, r2); break;
      case 1: z_clr(r1, r2); break;
      case 2: z_cgr(r1, r2); break;
      case 3: z_clgr(r1, r2); break;
      default: ShouldNotReachHere(); break;
    }
    branch_optimized(cond, branch_addr);
  }
}
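
// Illustrative sketch (not part of the build): the two code shapes emitted by
// the register/register variant above for a signed 64-bit compare (casenum == 2).
// If the target is within RelAddr16 range and the compare-and-branch facility
// is available (VM_Version::has_CompareBranch()):
//   z_cgrj(r1, r2, cond, branch_addr);     // one fused 6-byte instruction
// Otherwise:
//   z_cgr(r1, r2);                         // set CC
//   branch_optimized(cond, branch_addr);   // brc/brcl, or load target + bcr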

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgij, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand (in register)
//   x2 - right compare operand (immediate)
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  jlong    x2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  address branch_origin = pc();
  bool    x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
  bool    is_RelAddr16  = branch_target.is_near() ||
                          (branch_target.is_bound() &&
                           RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
  unsigned int casenum  = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
    switch (casenum) {
      case 0: z_cij(  r1, x2, cond, branch_target); break;
      case 1: z_clij( r1, x2, cond, branch_target); break;
      case 2: z_cgij( r1, x2, cond, branch_target); break;
      case 3: z_clgij(r1, x2, cond, branch_target); break;
      default: ShouldNotReachHere(); break;
    }
    return;
  }

  if (x2 == 0) {
    switch (casenum) {
      case 0: z_ltr(r1, r1);  break;
      case 1: z_ltr(r1, r1);  break; // Caution: unsigned test only provides zero/notZero indication!
      case 2: z_ltgr(r1, r1); break;
      case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      default: ShouldNotReachHere(); break;
    }
  } else {
    if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
      switch (casenum) {
        case 0: z_chi(r1, x2);  break;
        case 1: z_chi(r1, x2);  break; // positive immediate < 2**15
        case 2: z_cghi(r1, x2); break;
        case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
        default: break;
      }
    } else if ((has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2))) {
      switch (casenum) {
        case 0: z_cfi( r1, x2); break;
        case 1: z_clfi(r1, x2); break;
        case 2: z_cgfi(r1, x2); break;
        case 3: z_clgfi(r1, x2); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      // No instruction with immediate operand possible, so load into register.
      Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
      load_const_optimized(scratch, x2);
      switch (casenum) {
        case 0: z_cr( r1, scratch); break;
        case 1: z_clr(r1, scratch); break;
        case 2: z_cgr(r1, scratch); break;
        case 3: z_clgr(r1, scratch); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
  branch_optimized(cond, branch_target);
}
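
// Illustrative sketch (not part of the build): instruction selection in the
// register/immediate variant above, for a signed 32-bit compare (casenum == 0):
//   x2 fits in 8 bits and the label is near  ->  z_cij(r1, x2, cond, lbl)   (fused)
//   x2 == 0                                  ->  z_ltr(r1, r1)              (load and test)
//   x2 fits in 16 bits                       ->  z_chi(r1, x2)
//   x2 fits in 32 bits                       ->  z_cfi(r1, x2)
//   otherwise                                ->  load_const_optimized(scratch, x2);
//                                                z_cr(r1, scratch)
// followed by branch_optimized(cond, lbl) in all non-fused cases.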

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
  } else {
    if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
      switch (casenum) {
        case 0: z_crj(  r1, r2, cond, branch_target); break;
        case 1: z_clrj( r1, r2, cond, branch_target); break;
        case 2: z_cgrj( r1, r2, cond, branch_target); break;
        case 3: z_clgrj(r1, r2, cond, branch_target); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      switch (casenum) {
        case 0: z_cr( r1, r2); break;
        case 1: z_clr(r1, r2); break;
        case 2: z_cgr(r1, r2); break;
        case 3: z_clgr(r1, r2); break;
        default: ShouldNotReachHere(); break;
      }
      branch_optimized(cond, branch_target);
    }
  }
}

//===========================================================================
//===  END  H I G H E R   L E V E L   B R A N C H   E M I T T E R S      ===
//===========================================================================

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

// NOTE: destroys r.
void MacroAssembler::c2bool(Register r, Register t) {
  z_lcr(t, r);   // t = -r
  z_or(r, t);    // r = -r OR r
  z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
}
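
// Illustrative note (not part of the build): why c2bool works. For r = 5:
//   t = -r        = 0xfffffffb   (z_lcr)
//   r = r | t     = 0xffffffff   (z_or: the sign bit is set whenever r != 0,
//                                 since r and -r cannot both be non-negative)
//   r = r >>> 31  = 1            (z_srl)
// For r = 0: t = 0, r | t = 0, and 0 >>> 31 = 0.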

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  BLOCK_COMMENT("delayed_value {");
  // Load indirectly to solve generation ordering problem.
  load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
  z_lg(tmp, 0, tmp);                                        // tmp = *tmp;

#ifdef ASSERT
  NearLabel L;
  compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
  z_illtrap();
  bind(L);
#endif

  if (offset != 0) {
    z_agfi(tmp, offset); // tmp = tmp + offset;
  }

  BLOCK_COMMENT("} delayed_value");
  return RegisterOrConstant(tmp);
}

// Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
// and return the resulting instruction.
// Dest_pos and inst_pos are 32 bit only. These parms can only designate
// relative positions.
// Use correct argument types. Do not pre-calculate distance.
unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
  int c = 0;
  unsigned long patched_inst = 0;
  if (is_call_pcrelative_short(inst) ||
      is_branch_pcrelative_short(inst) ||
      is_branchoncount_pcrelative_short(inst) ||
      is_branchonindex32_pcrelative_short(inst)) {
    c = 1;
    int m = fmask(15, 0);    // simm16(-1, 16, 32);
    int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
    patched_inst = (inst & ~m) | v;
  } else if (is_compareandbranch_pcrelative_short(inst)) {
    c = 2;
    long m = fmask(31, 16);  // simm16(-1, 16, 48);
    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_branchonindex64_pcrelative_short(inst)) {
    c = 3;
    long m = fmask(31, 16);  // simm16(-1, 16, 48);
    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
    c = 4;
    long m = fmask(31, 0);   // simm32(-1, 16, 48);
    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
    c = 5;
    long m = fmask(31, 0);   // simm32(-1, 16, 48);
    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else {
    print_dbg_msg(tty, inst, "not a relative branch", 0);
    dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
    ShouldNotReachHere();
  }

  long new_off = get_pcrel_offset(patched_inst);
  if (new_off != (dest_pos - inst_pos)) {
    tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
    print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
    print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#endif
    ShouldNotReachHere();
  }
  return patched_inst;
}

// Only called when binding labels (share/vm/asm/assembler.cpp).
// Pass arguments as intended. Do not pre-calculate distance.
void MacroAssembler::pd_patch_instruction(address branch, address target) {
  unsigned long stub_inst;
  int inst_len = get_instruction(branch, &stub_inst);

  set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
}
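
// Illustrative sketch (not part of the build): patching a 6-byte BRCL, whose
// 32-bit relative offset occupies instruction bits 16..47 (case 4 above):
//   m = fmask(31, 0);                                    // offset field, right-aligned
//   v = simm32(RelAddr::pcrel_off32(dest, pos), 16, 48); // new offset, in halfwords
//   patched = (inst & ~m) | v;                           // splice the field in place
// The verification step afterwards re-extracts the offset in bytes and compares
// it with dest_pos - inst_pos, trapping on any mismatch.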

// Extract relative address (aka offset).
// inv_simm16 works for 4-byte instructions only.
// Compare and branch instructions are 6-byte and have a 16-bit offset "in the middle".
long MacroAssembler::get_pcrel_offset(unsigned long inst) {
  if (MacroAssembler::is_pcrelative_short(inst)) {
    if (((inst & 0xFFFFffff00000000UL) == 0) && ((inst & 0x00000000FFFF0000UL) != 0)) {
      return RelAddr::inv_pcrel_off16(inv_simm16(inst));    // 4-byte instruction
    } else {
      return RelAddr::inv_pcrel_off16(inv_simm16_48(inst)); // 6-byte instruction
    }
  }

  if (MacroAssembler::is_pcrelative_long(inst)) {
    return RelAddr::inv_pcrel_off32(inv_simm32(inst));
  }

  print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
#ifdef LUCY_DBG
  VM_Version::z_SIGSEGV();
#else
  ShouldNotReachHere();
#endif
  return -1;
}

long MacroAssembler::get_pcrel_offset(address pc) {
  unsigned long inst;
  unsigned int  len = get_instruction(pc, &inst);

#ifdef ASSERT
  long offset;
  if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
    offset = get_pcrel_offset(inst);
  } else {
    offset = -1;
  }

  if (offset == -1) {
    dump_code_range(tty, pc, 32, "not a pcrelative instruction");
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#else
    ShouldNotReachHere();
#endif
  }
  return offset;
#else
  return get_pcrel_offset(inst);
#endif // ASSERT
}

// Get target address from pc-relative instructions.
address MacroAssembler::get_target_addr_pcrel(address pc) {
  assert(is_pcrelative_long(pc), "not a pcrelative instruction");
  return pc + get_pcrel_offset(pc);
}

// Patch pc relative load address.
void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
  unsigned long inst;
  // Offset is +/- 2**32 -> use long.
  ptrdiff_t distance = con - pc;

  get_instruction(pc, &inst);

  if (is_pcrelative_short(inst)) {
    *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc); // Instructions are at least 2-byte aligned, no test required.

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
      dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
    }
    return;
  }

  if (is_pcrelative_long(inst)) {
    *(int *)(pc+2) = RelAddr::pcrel_off32(con, pc);

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
      dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
    }
    return;
  }

  guarantee(false, "not a pcrelative instruction to patch!");
}

// "Current PC" here means the address just behind the basr instruction.
address MacroAssembler::get_PC(Register result) {
  z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
  return pc();
}

// Get current PC + offset.
// Offset given in bytes, must be even!
// "Current PC" here means the address of the larl instruction plus the given offset.
address MacroAssembler::get_PC(Register result, int64_t offset) {
  address here = pc();
  z_larl(result, offset/2); // Save target instruction address in result.
  return here + offset;
}

void MacroAssembler::instr_size(Register size, Register pc) {
  // Extract 2 most significant bits of current instruction.
  z_llgc(size, Address(pc));
  z_srl(size, 6);
  // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
  z_ahi(size, 3);
  z_nill(size, 6);
}
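
// Illustrative note (not part of the build): the two top bits extracted above
// are the z/Architecture instruction-length code (ILC). The branch-free
// formula (x+3)&6 maps each ILC to the instruction length in bytes:
//   ILC 0: (0+3)&6 = 2   (2-byte instruction)
//   ILC 1: (1+3)&6 = 4   (4-byte instruction)
//   ILC 2: (2+3)&6 = 4   (4-byte instruction)
//   ILC 3: (3+3)&6 = 6   (6-byte instruction)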

// Resize_frame with SP(new) = SP(old) - [offset].
void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp) {
  assert_different_registers(offset, fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }

  z_sgr(Z_SP, offset);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

// Resize_frame with SP(new) = [newSP] + offset.
//   This emitter is useful if we already have calculated a pointer
//   into the to-be-allocated stack space, e.g. with special alignment properties,
//   but need some additional space, e.g. for spilling.
//   newSP    is the pre-calculated pointer. It must not be modified.
//   fp       holds, or is filled with, the frame pointer.
//   offset   is the additional increment which is added to addr to form the new SP.
//            Note: specify a negative value to reserve more space!
//   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
//                    It does not guarantee that fp contains the frame pointer at the end.
void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
  assert_different_registers(newSP, fp, Z_SP);

  if (load_fp) {
    z_lg(fp, _z_abi(callers_sp), Z_SP);
  }

  add2reg(Z_SP, offset, newSP);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

// Resize_frame with SP(new) = [newSP].
//   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
//                    It does not guarantee that fp contains the frame pointer at the end.
void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
  assert_different_registers(newSP, fp, Z_SP);

  if (load_fp) {
    z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
  }

  z_lgr(Z_SP, newSP);
  if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
    z_stg(fp, _z_abi(callers_sp), newSP);
  } else {
    z_stg(fp, _z_abi(callers_sp), Z_SP);
  }
}

// Resize_frame with SP(new) = SP(old) + offset.
void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
  assert_different_registers(fp, Z_SP);

  if (load_fp) {
    z_lg(fp, _z_abi(callers_sp), Z_SP);
  }
  add64(Z_SP, offset);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
#ifdef ASSERT
  assert_different_registers(bytes, old_sp, Z_SP);
  if (!copy_sp) {
    z_cgr(old_sp, Z_SP);
    asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
  }
#endif
  if (copy_sp) { z_lgr(old_sp, Z_SP); }
  if (bytes_with_inverted_sign) {
    z_agr(Z_SP, bytes);
  } else {
    z_sgr(Z_SP, bytes); // z_sgfr sufficient, but probably not faster.
  }
  z_stg(old_sp, _z_abi(callers_sp), Z_SP);
}

unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
  long offset = Assembler::align(bytes, frame::alignment_in_bytes);
  assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
  assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);

  // We must not write outside the current stack bounds (given by Z_SP).
  // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
  // We rely on Z_R0 by default to be available as scratch.
  z_lgr(scratch, Z_SP);
  add2reg(Z_SP, -offset);
  z_stg(scratch, _z_abi(callers_sp), Z_SP);
#ifdef ASSERT
  // Just make sure nobody uses the value in the default scratch register.
  // When another register is used, the caller might rely on it containing the frame pointer.
  if (scratch == Z_R0) {
    z_iihf(scratch, 0xbaadbabe);
    z_iilf(scratch, 0xdeadbeef);
  }
#endif
  return offset;
}

// Push a frame of size `bytes' plus abi160 on top.
unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
  BLOCK_COMMENT("push_frame_abi160 {");
  unsigned int res = push_frame(bytes + frame::z_abi_160_size);
  BLOCK_COMMENT("} push_frame_abi160");
  return res;
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  BLOCK_COMMENT("pop_frame:");
  Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
}

// Pop current C frame and restore return PC register (Z_R14).
void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
  BLOCK_COMMENT("pop_frame_restore_retPC:");
  int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
  // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
  if (Displacement::is_validDisp(retPC_offset)) {
    z_lg(Z_R14, retPC_offset, Z_SP);
    add2reg(Z_SP, frame_size_in_bytes);
  } else {
    add2reg(Z_SP, frame_size_in_bytes);
    restore_return_pc();
  }
}
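
// Illustrative sketch (not part of the build): typical pairing of the frame
// push/pop emitters above around a leaf runtime call (see also
// reserved_stack_check() further down in this file):
//   save_return_pc();                 // Z_R14 -> caller frame
//   push_frame_abi160(0);             // new frame: ABI area only
//   call_VM_leaf(entry_point, ...);
//   pop_frame();                      // Z_SP = *(Z_SP + callers_sp offset)
//   restore_return_pc();              // reload Z_R14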

void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
  if (allow_relocation) {
    call_c(entry_point);
  } else {
    call_c_static(entry_point);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  bool allow_relocation = true;
  call_VM_leaf_base(entry_point, allow_relocation);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     allow_relocation,
                                  bool     check_exceptions) { // Defaults to true.
  // If allow_relocation is true, the generated code must be fit for code
  // relocation or referenced data relocation. In other words: all addresses
  // must be considered variable, so PC-relative addressing is not possible.
  // On the other hand, if (allow_relocation == false), addresses and offsets
  // may be considered stable, enabling us to take advantage of some PC-relative
  // addressing tweaks. These might improve performance and reduce code size.

  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = Z_SP; // Load Z_SP as SP.
  }

  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  if (allow_relocation) {
    return_pc = call_c(entry_point);
  } else {
    return_pc = call_c_static(entry_point);
  }

  reset_last_Java_frame(allow_relocation);

  // C++ interp handles this in the interpreter.
  check_and_handle_popframe(Z_thread);
  check_and_handle_earlyret(Z_thread);

  // Check for pending exceptions.
  if (check_exceptions) {
    // Check for pending exceptions (java_thread is set upon return).
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception. However, once the
    // code gets relocated, that conditional branch may no longer reach its
    // target. So we branch around a stub call that can always reach.
    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.
    call_stub(StubRoutines::forward_exception_entry());
    bind(ok);
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc; // Wipe out other (error handling) calls.
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     check_exceptions) { // Defaults to true.
  bool allow_relocation = true;
  call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
}

// VM calls without explicit last_java_sp.

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                             Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

// VM static calls without explicit last_java_sp.

void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
}

void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                                    Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM_static(oop_result, entry_point, check_exceptions);
}

// VM calls with explicit last_java_sp.

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
                             Register arg_2, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
                             Register arg_2, Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

// VM leaf calls.

void MacroAssembler::call_VM_leaf(address entry_point) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_leaf_base(entry_point, true);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
  call_VM_leaf(entry_point);
}
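
// Illustrative note (not part of the build): argument registers in the
// call_VM family. Z_ARG1 is loaded with Z_thread inside call_VM_base, so the
// Java-visible arguments shift up by one:
//   call_VM(result, entry_point, a, b);
//     -> Z_ARG2 = a, Z_ARG3 = b, Z_ARG1 = Z_thread
// The asserts above guard against an argument arriving in a register that an
// earlier lgr_if_needed has already overwritten.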

// Static VM leaf calls.
// Really static VM leaf calls are never patched.

void MacroAssembler::call_VM_leaf_static(address entry_point) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_leaf_base(entry_point, false);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  call_VM_leaf_static(entry_point);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  call_VM_leaf_static(entry_point);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
  call_VM_leaf_static(entry_point);
}

// Don't use detour via call_c(reg).
address MacroAssembler::call_c(address function_entry) {
  load_const(Z_R1, function_entry);
  return call(Z_R1);
}

// Variant for really static (non-relocatable) calls which are never patched.
address MacroAssembler::call_c_static(address function_entry) {
  load_absolute_address(Z_R1, function_entry);
#if 0 // def ASSERT
  // Verify that call site did not move.
  load_const_optimized(Z_R0, function_entry);
  z_cgr(Z_R1, Z_R0);
  z_brc(bcondEqual, 3);
  z_illtrap(0xba);
#endif
  return call(Z_R1);
}

address MacroAssembler::call_c_opt(address function_entry) {
  bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
  _last_calls_return_pc = success ? pc() : NULL;
  return _last_calls_return_pc;
}

// Identify a call_far_patchable instruction: LARL + LG + BASR
//
//    nop                  ; optionally, if required for alignment
//    lgrl rx,A(TOC entry) ; PC-relative access into constant pool
//    basr Z_R14,rx        ; end of this instruction must be aligned to a word boundary
//
// Code pattern will eventually get patched into variant2 (see below for detection code).
//
bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
  address iaddr = instruction_addr;

  // Check for the actual load instruction.
  if (!is_load_const_from_toc(iaddr)) { return false; }
  iaddr += load_const_from_toc_size();

  // Check for the call (BASR) instruction, finally.
  assert(iaddr - instruction_addr + call_byregister_size() == call_far_patchable_size(), "size mismatch");
  return is_call_byregister(iaddr);
}

// Identify a call_far_patchable instruction: BRASL
//
// Code pattern suited to atomic patching:
//    nop                    ; Optionally, if required for alignment.
//    nop    ...             ; Multiple filler nops to compensate for size difference (variant0 is longer).
//    nop                    ; For code pattern detection: Prepend each BRASL with a nop.
//    brasl  Z_R14,<reladdr> ; End of code must be 4-byte aligned!
bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
  const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());

  // Check for correct number of leading nops.
  address iaddr;
  for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
    if (!is_z_nop(iaddr)) { return false; }
  }
  assert(iaddr == call_addr, "sanity");

  // --> Check for call instruction.
  if (is_call_far_pcrelative(call_addr)) {
    assert(call_addr - instruction_addr + call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
    return true;
  }

  return false;
}

// Emit a NOT mt-safely patchable 64 bit absolute call.
// If toc_offset == -2, then the destination of the call (= target) is emitted
//                      to the constant pool and a runtime_call relocation is added
//                      to the code buffer.
// If toc_offset != -2, target must already be in the constant pool at
//                      _ctableStart+toc_offset (a caller can retrieve toc_offset
//                      from the runtime_call relocation).
// Special handling of emitting to scratch buffer when there is no constant pool.
// Slightly changed code pattern. We emit an additional nop if we would
// not end emitting at a word aligned address. This is to ensure
// an atomically patchable displacement in brasl instructions.
//
// A call_far_patchable comes in different flavors:
//  - LARL(CP) / LG(CP) / BR  (address in constant pool, access via CP register)
//  - LGRL(CP) / BR           (address in constant pool, pc-relative access)
//  - BRASL                   (relative address of call target coded in instruction)
// All flavors occupy the same amount of space. Length differences are compensated
// by leading nops, such that the instruction sequence always ends at the same
// byte offset. This is required to keep the return offset constant.
// Furthermore, the return address (the end of the instruction sequence) is forced
// to be on a 4-byte boundary. This is required for atomic patching, should we ever
// need to patch the call target of the BRASL flavor.
// RETURN value: false, if no constant pool entry could be allocated, true otherwise.
bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
  // Get current pc and ensure word alignment for end of instr sequence.
  const address start_pc = pc();
  const intptr_t start_off = offset();
  assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
  const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
  const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
  const bool emit_relative_call  = !emit_target_to_pool &&
                                   RelAddr::is_in_range_of_RelAddr32(dist) &&
                                   ReoptimizeCallSequences &&
                                   !code_section()->scratch_emit();

  if (emit_relative_call) {
    // Add padding to get the same size as below.
    const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
    unsigned int current_padding;
    for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
    assert(current_padding == padding, "sanity");

    // relative call: len = 2(nop) + 6(brasl)
    // CodeBlob resize cannot occur in this case because
    // this call is emitted into pre-existing space.
    z_nop(); // Prepend each BRASL with a nop.
    z_brasl(Z_R14, target);
  } else {
    // absolute call: Get address from TOC.
    // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
    if (emit_target_to_pool) {
      // When emitting the call for the first time, we do not need to use
      // the pc-relative version. It will be patched anyway, when the code
      // buffer is copied.
      // Relocation is not needed when !ReoptimizeCallSequences.
      relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
      AddressLiteral dest(target, rt);
      // Store_oop_in_toc() adds dest to the constant table. As a side effect,
      // this kills inst_mark(). Reset if possible.
      bool reset_mark = (inst_mark() == pc());
      tocOffset = store_oop_in_toc(dest);
      if (reset_mark) { set_inst_mark(); }
      if (tocOffset == -1) {
        return false; // Couldn't create constant pool entry.
      }
    }
    assert(offset() == start_off, "emit no code before this point!");

    address tocPos = pc() + tocOffset;
    if (emit_target_to_pool) {
      tocPos = code()->consts()->start() + tocOffset;
    }
    load_long_pcrelative(Z_R14, tocPos);
    z_basr(Z_R14, Z_R14);
  }

#ifdef ASSERT
  // Assert that we can identify the emitted call.
  assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
  assert(offset() == start_off + call_far_patchable_size(), "wrong size");

  if (emit_target_to_pool) {
    assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
           "wrong encoding of dest address");
  }
#endif
  return true; // success
}
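
// Illustrative summary (not part of the build): the two flavors emitted by
// call_far_patchable() above. Both occupy call_far_patchable_size() bytes and
// end at the same, word-aligned return offset; the shorter BRASL flavor is
// padded with leading nops:
//   relative flavor:                 absolute flavor (target in constant pool):
//     nop ...  ; padding               (load TOC base, if needed)
//     nop      ; pattern marker        lgrl Z_R14, <toc entry>
//     brasl Z_R14, <target>            basr Z_R14, Z_R14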

// Identify a call_far_patchable instruction.
// For more detailed information see header comment of call_far_patchable.
bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
  return is_call_far_patchable_variant2_at(instruction_addr) || // short version: BRASL
         is_call_far_patchable_variant0_at(instruction_addr);   // long version:  LARL + LG + BASR
}

// Does the call_far_patchable instruction use a pc-relative encoding
// of the call destination?
bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
  // Variant 2 is pc-relative.
  return is_call_far_patchable_variant2_at(instruction_addr);
}

bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
  // Prepend each BRASL with a nop.
  return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required.
}

// Set destination address of a call_far_patchable instruction.
void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
  ResourceMark rm;

  // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
  int code_size = MacroAssembler::call_far_patchable_size();
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.call_far_patchable(dest, tocOffset);
  ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
}

// Get dest address of a call_far_patchable instruction.
address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
  // Dynamic TOC: absolute address in constant pool.
  // Check variant2 first, it is more frequent.

  // Relative address encoded in call instruction.
  if (is_call_far_patchable_variant2_at(instruction_addr)) {
    return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.

  // Absolute address in constant pool.
  } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
    address iaddr = instruction_addr;

    long    tocOffset = get_load_const_from_toc_offset(iaddr);
    address tocLoc    = iaddr + tocOffset;
    return *(address *)(tocLoc);
  } else {
    fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
    fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
            *(unsigned long*)instruction_addr,
            *(unsigned long*)(instruction_addr+8),
            call_far_patchable_size());
    Disassembler::decode(instruction_addr, instruction_addr + call_far_patchable_size());
    ShouldNotReachHere();
    return NULL;
  }
}

void MacroAssembler::align_call_far_patchable(address pc) {
  if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
}

void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}

// Read from the polling page.
// Use TM or TMY instruction, depending on read offset.
//   offset = 0: Use TM,  safepoint polling.
//   offset < 0: Use TMY, profiling safepoint polling.
void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
  if (Immediate::is_uimm12(offset)) {
    z_tm(offset, polling_page_address, mask_safepoint);
  } else {
    z_tmy(offset, polling_page_address, mask_profiling);
  }
}

// Check whether z_instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 2) { return false; } // It's none of the allowed instructions.

  if (ilen == 4) {
    if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. Fail.

    int ms = inv_mask(z_instruction, 8, 32);  // mask
    int ra = inv_reg(z_instruction, 16, 32);  // base register
    int ds = inv_uimm12(z_instruction);       // displacement

    if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
      return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
    }

  } else { /* if (ilen == 6) */

    assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");

    if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. Fail.

    int ms = inv_mask(z_instruction, 8, 48);  // mask
    int ra = inv_reg(z_instruction, 16, 48);  // base register
    int ds = inv_simm20(z_instruction);       // displacement
  }

  return true;
}
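
// Illustrative note (not part of the build): the poll-read shapes emitted by
// load_from_polling_page() and accepted by is_load_from_polling_page():
//   z_tm(0, page_reg, mask_safepoint);         // offset fits uimm12: 4-byte TM
//   z_tmy(-offset, page_reg, mask_profiling);  // negative offset:    6-byte TMY
// The 4-byte form must have displacement 0, a non-zero base register, and the
// safepoint mask; the 6-byte form merely has to be a TMY (the obsolete LG form
// asserts).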

// Extract poll address from instruction and ucontext.
address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
  assert(ucontext != NULL, "must have ucontext");
  ucontext_t* uc = (ucontext_t*) ucontext;
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 4 && is_z_tm(z_instruction)) {
    int ra = inv_reg(z_instruction, 16, 32);  // base register
    int ds = inv_uimm12(z_instruction);       // displacement
    address addr = (address)uc->uc_mcontext.gregs[ra];
    return addr + ds;
  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
    int ra = inv_reg(z_instruction, 16, 48);  // base register
    int ds = inv_simm20(z_instruction);       // displacement
    address addr = (address)uc->uc_mcontext.gregs[ra];
    return addr + ds;
  }

  ShouldNotReachHere();
  return NULL;
}

// Extract poll register from instruction.
uint MacroAssembler::get_poll_register(address instr_loc) {
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 4 && is_z_tm(z_instruction)) {
    return (uint)inv_reg(z_instruction, 16, 32);  // base register
  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
    return (uint)inv_reg(z_instruction, 16, 48);  // base register
  }

  ShouldNotReachHere();
  return 0;
}

bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
  ShouldNotCallThis();
  return false;
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  assert_different_registers(tmp1, tmp2);
  z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
  load_const_optimized(tmp1, (long) os::get_memory_serialize_page());

  int mask = os::get_serialize_page_mask();
  if (Immediate::is_uimm16(mask)) {
    z_nill(tmp2, mask);
    z_llghr(tmp2, tmp2);
  } else {
    z_nilf(tmp2, mask);
    z_llgfr(tmp2, tmp2);
  }

  z_release();
  z_st(Z_R0, 0, tmp2, tmp1);
}

void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
    const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
    // Armed page has poll_bit set.
    z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
    z_brnaz(slow_path);
  } else {
    load_const_optimized(temp_reg, SafepointSynchronize::address_of_state());
    z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized);
    z_brne(slow_path);
  }
}
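
// Illustrative note (not part of the build): with thread-local polling the
// poll word lives in the JavaThread, so safepoint_poll() reduces to
//   z_tm(poll_byte_addr, SafepointMechanism::poll_bit()); // test armed bit
//   z_brnaz(slow_path);                                   // branch if the bit is set
// (+7 selects the low-order byte of the 8-byte poll word on big-endian z.)
// The global-page variant instead compares the SafepointSynchronize state byte
// against _not_synchronized with CLI and branches on inequality.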

// Don't rely on register locking, always use Z_R1 as scratch register instead.
void MacroAssembler::bang_stack_with_offset(int offset) {
  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");
  if (Displacement::is_validDisp(-offset)) {
    z_tmy(-offset, Z_SP, mask_stackbang);
  } else {
    add2reg(Z_R1, -offset, Z_SP);  // Do not destroy Z_SP!!!
    z_tm(0, Z_R1, mask_stackbang); // Just banging.
  }
}

void MacroAssembler::reserved_stack_check(Register return_pc) {
  // Test if reserved zone needs to be enabled.
  Label no_reserved_zone_enabling;
  assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
  BLOCK_COMMENT("reserved_stack_check {");

  z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
  z_brl(no_reserved_zone_enabling);

  // Enable reserved zone again, throw stack overflow exception.
  save_return_pc();
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
  pop_frame();
  restore_return_pc();

  load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
  // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
  z_br(Z_R1);

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
  BLOCK_COMMENT("} reserved_stack_check");
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int      con_size_in_bytes,
                                   Register t1,
                                   Label&   slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end    = t1;
  Register thread = Z_thread;

  z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    z_lay(end, Address(obj, con_size_in_bytes));
  } else {
    z_lay(end, Address(obj, var_size_in_bytes));
  }
  z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
  branch_optimized(bcondHigh, slow_case);

  // Update the tlab top pointer.
  z_stg(end, Address(thread, JavaThread::tlab_top_offset()));

  // Recover var_size_in_bytes if necessary.
  if (var_size_in_bytes == end) {
    z_sgr(var_size_in_bytes, obj);
  }
}

// Emitter for interface method lookup.
//   input:  recv_klass, intf_klass, itable_index
//   output: method_result
//   kills:  itable_index, temp1_reg, Z_R0, Z_R1
// TODO: temp2_reg is unused. We may use this emitter in the itable stubs, too.
//       If the register remains unneeded there as well, remove it.
void MacroAssembler::lookup_interface_method(Register           recv_klass,
                                             Register           intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register           method_result,
                                             Register           temp1_reg,
                                             Label&             no_such_interface,
                                             bool               return_method) {

  const Register vtable_len        = temp1_reg; // Used to compute itable_entry_addr.
  const Register itable_entry_addr = Z_R1_scratch;
  const Register itable_interface  = Z_R0_scratch;

  BLOCK_COMMENT("lookup_interface_method {");

  // Load start of itable entries into itable_entry_addr.
  z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
  z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));

  // Loop over all itable entries until desired interfaceOop (Rinterface) found.
  const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());

  add2reg_with_index(itable_entry_addr,
                     vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
                     recv_klass, vtable_len);

  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
  Label     search;

  bind(search);

  // Handle IncompatibleClassChangeError.
  // If the entry is NULL then we've reached the end of the table
  // without finding the expected interface, so throw an exception.
  load_and_test_long(itable_interface, Address(itable_entry_addr));
  z_bre(no_such_interface);

  add2reg(itable_entry_addr, itable_offset_search_inc);
  z_cgr(itable_interface, intf_klass);
  z_brne(search);

  // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
  if (return_method) {
    const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
                                      itableOffsetEntry::interface_offset_in_bytes()) -
                                     itable_offset_search_inc;

    // Compute itableMethodEntry and get method and entry point.
    // We use addressing with index and displacement, since the formula
    // for computing the entry's offset has a fixed and a dynamic part:
    // the latter depends on the matched interface entry and on whether
    // the itable index was passed as a register or as a constant value.
    int method_offset = itableMethodEntry::method_offset_in_bytes();
                                           // Fixed part (displacement), common operand.
    Register itable_offset = method_result; // Dynamic part (index register).

    if (itable_index.is_register()) {
      // Compute the method's offset in that register, for the formula, see the
      // else-clause below.
      z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
      z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
    } else {
      // Displacement increases.
      method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();

      // Load index from itable.
      z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
    }

    // Finally load the method's oop.
    z_lg(method_result, method_offset, itable_offset, recv_klass);
  }
  BLOCK_COMMENT("} lookup_interface_method");
}
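
// Illustrative sketch (not part of the build): the itable layout walked by
// lookup_interface_method(). Behind the vtable (vtable_len slots) lies a table
// of itableOffsetEntry pairs, terminated by a NULL interface:
//   recv_klass + vtable_start + vtable_len*wordSize:
//     { interface_0, offset_0 }   // offset_i: start of interface_i's
//     { interface_1, offset_1 }   //           itableMethodEntry array,
//     { NULL,        ...      }   //           relative to recv_klass
// Once intf_klass is matched, the method is loaded from
//   recv_klass + offset_i + itable_index * itableMethodEntry::size() * wordSize.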

// Lookup for virtual method invocation.
void MacroAssembler::lookup_virtual_method(Register           recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register           method_result) {
  assert_different_registers(recv_klass, vtable_index.register_or_noreg());
  assert(vtableEntry::size() * wordSize == wordSize,
         "else adjust the scaling in the code below");

  BLOCK_COMMENT("lookup_virtual_method {");

  const int base = in_bytes(Klass::vtable_start_offset());

  if (vtable_index.is_constant()) {
    // Load with base + disp.
    Address vtable_entry_addr(recv_klass,
                              vtable_index.as_constant() * wordSize +
                              base +
                              vtableEntry::method_offset_in_bytes());

    z_lg(method_result, vtable_entry_addr);
  } else {
    // Shift index properly and load with base + index + disp.
    Register vindex = vtable_index.as_register();
    Address  vtable_entry_addr(recv_klass, vindex,
                               base + vtableEntry::method_offset_in_bytes());

    z_sllg(vindex, vindex, exact_log2(wordSize));
    z_lg(method_result, vtable_entry_addr);
  }
  BLOCK_COMMENT("} lookup_virtual_method");
}

// Factor out code to call ic_miss_handler.
// Generate code to call the inline cache miss handler.
//
// In most cases, this code will be generated out-of-line.
// The method parameters are intended to provide some variability.
//   ICM          - Label which has to be bound to the start of useful code (past any traps).
//   trapMarker   - Marking byte for the generated illtrap instructions (if any).
//                  Any value except 0x00 is supported.
//                  = 0x00 - do not generate illtrap instructions.
//                           Use nops to fill unused space.
//   requiredSize - required size of the generated code. If the actually
//                  generated code is smaller, use padding instructions to fill up.
//                  = 0 - no size requirement, no padding.
//   scratch      - scratch register to hold branch target address.
//
// The method returns the code offset of the bound label.
unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
  intptr_t startOffset = offset();

  // Prevent entry at content_begin().
  if (trapMarker != 0) {
    z_illtrap(trapMarker);
  }

  // Load address of inline cache miss code into scratch register
  // and branch to cache miss handler.
  BLOCK_COMMENT("IC miss handler {");
  BIND(ICM);
  unsigned int   labelOffset = offset();
  AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());

  load_const_optimized(scratch, icmiss);
  z_br(scratch);

  // Fill unused space.
  if (requiredSize > 0) {
    while ((offset() - startOffset) < requiredSize) {
      if (trapMarker == 0) {
        z_nop();
      } else {
        z_illtrap(trapMarker);
      }
    }
  }
  BLOCK_COMMENT("} IC miss handler");
  return labelOffset;
}

void MacroAssembler::nmethod_UEP(Label& ic_miss) {
  Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
  int      klass_offset = oopDesc::klass_offset_in_bytes();
  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
    if (VM_Version::has_CompareBranch()) {
      z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
    } else {
      z_ltgr(Z_ARG1, Z_ARG1);
      z_bre(ic_miss);
    }
  }
  // Compare cached class against klass from receiver.
  compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
  z_brne(ic_miss);
}

void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
                                                   Register   super_klass,
                                                   Register   temp1_reg,
                                                   Label*     L_success,
                                                   Label*     L_failure,
                                                   Label*     L_slow_path,
                                                   RegisterOrConstant super_check_offset) {

  const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  const int sco_offset = in_bytes(Klass::super_check_offset_offset());

  bool must_load_sco  = (super_check_offset.constant_or_zero() == -1);
  bool need_slow_path = (must_load_sco ||
                         super_check_offset.constant_or_zero() == sc_offset);

  // Input registers must not overlap.
2989 assert_different_registers(sub_klass, super_klass, temp1_reg); 2990 if (super_check_offset.is_register()) { 2991 assert_different_registers(sub_klass, super_klass, 2992 super_check_offset.as_register()); 2993 } else if (must_load_sco) { 2994 assert(temp1_reg != noreg, "supply either a temp or a register offset"); 2995 } 2996 2997 const Register Rsuper_check_offset = temp1_reg; 2998 2999 NearLabel L_fallthrough; 3000 int label_nulls = 0; 3001 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 3002 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 3003 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 3004 assert(label_nulls <= 1 || 3005 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 3006 "at most one NULL in the batch, usually"); 3007 3008 BLOCK_COMMENT("check_klass_subtype_fast_path {"); 3009 // If the pointers are equal, we are done (e.g., String[] elements). 3010 // This self-check enables sharing of secondary supertype arrays among 3011 // non-primary types such as array-of-interface. Otherwise, each such 3012 // type would need its own customized SSA. 3013 // We move this check to the front of the fast path because many 3014 // type checks are in fact trivially successful in this manner, 3015 // so we get a nicely predicted branch right at the start of the check. 3016 compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success); 3017 3018 // Check the supertype display, which is uint. 3019 if (must_load_sco) { 3020 z_llgf(Rsuper_check_offset, sco_offset, super_klass); 3021 super_check_offset = RegisterOrConstant(Rsuper_check_offset); 3022 } 3023 Address super_check_addr(sub_klass, super_check_offset, 0); 3024 z_cg(super_klass, super_check_addr); // compare w/ displayed supertype 3025 3026 // This check has worked decisively for primary supers. 3027 // Secondary supers are sought in the super_cache ('super_cache_addr'). 3028 // (Secondary supers are interfaces and very deeply nested subtypes.) 3029 // This works in the same check above because of a tricky aliasing 3030 // between the super_cache and the primary super display elements. 3031 // (The 'super_check_addr' can address either, as the case requires.) 3032 // Note that the cache is updated below if it does not help us find 3033 // what we need immediately. 3034 // So if it was a primary super, we can just fail immediately. 3035 // Otherwise, it's the slow path for us (no success at this point). 3036 3037 // Hacked jmp, which may only be used just before L_fallthrough. 3038 #define final_jmp(label) \ 3039 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3040 else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/ 3041 3042 if (super_check_offset.is_register()) { 3043 branch_optimized(Assembler::bcondEqual, *L_success); 3044 z_cfi(super_check_offset.as_register(), sc_offset); 3045 if (L_failure == &L_fallthrough) { 3046 branch_optimized(Assembler::bcondEqual, *L_slow_path); 3047 } else { 3048 branch_optimized(Assembler::bcondNotEqual, *L_failure); 3049 final_jmp(*L_slow_path); 3050 } 3051 } else if (super_check_offset.as_constant() == sc_offset) { 3052 // Need a slow path; fast failure is impossible. 3053 if (L_slow_path == &L_fallthrough) { 3054 branch_optimized(Assembler::bcondEqual, *L_success); 3055 } else { 3056 branch_optimized(Assembler::bcondNotEqual, *L_slow_path); 3057 final_jmp(*L_success); 3058 } 3059 } else { 3060 // No slow path; it's a fast decision. 
3061 if (L_failure == &L_fallthrough) { 3062 branch_optimized(Assembler::bcondEqual, *L_success); 3063 } else { 3064 branch_optimized(Assembler::bcondNotEqual, *L_failure); 3065 final_jmp(*L_success); 3066 } 3067 } 3068 3069 bind(L_fallthrough); 3070 #undef local_brc 3071 #undef final_jmp 3072 BLOCK_COMMENT("} check_klass_subtype_fast_path"); 3073 // fallthru (to slow path) 3074 } 3075 3076 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass, 3077 Register Rsuperklass, 3078 Register Rarray_ptr, // tmp 3079 Register Rlength, // tmp 3080 Label* L_success, 3081 Label* L_failure) { 3082 // Input registers must not overlap. 3083 // Also check for R1 which is explicitly used here. 3084 assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength); 3085 NearLabel L_fallthrough, L_loop; 3086 int label_nulls = 0; 3087 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 3088 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 3089 assert(label_nulls <= 1, "at most one NULL in the batch"); 3090 3091 const int ss_offset = in_bytes(Klass::secondary_supers_offset()); 3092 const int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 3093 3094 const int length_offset = Array<Klass*>::length_offset_in_bytes(); 3095 const int base_offset = Array<Klass*>::base_offset_in_bytes(); 3096 3097 // Hacked jmp, which may only be used just before L_fallthrough. 3098 #define final_jmp(label) \ 3099 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3100 else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/ 3101 3102 NearLabel loop_iterate, loop_count, match; 3103 3104 BLOCK_COMMENT("check_klass_subtype_slow_path {"); 3105 z_lg(Rarray_ptr, ss_offset, Rsubklass); 3106 3107 load_and_test_int(Rlength, Address(Rarray_ptr, length_offset)); 3108 branch_optimized(Assembler::bcondZero, *L_failure); 3109 3110 // Klass pointers in the table are no longer compressed. 3111 z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match. 3112 z_bre(match); // Shortcut for array length = 1. 3113 3114 // No match yet, so we must walk the array's elements. 3115 z_lngfr(Rlength, Rlength); 3116 z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array 3117 z_llill(Z_R1, BytesPerWord); // Set increment/end index. 3118 add2reg(Rlength, 2 * BytesPerWord); // start index = -(n-2)*BytesPerWord 3119 z_slgr(Rarray_ptr, Rlength); // start addr: += (n-2)*BytesPerWord 3120 z_bru(loop_count); 3121 3122 BIND(loop_iterate); 3123 z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match. 3124 z_bre(match); 3125 BIND(loop_count); 3126 z_brxlg(Rlength, Z_R1, loop_iterate); 3127 3128 // Rsuperklass not found among secondary super classes -> failure. 3129 branch_optimized(Assembler::bcondAlways, *L_failure); 3130 3131 // Got a hit. Return success (zero result). Set cache. 3132 // Cache load doesn't happen here. For speed it is directly emitted by the compiler. 3133 3134 BIND(match); 3135 3136 z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache. 3137 3138 final_jmp(*L_success); 3139 3140 // Exit to the surrounding code. 3141 BIND(L_fallthrough); 3142 #undef local_brc 3143 #undef final_jmp 3144 BLOCK_COMMENT("} check_klass_subtype_slow_path"); 3145 } 3146 3147 // Emitter for combining fast and slow path.
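// As an illustration (a sketch only; the register assignments and the label
// name are hypothetical, chosen by the caller):
//
//   NearLabel is_subtype;
//   check_klass_subtype(Rsub, Rsuper, Rtmp1, Rtmp2, is_subtype);
//   // Control falls through to this point only if Rsub is not a subtype.
//   ... failure handling ...
//   bind(is_subtype);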
3148 void MacroAssembler::check_klass_subtype(Register sub_klass, 3149 Register super_klass, 3150 Register temp1_reg, 3151 Register temp2_reg, 3152 Label& L_success) { 3153 NearLabel failure; 3154 BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name())); 3155 check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, 3156 &L_success, &failure, NULL); 3157 check_klass_subtype_slow_path(sub_klass, super_klass, 3158 temp1_reg, temp2_reg, &L_success, NULL); 3159 BIND(failure); 3160 BLOCK_COMMENT("} check_klass_subtype"); 3161 } 3162 3163 // Increment a counter at counter_address when the eq condition code is 3164 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code. 3165 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) { 3166 Label l; 3167 z_brne(l); 3168 load_const(tmp1_reg, counter_address); 3169 add2mem_32(Address(tmp1_reg), 1, tmp2_reg); 3170 z_cr(tmp1_reg, tmp1_reg); // Set cc to eq. 3171 bind(l); 3172 } 3173 3174 // Semantics are dependent on the slow_case label: 3175 // If the slow_case label is not NULL, failure to biased-lock the object 3176 // transfers control to the location of the slow_case label. If the 3177 // object could be biased-locked, control is transferred to the done label. 3178 // The condition code is unpredictable. 3179 // 3180 // If the slow_case label is NULL, failure to biased-lock the object results 3181 // in a transfer of control to the done label with a condition code of not_equal. 3182 // If the biased-lock could be successfully obtained, control is transferred to 3183 // the done label with a condition code of equal. 3184 // It is mandatory to react to the condition code at the done label. 3185 // 3186 void MacroAssembler::biased_locking_enter(Register obj_reg, 3187 Register mark_reg, 3188 Register temp_reg, 3189 Register temp2_reg, // May be Z_R0! 3190 Label &done, 3191 Label *slow_case) { 3192 assert(UseBiasedLocking, "why call this otherwise?"); 3193 assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg); 3194 3195 Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise. 3196 3197 BLOCK_COMMENT("biased_locking_enter {"); 3198 3199 // Biased locking 3200 // See whether the lock is currently biased toward our thread and 3201 // whether the epoch is still valid. 3202 // Note that the runtime guarantees sufficient alignment of JavaThread 3203 // pointers to allow age to be placed into low bits. 3204 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, 3205 "biased locking makes assumptions about bit layout"); 3206 z_lr(temp_reg, mark_reg); 3207 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place); 3208 z_chi(temp_reg, markOopDesc::biased_lock_pattern); 3209 z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased-locked. 3210 3211 load_prototype_header(temp_reg, obj_reg); 3212 load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place)); 3213 3214 z_ogr(temp_reg, Z_thread); 3215 z_xgr(temp_reg, mark_reg); 3216 z_ngr(temp_reg, temp2_reg); 3217 if (PrintBiasedLockingStatistics) { 3218 increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg); 3219 // Restore mark_reg. 3220 z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); 3221 } 3222 branch_optimized(Assembler::bcondEqual, done); // Biased lock obtained, return success.
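// Invariant at this point (a sketch of what the code above computed):
//   temp_reg == ((prototype_header | Z_thread) ^ mark) & ~age_mask
// Zero would have meant "already biased to us in the current epoch" and was
// handled by the branch above. The surviving bit groups are examined one by
// one below to choose between revoking, rebiasing, and acquiring an
// anonymous bias.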
3223 3224 Label try_revoke_bias; 3225 Label try_rebias; 3226 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 3227 3228 //---------------------------------------------------------------------------- 3229 // At this point we know that the header has the bias pattern and 3230 // that we are not the bias owner in the current epoch. We need to 3231 // figure out more details about the state of the header in order to 3232 // know what operations can be legally performed on the object's 3233 // header. 3234 3235 // If the low three bits in the xor result aren't clear, that means 3236 // the prototype header is no longer biased and we have to revoke 3237 // the bias on this object. 3238 z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place); 3239 z_brnaz(try_revoke_bias); 3240 3241 // Biasing is still enabled for this data type. See whether the 3242 // epoch of the current bias is still valid, meaning that the epoch 3243 // bits of the mark word are equal to the epoch bits of the 3244 // prototype header. (Note that the prototype header's epoch bits 3245 // only change at a safepoint.) If not, attempt to rebias the object 3246 // toward the current thread. Note that we must be absolutely sure 3247 // that the current epoch is invalid in order to do this because 3248 // otherwise the manipulations it performs on the mark word are 3249 // illegal. 3250 z_tmll(temp_reg, markOopDesc::epoch_mask_in_place); 3251 z_brnaz(try_rebias); 3252 3253 //---------------------------------------------------------------------------- 3254 // The epoch of the current bias is still valid but we know nothing 3255 // about the owner; it might be set or it might be clear. Try to 3256 // acquire the bias of the object using an atomic operation. If this 3257 // fails we will go in to the runtime to revoke the object's bias. 3258 // Note that we first construct the presumed unbiased header so we 3259 // don't accidentally blow away another thread's valid bias. 3260 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | 3261 markOopDesc::epoch_mask_in_place); 3262 z_lgr(temp_reg, Z_thread); 3263 z_llgfr(mark_reg, mark_reg); 3264 z_ogr(temp_reg, mark_reg); 3265 3266 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3267 3268 z_csg(mark_reg, temp_reg, 0, obj_reg); 3269 3270 // If the biasing toward our thread failed, this means that 3271 // another thread succeeded in biasing it toward itself and we 3272 // need to revoke that bias. The revocation will occur in the 3273 // interpreter runtime in the slow case. 3274 3275 if (PrintBiasedLockingStatistics) { 3276 increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), 3277 temp_reg, temp2_reg); 3278 } 3279 if (slow_case != NULL) { 3280 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way. 3281 } 3282 branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code. 3283 3284 //---------------------------------------------------------------------------- 3285 bind(try_rebias); 3286 // At this point we know the epoch has expired, meaning that the 3287 // current "bias owner", if any, is actually invalid. Under these 3288 // circumstances _only_, we are allowed to use the current header's 3289 // value as the comparison value when doing the cas to acquire the 3290 // bias in the current epoch. 
In other words, we allow transfer of 3291 // the bias from one thread to another directly in this situation. 3292 3293 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); 3294 load_prototype_header(temp_reg, obj_reg); 3295 z_llgfr(mark_reg, mark_reg); 3296 3297 z_ogr(temp_reg, Z_thread); 3298 3299 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3300 3301 z_csg(mark_reg, temp_reg, 0, obj_reg); 3302 3303 // If the biasing toward our thread failed, this means that 3304 // another thread succeeded in biasing it toward itself and we 3305 // need to revoke that bias. The revocation will occur in the 3306 // interpreter runtime in the slow case. 3307 3308 if (PrintBiasedLockingStatistics) { 3309 increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg); 3310 } 3311 if (slow_case != NULL) { 3312 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way. 3313 } 3314 z_bru(done); // Biased lock status given in condition code. 3315 3316 //---------------------------------------------------------------------------- 3317 bind(try_revoke_bias); 3318 // The prototype mark in the klass doesn't have the bias bit set any 3319 // more, indicating that objects of this data type are not supposed 3320 // to be biased any more. We are going to try to reset the mark of 3321 // this object to the prototype value and fall through to the 3322 // CAS-based locking scheme. Note that if our CAS fails, it means 3323 // that another thread raced us for the privilege of revoking the 3324 // bias of this particular object, so it's okay to continue in the 3325 // normal locking code. 3326 load_prototype_header(temp_reg, obj_reg); 3327 3328 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3329 3330 z_csg(mark_reg, temp_reg, 0, obj_reg); 3331 3332 // Fall through to the normal CAS-based lock, because no matter what 3333 // the result of the above CAS, some thread must have succeeded in 3334 // removing the bias bit from the object's header. 3335 if (PrintBiasedLockingStatistics) { 3336 // z_cgr(mark_reg, temp2_reg); 3337 increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg); 3338 } 3339 3340 bind(cas_label); 3341 BLOCK_COMMENT("} biased_locking_enter"); 3342 } 3343 3344 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) { 3345 // Check for biased locking unlock case, which is a no-op 3346 // Note: we do not have to check the thread ID for two reasons. 3347 // First, the interpreter checks for IllegalMonitorStateException at 3348 // a higher level. Second, if the bias was revoked while we held the 3349 // lock, the object could not be rebiased toward another thread, so 3350 // the bias bit would be clear. 
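// Sketch of the test below, assuming the usual markOopDesc layout in which
// biased_lock_pattern is 0b101 in the low (biased_lock | lock) bits:
//   if ((mark & biased_lock_mask_in_place) == biased_lock_pattern) goto done;
// That is, if the lock bits still read "biased", unlocking is a no-op.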
3351 BLOCK_COMMENT("biased_locking_exit {"); 3352 3353 z_lg(temp_reg, 0, mark_addr); 3354 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place); 3355 3356 z_chi(temp_reg, markOopDesc::biased_lock_pattern); 3357 z_bre(done); 3358 BLOCK_COMMENT("} biased_locking_exit"); 3359 } 3360 3361 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3362 Register displacedHeader = temp1; 3363 Register currentHeader = temp1; 3364 Register temp = temp2; 3365 NearLabel done, object_has_monitor; 3366 3367 BLOCK_COMMENT("compiler_fast_lock_object {"); 3368 3369 // Load markOop from oop into mark. 3370 z_lg(displacedHeader, 0, oop); 3371 3372 if (try_bias) { 3373 biased_locking_enter(oop, displacedHeader, temp, Z_R0, done); 3374 } 3375 3376 // Handle existing monitor. 3377 if ((EmitSync & 0x01) == 0) { 3378 // The object has an existing monitor iff (mark & monitor_value) != 0. 3379 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3380 z_lr(temp, displacedHeader); 3381 z_nill(temp, markOopDesc::monitor_value); 3382 z_brne(object_has_monitor); 3383 } 3384 3385 // Set mark to markOop | markOopDesc::unlocked_value. 3386 z_oill(displacedHeader, markOopDesc::unlocked_value); 3387 3388 // Load Compare Value application register. 3389 3390 // Initialize the box (must happen before we update the object mark). 3391 z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box); 3392 3393 // Memory Fence (in cmpxchgd) 3394 // Compare object markOop with mark and if equal exchange scratch1 with object markOop. 3395 3396 // If the compare-and-swap succeeded, then we found an unlocked object and we 3397 // have now locked it. 3398 z_csg(displacedHeader, box, 0, oop); 3399 assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture. 3400 z_bre(done); 3401 3402 // We did not see an unlocked object so try the fast recursive case. 3403 3404 z_sgr(currentHeader, Z_SP); 3405 load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place)); 3406 3407 z_ngr(currentHeader, temp); 3408 // z_brne(done); 3409 // z_release(); 3410 z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box); 3411 3412 z_bru(done); 3413 3414 if ((EmitSync & 0x01) == 0) { 3415 Register zero = temp; 3416 Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value. 3417 bind(object_has_monitor); 3418 // The object's monitor m is unlocked iff m->owner == NULL, 3419 // otherwise m->owner may contain a thread or a stack address. 3420 // 3421 // Try to CAS m->owner from NULL to current thread. 3422 z_lghi(zero, 0); 3423 // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. 3424 z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged); 3425 // Store a non-null value into the box. 3426 z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box); 3427 #ifdef ASSERT 3428 z_brne(done); 3429 // We've acquired the monitor, check some invariants. 3430 // Invariant 1: _recursions should be 0. 3431 asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged, 3432 "monitor->_recursions should be 0", -1); 3433 z_ltgr(zero, zero); // Set CR=EQ. 3434 #endif 3435 } 3436 bind(done); 3437 3438 BLOCK_COMMENT("} compiler_fast_lock_object"); 3439 // If locking was successful, CR should indicate 'EQ'. 
3440 // The compiler or the native wrapper generates a branch to the runtime call 3441 // _complete_monitor_locking_Java. 3442 } 3443 3444 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3445 Register displacedHeader = temp1; 3446 Register currentHeader = temp2; 3447 Register temp = temp1; 3448 Register monitor = temp2; 3449 3450 Label done, object_has_monitor; 3451 3452 BLOCK_COMMENT("compiler_fast_unlock_object {"); 3453 3454 if (try_bias) { 3455 biased_locking_exit(oop, currentHeader, done); 3456 } 3457 3458 // Find the lock address and load the displaced header from the stack. 3459 // If the displaced header is zero, we have a recursive unlock. 3460 load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes())); 3461 z_bre(done); 3462 3463 // Handle existing monitor. 3464 if ((EmitSync & 0x02) == 0) { 3465 // The object has an existing monitor iff (mark & monitor_value) != 0. 3466 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); 3467 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3468 z_nill(currentHeader, markOopDesc::monitor_value); 3469 z_brne(object_has_monitor); 3470 } 3471 3472 // Check if it is still a lightweight lock. This is true if we see 3473 // the stack address of the basicLock in the markOop of the object. 3474 // Copy box to currentHeader so that csg does not kill it. 3475 z_lgr(currentHeader, box); 3476 z_csg(currentHeader, displacedHeader, 0, oop); 3477 z_bru(done); // Csg sets CR as desired. 3478 3479 // Handle existing monitor. 3480 if ((EmitSync & 0x02) == 0) { 3481 bind(object_has_monitor); 3482 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set. 3483 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); 3484 z_brne(done); 3485 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3486 z_brne(done); 3487 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); 3488 z_brne(done); 3489 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); 3490 z_brne(done); 3491 z_release(); 3492 z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader); 3493 } 3494 3495 bind(done); 3496 3497 BLOCK_COMMENT("} compiler_fast_unlock_object"); 3498 // flag == EQ indicates success 3499 // flag == NE indicates failure 3500 } 3501 3502 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 3503 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 3504 bs->resolve_jobject(this, value, tmp1, tmp2); 3505 } 3506 3507 // Last_Java_sp must comply with the rules in frame_s390.hpp. 3508 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) { 3509 BLOCK_COMMENT("set_last_Java_frame {"); 3510 3511 // Always set last_Java_pc and flags first because once last_Java_sp 3512 // is visible, has_last_Java_frame is true and users will look at the 3513 // rest of the fields. (Note: flags should always be zero before we 3514 // get here, so it doesn't need to be set.) 3515 3516 // Verify that last_Java_pc was zeroed on return to Java.
3517 if (allow_relocation) { 3518 asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), 3519 Z_thread, 3520 "last_Java_pc not zeroed before leaving Java", 3521 0x200); 3522 } else { 3523 asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()), 3524 Z_thread, 3525 "last_Java_pc not zeroed before leaving Java", 3526 0x200); 3527 } 3528 3529 // When returning from calling out of Java mode, the frame anchor's 3530 // last_Java_pc will always be set to NULL. It is set here so that, 3531 // if we are doing a call to native (not VM) code, we capture the 3532 // known pc and don't have to rely on the native call having a 3533 // standard frame linkage where we can find the pc. 3534 if (last_Java_pc != noreg) { 3535 z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset())); 3536 } 3537 3538 // This membar release is not required on z/Architecture, since the sequence of stores 3539 // is maintained. Nevertheless, we leave it in to document the required ordering. 3540 // The implementation of z_release() should be empty. 3541 // z_release(); 3542 3543 z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset())); 3544 BLOCK_COMMENT("} set_last_Java_frame"); 3545 } 3546 3547 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) { 3548 BLOCK_COMMENT("reset_last_Java_frame {"); 3549 3550 if (allow_relocation) { 3551 asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()), 3552 Z_thread, 3553 "SP was not set, still zero", 3554 0x202); 3555 } else { 3556 asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()), 3557 Z_thread, 3558 "SP was not set, still zero", 3559 0x202); 3560 } 3561 3562 // _last_Java_sp = 0 3563 // Clearing storage must be atomic here, so don't use clear_mem()! 3564 store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0); 3565 3566 // _last_Java_pc = 0 3567 store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0); 3568 3569 BLOCK_COMMENT("} reset_last_Java_frame"); 3570 return; 3571 } 3572 3573 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) { 3574 assert_different_registers(sp, tmp1); 3575 3576 // We cannot trust that code generated by the C++ compiler saves R14 3577 // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at 3578 // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()). 3579 // Therefore we load the PC into tmp1 and let set_last_Java_frame() save 3580 // it into the frame anchor.
3581 get_PC(tmp1); 3582 set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation); 3583 } 3584 3585 void MacroAssembler::set_thread_state(JavaThreadState new_state) { 3586 z_release(); 3587 3588 assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction"); 3589 assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int"); 3590 store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false); 3591 } 3592 3593 void MacroAssembler::get_vm_result(Register oop_result) { 3594 verify_thread(); 3595 3596 z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); 3597 clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*)); 3598 3599 verify_oop(oop_result); 3600 } 3601 3602 void MacroAssembler::get_vm_result_2(Register result) { 3603 verify_thread(); 3604 3605 z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset())); 3606 clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*)); 3607 } 3608 3609 // We require that C code which does not return a value in vm_result will 3610 // leave it undisturbed. 3611 void MacroAssembler::set_vm_result(Register oop_result) { 3612 z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); 3613 } 3614 3615 // Explicit null checks (used for method handle code). 3616 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) { 3617 if (!ImplicitNullChecks) { 3618 NearLabel ok; 3619 3620 compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok); 3621 3622 // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address). 3623 address exception_entry = Interpreter::throw_NullPointerException_entry(); 3624 load_absolute_address(reg, exception_entry); 3625 z_br(reg); 3626 3627 bind(ok); 3628 } else { 3629 if (needs_explicit_null_check((intptr_t)offset)) { 3630 // Provoke OS NULL exception if reg = NULL by 3631 // accessing M[reg] w/o changing any registers. 3632 z_lg(tmp, 0, reg); 3633 } 3634 // else 3635 // Nothing to do, (later) access of M[reg + offset] 3636 // will provoke OS NULL exception if reg = NULL. 3637 } 3638 } 3639 3640 //------------------------------------- 3641 // Compressed Klass Pointers 3642 //------------------------------------- 3643 3644 // Klass pointer manipulations if compressed. 3645 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3646 Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible. 3647 address base = Universe::narrow_klass_base(); 3648 int shift = Universe::narrow_klass_shift(); 3649 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3650 3651 BLOCK_COMMENT("cKlass encoder {"); 3652 3653 #ifdef ASSERT 3654 Label ok; 3655 z_tmll(current, KlassAlignmentInBytes-1); // Check alignment. 3656 z_brc(Assembler::bcondAllZero, ok); 3657 // The plain disassembler does not recognize illtrap. It instead displays 3658 // a 32-bit value. Issuing two illtraps ensures the disassembler finds 3659 // the proper beginning of the next instruction.
3660 z_illtrap(0xee); 3661 z_illtrap(0xee); 3662 bind(ok); 3663 #endif 3664 3665 if (base != NULL) { 3666 unsigned int base_h = ((unsigned long)base)>>32; 3667 unsigned int base_l = (unsigned int)((unsigned long)base); 3668 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3669 lgr_if_needed(dst, current); 3670 z_aih(dst, -((int)base_h)); // Base has no set bits in lower half. 3671 } else if ((base_h == 0) && (base_l != 0)) { 3672 lgr_if_needed(dst, current); 3673 z_agfi(dst, -(int)base_l); 3674 } else { 3675 load_const(Z_R0, base); 3676 lgr_if_needed(dst, current); 3677 z_sgr(dst, Z_R0); 3678 } 3679 current = dst; 3680 } 3681 if (shift != 0) { 3682 assert (LogKlassAlignmentInBytes == shift, "decode alg wrong"); 3683 z_srlg(dst, current, shift); 3684 current = dst; 3685 } 3686 lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0). 3687 3688 BLOCK_COMMENT("} cKlass encoder"); 3689 } 3690 3691 // This function calculates the size of the code generated by 3692 // decode_klass_not_null(register dst, Register src) 3693 // when (Universe::heap() != NULL). Hence, if the instructions 3694 // it generates change, then this method needs to be updated. 3695 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3696 address base = Universe::narrow_klass_base(); 3697 int shift_size = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */ 3698 int addbase_size = 0; 3699 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3700 3701 if (base != NULL) { 3702 unsigned int base_h = ((unsigned long)base)>>32; 3703 unsigned int base_l = (unsigned int)((unsigned long)base); 3704 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3705 addbase_size += 6; /* aih */ 3706 } else if ((base_h == 0) && (base_l != 0)) { 3707 addbase_size += 6; /* algfi */ 3708 } else { 3709 addbase_size += load_const_size(); 3710 addbase_size += 4; /* algr */ 3711 } 3712 } 3713 #ifdef ASSERT 3714 addbase_size += 10; 3715 addbase_size += 2; // Extra sigill. 3716 #endif 3717 return addbase_size + shift_size; 3718 } 3719 3720 // !!! If the instructions that get generated here change 3721 // then function instr_size_for_decode_klass_not_null() 3722 // needs to get updated. 3723 // This variant of decode_klass_not_null() must generate predictable code! 3724 // The code must only depend on globally known parameters. 3725 void MacroAssembler::decode_klass_not_null(Register dst) { 3726 address base = Universe::narrow_klass_base(); 3727 int shift = Universe::narrow_klass_shift(); 3728 int beg_off = offset(); 3729 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3730 3731 BLOCK_COMMENT("cKlass decoder (const size) {"); 3732 3733 if (shift != 0) { // Shift required? 3734 z_sllg(dst, dst, shift); 3735 } 3736 if (base != NULL) { 3737 unsigned int base_h = ((unsigned long)base)>>32; 3738 unsigned int base_l = (unsigned int)((unsigned long)base); 3739 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3740 z_aih(dst, base_h); // Base has no set bits in lower half. 3741 } else if ((base_h == 0) && (base_l != 0)) { 3742 z_algfi(dst, base_l); // Base has no set bits in upper half. 3743 } else { 3744 load_const(Z_R0, base); // Base has set bits everywhere. 3745 z_algr(dst, Z_R0); 3746 } 3747 } 3748 3749 #ifdef ASSERT 3750 Label ok; 3751 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. 3752 z_brc(Assembler::bcondAllZero, ok); 3753 // The plain disassembler does not recognize illtrap. 
It instead displays 3754 // a 32-bit value. Issuing two illtraps ensures the disassembler finds 3755 // the proper beginning of the next instruction. 3756 z_illtrap(0xd1); 3757 z_illtrap(0xd1); 3758 bind(ok); 3759 #endif 3760 assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch."); 3761 3762 BLOCK_COMMENT("} cKlass decoder (const size)"); 3763 } 3764 3765 // This variant of decode_klass_not_null() is for cases where 3766 // 1) the size of the generated instructions may vary 3767 // 2) the result is (potentially) stored in a register different from the source. 3768 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3769 address base = Universe::narrow_klass_base(); 3770 int shift = Universe::narrow_klass_shift(); 3771 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3772 3773 BLOCK_COMMENT("cKlass decoder {"); 3774 3775 if (src == noreg) src = dst; 3776 3777 if (shift != 0) { // Shift or at least move required? 3778 z_sllg(dst, src, shift); 3779 } else { 3780 lgr_if_needed(dst, src); 3781 } 3782 3783 if (base != NULL) { 3784 unsigned int base_h = ((unsigned long)base)>>32; 3785 unsigned int base_l = (unsigned int)((unsigned long)base); 3786 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3787 z_aih(dst, base_h); // Base has no set bits in lower half. 3788 } else if ((base_h == 0) && (base_l != 0)) { 3789 z_algfi(dst, base_l); // Base has no set bits in upper half. 3790 } else { 3791 load_const_optimized(Z_R0, base); // Base has set bits everywhere. 3792 z_algr(dst, Z_R0); 3793 } 3794 } 3795 3796 #ifdef ASSERT 3797 Label ok; 3798 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. 3799 z_brc(Assembler::bcondAllZero, ok); 3800 // The plain disassembler does not recognize illtrap. It instead displays 3801 // a 32-bit value. Issuing two illtraps ensures the disassembler finds 3802 // the proper beginning of the next instruction. 3803 z_illtrap(0xd2); 3804 z_illtrap(0xd2); 3805 bind(ok); 3806 #endif 3807 BLOCK_COMMENT("} cKlass decoder"); 3808 } 3809 3810 void MacroAssembler::load_klass(Register klass, Address mem) { 3811 if (UseCompressedClassPointers) { 3812 z_llgf(klass, mem); 3813 // Attention: no null check here! 3814 decode_klass_not_null(klass); 3815 } else { 3816 z_lg(klass, mem); 3817 } 3818 } 3819 3820 void MacroAssembler::load_klass(Register klass, Register src_oop) { 3821 if (UseCompressedClassPointers) { 3822 z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop); 3823 // Attention: no null check here!
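// In effect (assuming the usual compressed class pointer settings), the
// decode below computes:
//   klass = ((uint64_t)narrow_klass << narrow_klass_shift()) + narrow_klass_base()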
3824 decode_klass_not_null(klass); 3825 } else { 3826 z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); 3827 } 3828 } 3829 3830 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) { 3831 assert_different_registers(Rheader, Rsrc_oop); 3832 load_klass(Rheader, Rsrc_oop); 3833 z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset())); 3834 } 3835 3836 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { 3837 if (UseCompressedClassPointers) { 3838 assert_different_registers(dst_oop, klass, Z_R0); 3839 if (ck == noreg) ck = klass; 3840 encode_klass_not_null(ck, klass); 3841 z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 3842 } else { 3843 z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 3844 } 3845 } 3846 3847 void MacroAssembler::store_klass_gap(Register s, Register d) { 3848 if (UseCompressedClassPointers) { 3849 assert(s != d, "not enough registers"); 3850 // Support s = noreg. 3851 if (s != noreg) { 3852 z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); 3853 } else { 3854 z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0); 3855 } 3856 } 3857 } 3858 3859 // Compare klass ptr in memory against klass ptr in register. 3860 // 3861 // Rop1 - klass in register, always uncompressed. 3862 // disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag. 3863 // Rbase - Base address of cKlass in memory. 3864 // maybeNULL - True if Rop1 possibly is a NULL. 3865 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) { 3866 3867 BLOCK_COMMENT("compare klass ptr {"); 3868 3869 if (UseCompressedClassPointers) { 3870 const int shift = Universe::narrow_klass_shift(); 3871 address base = Universe::narrow_klass_base(); 3872 3873 assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift"); 3874 assert_different_registers(Rop1, Z_R0); 3875 assert_different_registers(Rop1, Rbase, Z_R1); 3876 3877 // First encode register oop and then compare with cOop in memory. 3878 // This sequence saves an unnecessary cOop load and decode. 3879 if (base == NULL) { 3880 if (shift == 0) { 3881 z_cl(Rop1, disp, Rbase); // Unscaled 3882 } else { 3883 z_srlg(Z_R0, Rop1, shift); // ZeroBased 3884 z_cl(Z_R0, disp, Rbase); 3885 } 3886 } else { // HeapBased 3887 #ifdef ASSERT 3888 bool used_R0 = true; 3889 bool used_R1 = true; 3890 #endif 3891 Register current = Rop1; 3892 Label done; 3893 3894 if (maybeNULL) { // NULL ptr must be preserved! 3895 z_ltgr(Z_R0, current); 3896 z_bre(done); 3897 current = Z_R0; 3898 } 3899 3900 unsigned int base_h = ((unsigned long)base)>>32; 3901 unsigned int base_l = (unsigned int)((unsigned long)base); 3902 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3903 lgr_if_needed(Z_R0, current); 3904 z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. 3905 } else if ((base_h == 0) && (base_l != 0)) { 3906 lgr_if_needed(Z_R0, current); 3907 z_agfi(Z_R0, -(int)base_l); 3908 } else { 3909 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 3910 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. 
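// (Sketch of the complement trick: get_oop_base_complement() loads Z_R1 with
// pow2_offset - base and returns -pow2_offset, so the add above yields
// Z_R0 = Rop1 - base, the two rounding offsets canceling out.)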
3911 } 3912 3913 if (shift != 0) { 3914 z_srlg(Z_R0, Z_R0, shift); 3915 } 3916 bind(done); 3917 z_cl(Z_R0, disp, Rbase); 3918 #ifdef ASSERT 3919 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 3920 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 3921 #endif 3922 } 3923 } else { 3924 z_clg(Rop1, disp, Z_R0, Rbase); 3925 } 3926 BLOCK_COMMENT("} compare klass ptr"); 3927 } 3928 3929 //--------------------------- 3930 // Compressed oops 3931 //--------------------------- 3932 3933 void MacroAssembler::encode_heap_oop(Register oop) { 3934 oop_encoder(oop, oop, true /*maybe null*/); 3935 } 3936 3937 void MacroAssembler::encode_heap_oop_not_null(Register oop) { 3938 oop_encoder(oop, oop, false /*not null*/); 3939 } 3940 3941 // Called with something derived from the oop base. e.g. oop_base>>3. 3942 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) { 3943 unsigned int oop_base_ll = ((unsigned int)(oop_base >> 0)) & 0xffff; 3944 unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff; 3945 unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff; 3946 unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff; 3947 unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1) 3948 + (oop_base_lh == 0 ? 0:1) 3949 + (oop_base_hl == 0 ? 0:1) 3950 + (oop_base_hh == 0 ? 0:1); 3951 3952 assert(oop_base != 0, "This is for HeapBased cOops only"); 3953 3954 if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2. 3955 uint64_t pow2_offset = 0x10000 - oop_base_ll; 3956 if (pow2_offset < 0x8000) { // This might not be necessary. 3957 uint64_t oop_base2 = oop_base + pow2_offset; 3958 3959 oop_base_ll = ((unsigned int)(oop_base2 >> 0)) & 0xffff; 3960 oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff; 3961 oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff; 3962 oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff; 3963 n_notzero_parts = (oop_base_ll == 0 ? 0:1) + 3964 (oop_base_lh == 0 ? 0:1) + 3965 (oop_base_hl == 0 ? 0:1) + 3966 (oop_base_hh == 0 ? 0:1); 3967 if (n_notzero_parts == 1) { 3968 assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register"); 3969 return -pow2_offset; 3970 } 3971 } 3972 } 3973 return 0; 3974 } 3975 3976 // If base address is offset from a straight power of two by just a few pages, 3977 // return this offset to the caller for a possible later composite add. 3978 // TODO/FIX: will only work correctly for 4k pages. 3979 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) { 3980 int pow2_offset = get_oop_base_pow2_offset(oop_base); 3981 3982 load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible. 3983 3984 return pow2_offset; 3985 } 3986 3987 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) { 3988 int offset = get_oop_base(Rbase, oop_base); 3989 z_lcgr(Rbase, Rbase); 3990 return -offset; 3991 } 3992 3993 // Compare compressed oop in memory against oop in register. 3994 // Rop1 - Oop in register. 3995 // disp - Offset of cOop in memory. 3996 // Rbase - Base address of cOop in memory. 3997 // maybeNULL - True if Rop1 possibly is a NULL. 3998 // maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction. 
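// In effect, the HeapBased path below computes (sketch):
//   Z_R0 = (Rop1 - narrow_oop_base()) >> narrow_oop_shift();
//   compare32(Z_R0, cOop_in_memory);
// i.e. the register oop is encoded first, which saves loading and decoding
// the cOop from memory.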
3999 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) { 4000 Register Rbase = mem.baseOrR0(); 4001 Register Rindex = mem.indexOrR0(); 4002 int64_t disp = mem.disp(); 4003 4004 const int shift = Universe::narrow_oop_shift(); 4005 address base = Universe::narrow_oop_base(); 4006 4007 assert(UseCompressedOops, "must be on to call this method"); 4008 assert(Universe::heap() != NULL, "java heap must be initialized to call this method"); 4009 assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4010 assert_different_registers(Rop1, Z_R0); 4011 assert_different_registers(Rop1, Rbase, Z_R1); 4012 assert_different_registers(Rop1, Rindex, Z_R1); 4013 4014 BLOCK_COMMENT("compare heap oop {"); 4015 4016 // First encode register oop and then compare with cOop in memory. 4017 // This sequence saves an unnecessary cOop load and decode. 4018 if (base == NULL) { 4019 if (shift == 0) { 4020 z_cl(Rop1, disp, Rindex, Rbase); // Unscaled 4021 } else { 4022 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4023 z_cl(Z_R0, disp, Rindex, Rbase); 4024 } 4025 } else { // HeapBased 4026 #ifdef ASSERT 4027 bool used_R0 = true; 4028 bool used_R1 = true; 4029 #endif 4030 Label done; 4031 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4032 4033 if (maybeNULL) { // NULL ptr must be preserved! 4034 z_ltgr(Z_R0, Rop1); 4035 z_bre(done); 4036 } 4037 4038 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); 4039 z_srlg(Z_R0, Z_R0, shift); 4040 4041 bind(done); 4042 z_cl(Z_R0, disp, Rindex, Rbase); 4043 #ifdef ASSERT 4044 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4045 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4046 #endif 4047 } 4048 BLOCK_COMMENT("} compare heap oop"); 4049 } 4050 4051 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 4052 const Address& addr, Register val, 4053 Register tmp1, Register tmp2, Register tmp3) { 4054 assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL | 4055 ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator"); 4056 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4057 decorators = AccessInternal::decorator_fixup(decorators); 4058 bool as_raw = (decorators & AS_RAW) != 0; 4059 if (as_raw) { 4060 bs->BarrierSetAssembler::store_at(this, decorators, type, 4061 addr, val, 4062 tmp1, tmp2, tmp3); 4063 } else { 4064 bs->store_at(this, decorators, type, 4065 addr, val, 4066 tmp1, tmp2, tmp3); 4067 } 4068 } 4069 4070 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 4071 const Address& addr, Register dst, 4072 Register tmp1, Register tmp2, Label *is_null) { 4073 assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL | 4074 ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator"); 4075 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4076 decorators = AccessInternal::decorator_fixup(decorators); 4077 bool as_raw = (decorators & AS_RAW) != 0; 4078 if (as_raw) { 4079 bs->BarrierSetAssembler::load_at(this, decorators, type, 4080 addr, dst, 4081 tmp1, tmp2, is_null); 4082 } else { 4083 bs->load_at(this, decorators, type, 4084 addr, dst, 4085 tmp1, tmp2, is_null); 4086 } 4087 } 4088 4089 void MacroAssembler::load_heap_oop(Register dest, const Address &a, 4090 Register tmp1, Register tmp2, 4091 DecoratorSet decorators, Label *is_null) { 4092 access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null); 4093 } 4094 4095 void 
MacroAssembler::store_heap_oop(Register Roop, const Address &a, 4096 Register tmp1, Register tmp2, Register tmp3, 4097 DecoratorSet decorators) { 4098 access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3); 4099 } 4100 4101 //------------------------------------------------- 4102 // Encode compressed oop. Generally usable encoder. 4103 //------------------------------------------------- 4104 // Rsrc - contains regular oop on entry. It remains unchanged. 4105 // Rdst - contains compressed oop on exit. 4106 // Rdst and Rsrc may indicate same register, in which case Rsrc does not remain unchanged. 4107 // 4108 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality. 4109 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance. 4110 // 4111 // only32bitValid is set, if later code only uses the lower 32 bits. In this 4112 // case we must not fix the upper 32 bits. 4113 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, 4114 Register Rbase, int pow2_offset, bool only32bitValid) { 4115 4116 const address oop_base = Universe::narrow_oop_base(); 4117 const int oop_shift = Universe::narrow_oop_shift(); 4118 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4119 4120 assert(UseCompressedOops, "must be on to call this method"); 4121 assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder"); 4122 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4123 4124 if (disjoint || (oop_base == NULL)) { 4125 BLOCK_COMMENT("cOop encoder zeroBase {"); 4126 if (oop_shift == 0) { 4127 if (oop_base != NULL && !only32bitValid) { 4128 z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again. 4129 } else { 4130 lgr_if_needed(Rdst, Rsrc); 4131 } 4132 } else { 4133 z_srlg(Rdst, Rsrc, oop_shift); 4134 if (oop_base != NULL && !only32bitValid) { 4135 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4136 } 4137 } 4138 BLOCK_COMMENT("} cOop encoder zeroBase"); 4139 return; 4140 } 4141 4142 bool used_R0 = false; 4143 bool used_R1 = false; 4144 4145 BLOCK_COMMENT("cOop encoder general {"); 4146 assert_different_registers(Rdst, Z_R1); 4147 assert_different_registers(Rsrc, Rbase); 4148 if (maybeNULL) { 4149 Label done; 4150 // We reorder shifting and subtracting, so that we can compare 4151 // and shift in parallel: 4152 // 4153 // cycle 0: potential LoadN, base = <const> 4154 // cycle 1: base = !base dst = src >> 3, cmp cr = (src != 0) 4155 // cycle 2: if (cr) br, dst = dst + base + offset 4156 4157 // Get oop_base components. 4158 if (pow2_offset == -1) { 4159 if (Rdst == Rbase) { 4160 if (Rdst == Z_R1 || Rsrc == Z_R1) { 4161 Rbase = Z_R0; 4162 used_R0 = true; 4163 } else { 4164 Rdst = Z_R1; 4165 used_R1 = true; 4166 } 4167 } 4168 if (Rbase == Z_R1) { 4169 used_R1 = true; 4170 } 4171 pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift); 4172 } 4173 assert_different_registers(Rdst, Rbase); 4174 4175 // Check for NULL oop (must be left alone) and shift. 4176 if (oop_shift != 0) { // Shift out alignment bits 4177 if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set. 4178 z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4179 } else { 4180 z_srlg(Rdst, Rsrc, oop_shift); 4181 z_ltgr(Rsrc, Rsrc); // This is the recommended way of testing for zero. 
4182 // This probably is faster, as it does not write a register. No! 4183 // z_cghi(Rsrc, 0); 4184 } 4185 } else { 4186 z_ltgr(Rdst, Rsrc); // Move NULL to result register. 4187 } 4188 z_bre(done); 4189 4190 // Subtract oop_base components. 4191 if ((Rdst == Z_R0) || (Rbase == Z_R0)) { 4192 z_algr(Rdst, Rbase); 4193 if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); } 4194 } else { 4195 add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst); 4196 } 4197 if (!only32bitValid) { 4198 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4199 } 4200 bind(done); 4201 4202 } else { // not null 4203 // Get oop_base components. 4204 if (pow2_offset == -1) { 4205 pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base); 4206 } 4207 4208 // Subtract oop_base components and shift. 4209 if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) { 4210 // Don't use lay instruction. 4211 if (Rdst == Rsrc) { 4212 z_algr(Rdst, Rbase); 4213 } else { 4214 lgr_if_needed(Rdst, Rbase); 4215 z_algr(Rdst, Rsrc); 4216 } 4217 if (pow2_offset != 0) add2reg(Rdst, pow2_offset); 4218 } else { 4219 add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc); 4220 } 4221 if (oop_shift != 0) { // Shift out alignment bits. 4222 z_srlg(Rdst, Rdst, oop_shift); 4223 } 4224 if (!only32bitValid) { 4225 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4226 } 4227 } 4228 #ifdef ASSERT 4229 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); } 4230 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); } 4231 #endif 4232 BLOCK_COMMENT("} cOop encoder general"); 4233 } 4234 4235 //------------------------------------------------- 4236 // decode compressed oop. Generally usable decoder. 4237 //------------------------------------------------- 4238 // Rsrc - contains compressed oop on entry. 4239 // Rdst - contains regular oop on exit. 4240 // Rdst and Rsrc may indicate same register. 4241 // Rdst must not be the same register as Rbase, if Rbase was preloaded (before call). 4242 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch. 4243 // Rbase - register to use for the base 4244 // pow2_offset - offset of base to nice value. If -1, base must be loaded. 4245 // For performance, it is good to 4246 // - avoid Z_R0 for any of the argument registers. 4247 // - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance. 4248 // - avoid Z_R1 for Rdst if Rdst == Rbase. 4249 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) { 4250 4251 const address oop_base = Universe::narrow_oop_base(); 4252 const int oop_shift = Universe::narrow_oop_shift(); 4253 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4254 4255 assert(UseCompressedOops, "must be on to call this method"); 4256 assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder"); 4257 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), 4258 "cOop encoder detected bad shift"); 4259 4260 // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary. 
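// Decode sketch: oop = ((uint64_t)cOop << shift) + base, where a NULL cOop
// must stay NULL. The cases below differ only in how cheaply the "+ base"
// can be done: an OR into the high word suffices for a disjoint base; a
// full add (plus a possible pow2_offset correction) is needed otherwise.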
4261 4262 if (oop_base != NULL) { 4263 unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff; 4264 unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff; 4265 unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff; 4266 if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) { 4267 BLOCK_COMMENT("cOop decoder disjointBase {"); 4268 // We do not need to load the base. Instead, we can install the upper bits 4269 // with an OR instead of an ADD. 4270 Label done; 4271 4272 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4273 if (maybeNULL) { // NULL ptr must be preserved! 4274 z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4275 z_bre(done); 4276 } else { 4277 z_sllg(Rdst, Rsrc, oop_shift); // Logical shift leaves condition code alone. 4278 } 4279 if ((oop_base_hl != 0) && (oop_base_hh != 0)) { 4280 z_oihf(Rdst, oop_base_hf); 4281 } else if (oop_base_hl != 0) { 4282 z_oihl(Rdst, oop_base_hl); 4283 } else { 4284 assert(oop_base_hh != 0, "not heapbased mode"); 4285 z_oihh(Rdst, oop_base_hh); 4286 } 4287 bind(done); 4288 BLOCK_COMMENT("} cOop decoder disjointBase"); 4289 } else { 4290 BLOCK_COMMENT("cOop decoder general {"); 4291 // There are three decode steps: 4292 // scale oop offset (shift left) 4293 // get base (in reg) and pow2_offset (constant) 4294 // add base, pow2_offset, and oop offset 4295 // The following register overlap situations may exist: 4296 // Rdst == Rsrc, Rbase any other 4297 // not a problem. Scaling in-place leaves Rbase undisturbed. 4298 // Loading Rbase does not impact the scaled offset. 4299 // Rdst == Rbase, Rsrc any other 4300 // scaling would destroy a possibly preloaded Rbase. Loading Rbase 4301 // would destroy the scaled offset. 4302 // Remedy: use Rdst_tmp if Rbase has been preloaded. 4303 // use Rbase_tmp if base has to be loaded. 4304 // Rsrc == Rbase, Rdst any other 4305 // Only possible without preloaded Rbase. 4306 // Loading Rbase does not destroy compressed oop because it was scaled into Rdst before. 4307 // Rsrc == Rbase, Rdst == Rbase 4308 // Only possible without preloaded Rbase. 4309 // Loading Rbase would destroy compressed oop. Scaling in-place is ok. 4310 // Remedy: use Rbase_tmp. 4311 // 4312 Label done; 4313 Register Rdst_tmp = Rdst; 4314 Register Rbase_tmp = Rbase; 4315 bool used_R0 = false; 4316 bool used_R1 = false; 4317 bool base_preloaded = pow2_offset >= 0; 4318 guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller"); 4319 assert(oop_shift != 0, "room for optimization"); 4320 4321 // Check if we need to use scratch registers. 4322 if (Rdst == Rbase) { 4323 assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg"); 4324 if (Rdst != Rsrc) { 4325 if (base_preloaded) { Rdst_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; } 4326 else { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; } 4327 } else { 4328 Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; 4329 } 4330 } 4331 if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase); 4332 4333 // Scale oop and check for NULL. 4334 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4335 if (maybeNULL) { // NULL ptr must be preserved! 4336 z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4337 z_bre(done); 4338 } else { 4339 z_sllg(Rdst_tmp, Rsrc, oop_shift); // Logical shift leaves condition code alone. 
4340 } 4341 4342 // Get oop_base components. 4343 if (!base_preloaded) { 4344 pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base); 4345 } 4346 4347 // Add up all components. 4348 if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) { 4349 z_algr(Rdst_tmp, Rbase_tmp); 4350 if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); } 4351 } else { 4352 add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp); 4353 } 4354 4355 bind(done); 4356 lgr_if_needed(Rdst, Rdst_tmp); 4357 #ifdef ASSERT 4358 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); } 4359 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); } 4360 #endif 4361 BLOCK_COMMENT("} cOop decoder general"); 4362 } 4363 } else { 4364 BLOCK_COMMENT("cOop decoder zeroBase {"); 4365 if (oop_shift == 0) { 4366 lgr_if_needed(Rdst, Rsrc); 4367 } else { 4368 z_sllg(Rdst, Rsrc, oop_shift); 4369 } 4370 BLOCK_COMMENT("} cOop decoder zeroBase"); 4371 } 4372 } 4373 4374 // ((OopHandle)result).resolve(); 4375 void MacroAssembler::resolve_oop_handle(Register result) { 4376 // OopHandle::resolve is an indirection. 4377 z_lg(result, 0, result); 4378 } 4379 4380 void MacroAssembler::load_mirror(Register mirror, Register method) { 4381 mem2reg_opt(mirror, Address(method, Method::const_offset())); 4382 mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset())); 4383 mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes())); 4384 mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset())); 4385 resolve_oop_handle(mirror); 4386 } 4387 4388 //--------------------------------------------------------------- 4389 //--- Operations on arrays. 4390 //--------------------------------------------------------------- 4391 4392 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4393 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4394 // work registers anyway. 4395 // Actually, only r0, r1, and r5 are killed. 4396 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) { 4397 // Src_addr is evenReg. 4398 // Src_len is odd_Reg. 4399 4400 int block_start = offset(); 4401 Register tmp_reg = src_len; // Holds target instr addr for EX. 4402 Register dst_len = Z_R1; // Holds dst len for MVCLE. 4403 Register dst_addr = Z_R0; // Holds dst addr for MVCLE. 4404 4405 Label doXC, doMVCLE, done; 4406 4407 BLOCK_COMMENT("Clear_Array {"); 4408 4409 // Check for zero len and convert to long. 4410 z_ltgfr(src_len, cnt_arg); // Remember casted value for doSTG case. 4411 z_bre(done); // Nothing to do if len == 0. 4412 4413 // Prefetch data to be cleared. 4414 if (VM_Version::has_Prefetch()) { 4415 z_pfd(0x02, 0, Z_R0, base_pointer_arg); 4416 z_pfd(0x02, 256, Z_R0, base_pointer_arg); 4417 } 4418 4419 z_sllg(dst_len, src_len, 3); // #bytes to clear. 4420 z_cghi(src_len, 32); // Check for len <= 256 bytes (<=32 DW). 4421 z_brnh(doXC); // If so, use executed XC to clear. 4422 4423 // MVCLE: initialize long arrays (general case). 4424 bind(doMVCLE); 4425 z_lgr(dst_addr, base_pointer_arg); 4426 clear_reg(src_len, true, false); // Src len of MVCLE is zero. 4427 4428 MacroAssembler::move_long_ext(dst_addr, src_addr, 0); 4429 z_bru(done); 4430 4431 // XC: initialize short arrays. 4432 Label XC_template; // Instr template, never exec directly! 
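// Note: the XC below is a template only. It is never executed inline; it is
// only ever executed via EX/EXRL, which OR the low byte of dst_len (here:
// #bytes-1) into the template's length field. Since XC of an operand with
// itself zeroes it, a single executed XC clears up to 256 bytes.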
4433 bind(XC_template); 4434 z_xc(0,0,base_pointer_arg,0,base_pointer_arg); 4435 4436 bind(doXC); 4437 add2reg(dst_len, -1); // Get #bytes-1 for EXECUTE. 4438 if (VM_Version::has_ExecuteExtensions()) { 4439 z_exrl(dst_len, XC_template); // Execute XC with var. len. 4440 } else { 4441 z_larl(tmp_reg, XC_template); 4442 z_ex(dst_len,0,Z_R0,tmp_reg); // Execute XC with var. len. 4443 } 4444 // z_bru(done); // fallthru 4445 4446 bind(done); 4447 4448 BLOCK_COMMENT("} Clear_Array"); 4449 4450 int block_end = offset(); 4451 return block_end - block_start; 4452 } 4453 4454 // Compiler ensures base is doubleword aligned and cnt is count of doublewords. 4455 // Emitter does not KILL any arguments nor work registers. 4456 // Emitter generates up to 16 XC instructions, depending on the array length. 4457 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) { 4458 int block_start = offset(); 4459 int off; 4460 int lineSize_Bytes = AllocatePrefetchStepSize; 4461 int lineSize_DW = AllocatePrefetchStepSize>>LogBytesPerWord; 4462 bool doPrefetch = VM_Version::has_Prefetch(); 4463 int XC_maxlen = 256; 4464 int numXCInstr = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0; 4465 4466 BLOCK_COMMENT("Clear_Array_Const {"); 4467 assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only"); 4468 4469 // Do less prefetching for very short arrays. 4470 if (numXCInstr > 0) { 4471 // Prefetch only some cache lines, then begin clearing. 4472 if (doPrefetch) { 4473 if (cnt*BytesPerWord <= lineSize_Bytes/4) { // If less than 1/4 of a cache line to clear, 4474 z_pfd(0x02, 0, Z_R0, base); // prefetch just the first cache line. 4475 } else { 4476 assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines"); 4477 for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) { 4478 z_pfd(0x02, off*lineSize_Bytes, Z_R0, base); 4479 } 4480 } 4481 } 4482 4483 for (off=0; off<(numXCInstr-1); off++) { 4484 z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base); 4485 4486 // Prefetch some cache lines in advance. 4487 if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) { 4488 z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base); 4489 } 4490 } 4491 if (off*XC_maxlen < cnt*BytesPerWord) { 4492 z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base); 4493 } 4494 } 4495 BLOCK_COMMENT("} Clear_Array_Const"); 4496 4497 int block_end = offset(); 4498 return block_end - block_start; 4499 } 4500 4501 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4502 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4503 // work registers anyway. 4504 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed. 4505 // 4506 // For very large arrays, exploit MVCLE H/W support. 4507 // MVCLE instruction automatically exploits H/W-optimized page mover. 4508 // - Bytes up to next page boundary are cleared with a series of XC to self. 4509 // - All full pages are cleared with the page mover H/W assist. 4510 // - Remaining bytes are again cleared by a series of XC to self. 4511 // 4512 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) { 4513 // Src_addr is evenReg. 4514 // Src_len is odd_Reg. 4515 4516 int block_start = offset(); 4517 Register dst_len = Z_R1; // Holds dst len for MVCLE. 4518 Register dst_addr = Z_R0; // Holds dst addr for MVCLE. 
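  // Reminder on the MVCLE idiom used below: with a source length of zero, MVCLE
  // fills the whole destination with the padding byte (0x00 here), so a "copy"
  // of nothing clears dst_len bytes and can exploit the H/W page-clear assist.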

  BLOCK_COMMENT("Clear_Array_Const_Big {");

  // Get len to clear.
  load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8

  // Prepare other args to MVCLE.
  z_lgr(dst_addr, base_pointer_arg);
  // Indicate unused result.
  (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.

  // Clear.
  MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
  BLOCK_COMMENT("} Clear_Array_Const_Big");

  int block_end = offset();
  return block_end - block_start;
}

// Raw memory copy for the allocator: src and dst must be doubleword aligned
// and disjoint; cnt is the number of doublewords to copy.
unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                                           Register cnt_reg,
                                                           Register tmp1_reg, Register tmp2_reg) {
  // Tmp1 is oddReg.
  // Tmp2 is evenReg.

  int block_start = offset();
  Label doMVC, doMVCLE, done, MVC_template;

  BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");

  // Check for zero len and convert to long.
  z_ltgfr(cnt_reg, cnt_reg);  // Sign-extend cnt; sets CC for the zero-length check.
  z_bre(done);                // Nothing to do if len == 0.

  z_sllg(Z_R1, cnt_reg, 3);   // Dst len in bytes. Calculated early to have the result ready.

  z_cghi(cnt_reg, 32);        // Check for len <= 256 bytes (<=32 DW).
  z_brnh(doMVC);              // If so, use executed MVC to copy.

  bind(doMVCLE);              // A lot of data (more than 256 bytes).
  // Prep dest reg pair.
  z_lgr(Z_R0, dst_reg);       // dst addr
  // Dst len already in Z_R1.
  // Prep src reg pair.
  z_lgr(tmp2_reg, src_reg);   // src addr
  z_lgr(tmp1_reg, Z_R1);      // Src len same as dst len.

  // Do the copy.
  move_long_ext(Z_R0, tmp2_reg, 0xb0);  // Bypass cache.
  z_bru(done);                          // All done.

  bind(MVC_template);         // Just some data (not more than 256 bytes).
  z_mvc(0, 0, dst_reg, 0, src_reg);

  bind(doMVC);

  if (VM_Version::has_ExecuteExtensions()) {
    add2reg(Z_R1, -1);
  } else {
    add2reg(tmp1_reg, -1, Z_R1);
    z_larl(Z_R1, MVC_template);
  }

  if (VM_Version::has_Prefetch()) {
    z_pfd(1, 0,Z_R0,src_reg);
    z_pfd(2, 0,Z_R0,dst_reg);
    // z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
    // z_pfd(2,256,Z_R0,dst_reg);
  }

  if (VM_Version::has_ExecuteExtensions()) {
    z_exrl(Z_R1, MVC_template);
  } else {
    z_ex(tmp1_reg, 0, Z_R0, Z_R1);
  }

  bind(done);

  BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");

  int block_end = offset();
  return block_end - block_start;
}

//------------------------------------------------------
//   Special String Intrinsics. Implementation
//------------------------------------------------------

// Intrinsics for CompactStrings

// Compress char[] to byte[].
//   Restores: src, dst
//   Uses: cnt
//   Kills: tmp, Z_R0, Z_R1.
//   Early clobber: result.
// Note:
//   cnt is signed int. Do not rely on high word!
//   counts # characters, not bytes.
// The result is the number of characters copied before the first incompatible character was found.
// If precise is true, the processing stops exactly at this point. Otherwise, it may overshoot by
// a few bytes; the result always indicates the number of successfully copied characters.
// When used as a character index, the returned value points to the first incompatible character.
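// Example (precise == true): cnt == 8 with src == { 'a', 'b', 0x0100, ... }
// copies "ab" to dst and returns 2, the index of the first incompatible character.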
//
// Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure:
// - Different number of characters may have been written to dead array (if precise is false).
// - Returns a number <cnt instead of 0. (Result gets compared with cnt.)
unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
                                             Register tmp, bool precise) {
  assert_different_registers(Z_R0, Z_R1, result, src, dst, cnt, tmp);

  if (precise) {
    BLOCK_COMMENT("encode_iso_array {");
  } else {
    BLOCK_COMMENT("string_compress {");
  }
  int block_start = offset();

  Register Rsrc  = src;
  Register Rdst  = dst;
  Register Rix   = tmp;
  Register Rcnt  = cnt;
  Register Rmask = result;  // holds incompatibility check mask until result value is stored.
  Label ScalarShortcut, AllDone;

  z_iilf(Rmask, 0xFF00FF00);
  z_iihf(Rmask, 0xFF00FF00);

#if 0  // Sacrifice shortcuts for code compactness
  {
    //---< shortcuts for short strings (very frequent) >---
    // Strings with 4 and 8 characters were found to occur very frequently.
    // Therefore, we handle them right away with minimal overhead.
    Label skipShortcut, skip4Shortcut, skip8Shortcut;
    Register Rout = Z_R0;
    z_chi(Rcnt, 4);
    z_brne(skip4Shortcut);               // 4 characters are very frequent
      z_lg(Z_R0, 0, Rsrc);               // Treat exactly 4 characters specially.
      if (VM_Version::has_DistinctOpnds()) {
        Rout = Z_R0;
        z_ngrk(Rix, Z_R0, Rmask);
      } else {
        Rout = Rix;
        z_lgr(Rix, Z_R0);
        z_ngr(Z_R0, Rmask);
      }
      z_brnz(skipShortcut);
      z_stcmh(Rout, 5, 0, Rdst);
      z_stcm(Rout, 5, 2, Rdst);
      z_lgfr(result, Rcnt);
      z_bru(AllDone);
    bind(skip4Shortcut);

    z_chi(Rcnt, 8);
    z_brne(skip8Shortcut);               // There's more to do...
      z_lmg(Z_R0, Z_R1, 0, Rsrc);        // Treat exactly 8 characters specially.
      if (VM_Version::has_DistinctOpnds()) {
        Rout = Z_R0;
        z_ogrk(Rix, Z_R0, Z_R1);
        z_ngr(Rix, Rmask);
      } else {
        Rout = Rix;
        z_lgr(Rix, Z_R0);
        z_ogr(Z_R0, Z_R1);
        z_ngr(Z_R0, Rmask);
      }
      z_brnz(skipShortcut);
      z_stcmh(Rout, 5, 0, Rdst);
      z_stcm(Rout, 5, 2, Rdst);
      z_stcmh(Z_R1, 5, 4, Rdst);
      z_stcm(Z_R1, 5, 6, Rdst);
      z_lgfr(result, Rcnt);
      z_bru(AllDone);

    bind(skip8Shortcut);
    clear_reg(Z_R0, true, false);        // #characters already processed (none). Precond for scalar loop.
    z_brl(ScalarShortcut);               // Just a few characters

    bind(skipShortcut);
  }
#endif
  clear_reg(Z_R0);                       // Make sure register is properly initialized.

  if (VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 32;        // Minimum #characters required to use vector instructions.
                                         // Otherwise just do nothing in vector mode.
                                         // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
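    // Worked example: Rcnt == 100 gives Rix = 100 >> 5 = 3 vector iterations
    // covering 96 characters; Z_R0 then records 3 << 5 == 96 as the number of
    // characters handled by the vector loop (used by the cleanup code below).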
4706 const int log_min_vcnt = exact_log2(min_vcnt); 4707 Label VectorLoop, VectorDone, VectorBreak; 4708 4709 VectorRegister Vtmp1 = Z_V16; 4710 VectorRegister Vtmp2 = Z_V17; 4711 VectorRegister Vmask = Z_V18; 4712 VectorRegister Vzero = Z_V19; 4713 VectorRegister Vsrc_first = Z_V20; 4714 VectorRegister Vsrc_last = Z_V23; 4715 4716 assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error"); 4717 assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()"); 4718 z_srak(Rix, Rcnt, log_min_vcnt); // # vector loop iterations 4719 z_brz(VectorDone); // not enough data for vector loop 4720 4721 z_vzero(Vzero); // all zeroes 4722 z_vgmh(Vmask, 0, 7); // generate 0xff00 mask for all 2-byte elements 4723 z_sllg(Z_R0, Rix, log_min_vcnt); // remember #chars that will be processed by vector loop 4724 4725 bind(VectorLoop); 4726 z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc); 4727 add2reg(Rsrc, min_vcnt*2); 4728 4729 //---< check for incompatible character >--- 4730 z_vo(Vtmp1, Z_V20, Z_V21); 4731 z_vo(Vtmp2, Z_V22, Z_V23); 4732 z_vo(Vtmp1, Vtmp1, Vtmp2); 4733 z_vn(Vtmp1, Vtmp1, Vmask); 4734 z_vceqhs(Vtmp1, Vtmp1, Vzero); // high half of all chars must be zero for successful compress. 4735 z_bvnt(VectorBreak); // break vector loop if not all vector elements compare eq -> incompatible character found. 4736 // re-process data from current iteration in break handler. 4737 4738 //---< pack & store characters >--- 4739 z_vpkh(Vtmp1, Z_V20, Z_V21); // pack (src1, src2) -> tmp1 4740 z_vpkh(Vtmp2, Z_V22, Z_V23); // pack (src3, src4) -> tmp2 4741 z_vstm(Vtmp1, Vtmp2, 0, Rdst); // store packed string 4742 add2reg(Rdst, min_vcnt); 4743 4744 z_brct(Rix, VectorLoop); 4745 4746 z_bru(VectorDone); 4747 4748 bind(VectorBreak); 4749 add2reg(Rsrc, -min_vcnt*2); // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not. 4750 z_sll(Rix, log_min_vcnt); // # chars processed so far in VectorLoop, excl. current iteration. 4751 z_sr(Z_R0, Rix); // correct # chars processed in total. 4752 4753 bind(VectorDone); 4754 } 4755 4756 { 4757 const int min_cnt = 8; // Minimum #characters required to use unrolled loop. 4758 // Otherwise just do nothing in unrolled loop. 4759 // Must be multiple of 8. 4760 const int log_min_cnt = exact_log2(min_cnt); 4761 Label UnrolledLoop, UnrolledDone, UnrolledBreak; 4762 4763 if (VM_Version::has_DistinctOpnds()) { 4764 z_srk(Rix, Rcnt, Z_R0); // remaining # chars to compress in unrolled loop 4765 } else { 4766 z_lr(Rix, Rcnt); 4767 z_sr(Rix, Z_R0); 4768 } 4769 z_sra(Rix, log_min_cnt); // unrolled loop count 4770 z_brz(UnrolledDone); 4771 4772 bind(UnrolledLoop); 4773 z_lmg(Z_R0, Z_R1, 0, Rsrc); 4774 if (precise) { 4775 z_ogr(Z_R1, Z_R0); // check all 8 chars for incompatibility 4776 z_ngr(Z_R1, Rmask); 4777 z_brnz(UnrolledBreak); 4778 4779 z_lg(Z_R1, 8, Rsrc); // reload destroyed register 4780 z_stcmh(Z_R0, 5, 0, Rdst); 4781 z_stcm(Z_R0, 5, 2, Rdst); 4782 } else { 4783 z_stcmh(Z_R0, 5, 0, Rdst); 4784 z_stcm(Z_R0, 5, 2, Rdst); 4785 4786 z_ogr(Z_R0, Z_R1); 4787 z_ngr(Z_R0, Rmask); 4788 z_brnz(UnrolledBreak); 4789 } 4790 z_stcmh(Z_R1, 5, 4, Rdst); 4791 z_stcm(Z_R1, 5, 6, Rdst); 4792 4793 add2reg(Rsrc, min_cnt*2); 4794 add2reg(Rdst, min_cnt); 4795 z_brct(Rix, UnrolledLoop); 4796 4797 z_lgfr(Z_R0, Rcnt); // # chars processed in total after unrolled loop. 4798 z_nilf(Z_R0, ~(min_cnt-1)); 4799 z_tmll(Rcnt, min_cnt-1); 4800 z_brnaz(ScalarShortcut); // if all bits zero, there is nothing left to do for scalar loop. 4801 // Rix == 0 in all cases. 
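    // Worked example: Rcnt == 44 with 32 characters done by the vector loop and
    // 8 by the unrolled loop gives Z_R0 == (44 & ~7) == 40; TMLL finds a
    // remainder of 44 & 7 == 4, so the branch above sends the last 4 characters
    // to the scalar loop. With no remainder, we fall through and finish here.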
4802 z_sllg(Z_R1, Rcnt, 1); // # src bytes already processed. Only lower 32 bits are valid! 4803 // Z_R1 contents must be treated as unsigned operand! For huge strings, 4804 // (Rcnt >= 2**30), the value may spill into the sign bit by sllg. 4805 z_lgfr(result, Rcnt); // all characters processed. 4806 z_slgfr(Rdst, Rcnt); // restore ptr 4807 z_slgfr(Rsrc, Z_R1); // restore ptr, double the element count for Rsrc restore 4808 z_bru(AllDone); 4809 4810 bind(UnrolledBreak); 4811 z_lgfr(Z_R0, Rcnt); // # chars processed in total after unrolled loop 4812 z_nilf(Z_R0, ~(min_cnt-1)); 4813 z_sll(Rix, log_min_cnt); // # chars not yet processed in UnrolledLoop (due to break), broken iteration not included. 4814 z_sr(Z_R0, Rix); // fix # chars processed OK so far. 4815 if (!precise) { 4816 z_lgfr(result, Z_R0); 4817 z_sllg(Z_R1, Z_R0, 1); // # src bytes already processed. Only lower 32 bits are valid! 4818 // Z_R1 contents must be treated as unsigned operand! For huge strings, 4819 // (Rcnt >= 2**30), the value may spill into the sign bit by sllg. 4820 z_aghi(result, min_cnt/2); // min_cnt/2 characters have already been written 4821 // but ptrs were not updated yet. 4822 z_slgfr(Rdst, Z_R0); // restore ptr 4823 z_slgfr(Rsrc, Z_R1); // restore ptr, double the element count for Rsrc restore 4824 z_bru(AllDone); 4825 } 4826 bind(UnrolledDone); 4827 } 4828 4829 { 4830 Label ScalarLoop, ScalarDone, ScalarBreak; 4831 4832 bind(ScalarShortcut); 4833 z_ltgfr(result, Rcnt); 4834 z_brz(AllDone); 4835 4836 #if 0 // Sacrifice shortcuts for code compactness 4837 { 4838 //---< Special treatment for very short strings (one or two characters) >--- 4839 // For these strings, we are sure that the above code was skipped. 4840 // Thus, no registers were modified, register restore is not required. 4841 Label ScalarDoit, Scalar2Char; 4842 z_chi(Rcnt, 2); 4843 z_brh(ScalarDoit); 4844 z_llh(Z_R1, 0, Z_R0, Rsrc); 4845 z_bre(Scalar2Char); 4846 z_tmll(Z_R1, 0xff00); 4847 z_lghi(result, 0); // cnt == 1, first char invalid, no chars successfully processed 4848 z_brnaz(AllDone); 4849 z_stc(Z_R1, 0, Z_R0, Rdst); 4850 z_lghi(result, 1); 4851 z_bru(AllDone); 4852 4853 bind(Scalar2Char); 4854 z_llh(Z_R0, 2, Z_R0, Rsrc); 4855 z_tmll(Z_R1, 0xff00); 4856 z_lghi(result, 0); // cnt == 2, first char invalid, no chars successfully processed 4857 z_brnaz(AllDone); 4858 z_stc(Z_R1, 0, Z_R0, Rdst); 4859 z_tmll(Z_R0, 0xff00); 4860 z_lghi(result, 1); // cnt == 2, second char invalid, one char successfully processed 4861 z_brnaz(AllDone); 4862 z_stc(Z_R0, 1, Z_R0, Rdst); 4863 z_lghi(result, 2); 4864 z_bru(AllDone); 4865 4866 bind(ScalarDoit); 4867 } 4868 #endif 4869 4870 if (VM_Version::has_DistinctOpnds()) { 4871 z_srk(Rix, Rcnt, Z_R0); // remaining # chars to compress in unrolled loop 4872 } else { 4873 z_lr(Rix, Rcnt); 4874 z_sr(Rix, Z_R0); 4875 } 4876 z_lgfr(result, Rcnt); // # processed characters (if all runs ok). 
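    // Note: LGFR does not change the condition code, so the branch below still
    // acts on the CC produced by the Rix calculation above.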
4877 z_brz(ScalarDone); // uses CC from Rix calculation 4878 4879 bind(ScalarLoop); 4880 z_llh(Z_R1, 0, Z_R0, Rsrc); 4881 z_tmll(Z_R1, 0xff00); 4882 z_brnaz(ScalarBreak); 4883 z_stc(Z_R1, 0, Z_R0, Rdst); 4884 add2reg(Rsrc, 2); 4885 add2reg(Rdst, 1); 4886 z_brct(Rix, ScalarLoop); 4887 4888 z_bru(ScalarDone); 4889 4890 bind(ScalarBreak); 4891 z_sr(result, Rix); 4892 4893 bind(ScalarDone); 4894 z_sgfr(Rdst, result); // restore ptr 4895 z_sgfr(Rsrc, result); // restore ptr, double the element count for Rsrc restore 4896 z_sgfr(Rsrc, result); 4897 } 4898 bind(AllDone); 4899 4900 if (precise) { 4901 BLOCK_COMMENT("} encode_iso_array"); 4902 } else { 4903 BLOCK_COMMENT("} string_compress"); 4904 } 4905 return offset() - block_start; 4906 } 4907 4908 // Inflate byte[] to char[]. 4909 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) { 4910 int block_start = offset(); 4911 4912 BLOCK_COMMENT("string_inflate {"); 4913 4914 Register stop_char = Z_R0; 4915 Register table = Z_R1; 4916 Register src_addr = tmp; 4917 4918 assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt); 4919 assert(dst->encoding()%2 == 0, "must be even reg"); 4920 assert(cnt->encoding()%2 == 1, "must be odd reg"); 4921 assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair"); 4922 4923 StubRoutines::zarch::generate_load_trot_table_addr(this, table); // kills Z_R0 (if ASSERT) 4924 clear_reg(stop_char); // Stop character. Not used here, but initialized to have a defined value. 4925 lgr_if_needed(src_addr, src); 4926 z_llgfr(cnt, cnt); // # src characters, must be a positive simm32. 4927 4928 translate_ot(dst, src_addr, /* mask = */ 0x0001); 4929 4930 BLOCK_COMMENT("} string_inflate"); 4931 4932 return offset() - block_start; 4933 } 4934 4935 // Inflate byte[] to char[]. 4936 // Restores: src, dst 4937 // Uses: cnt 4938 // Kills: tmp, Z_R0, Z_R1. 4939 // Note: 4940 // cnt is signed int. Do not rely on high word! 4941 // counts # characters, not bytes. 4942 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) { 4943 assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp); 4944 4945 BLOCK_COMMENT("string_inflate {"); 4946 int block_start = offset(); 4947 4948 Register Rcnt = cnt; // # characters (src: bytes, dst: char (2-byte)), remaining after current loop. 4949 Register Rix = tmp; // loop index 4950 Register Rsrc = src; // addr(src array) 4951 Register Rdst = dst; // addr(dst array) 4952 Label ScalarShortcut, AllDone; 4953 4954 #if 0 // Sacrifice shortcuts for code compactness 4955 { 4956 //---< shortcuts for short strings (very frequent) >--- 4957 Label skipShortcut, skip4Shortcut; 4958 z_ltr(Rcnt, Rcnt); // absolutely nothing to do for strings of len == 0. 4959 z_brz(AllDone); 4960 clear_reg(Z_R0); // make sure registers are properly initialized. 4961 clear_reg(Z_R1); 4962 z_chi(Rcnt, 4); 4963 z_brne(skip4Shortcut); // 4 characters are very frequent 4964 z_icm(Z_R0, 5, 0, Rsrc); // Treat exactly 4 characters specially. 4965 z_icm(Z_R1, 5, 2, Rsrc); 4966 z_stm(Z_R0, Z_R1, 0, Rdst); 4967 z_bru(AllDone); 4968 bind(skip4Shortcut); 4969 4970 z_chi(Rcnt, 8); 4971 z_brh(skipShortcut); // There's a lot to do... 4972 z_lgfr(Z_R0, Rcnt); // remaining #characters (<= 8). Precond for scalar loop. 4973 // This does not destroy the "register cleared" state of Z_R0. 4974 z_brl(ScalarShortcut); // Just a few characters 4975 z_icmh(Z_R0, 5, 0, Rsrc); // Treat exactly 8 characters specially. 
4976 z_icmh(Z_R1, 5, 4, Rsrc); 4977 z_icm(Z_R0, 5, 2, Rsrc); 4978 z_icm(Z_R1, 5, 6, Rsrc); 4979 z_stmg(Z_R0, Z_R1, 0, Rdst); 4980 z_bru(AllDone); 4981 bind(skipShortcut); 4982 } 4983 #endif 4984 clear_reg(Z_R0); // make sure register is properly initialized. 4985 4986 if (VM_Version::has_VectorFacility()) { 4987 const int min_vcnt = 32; // Minimum #characters required to use vector instructions. 4988 // Otherwise just do nothing in vector mode. 4989 // Must be multiple of vector register length (16 bytes = 128 bits). 4990 const int log_min_vcnt = exact_log2(min_vcnt); 4991 Label VectorLoop, VectorDone; 4992 4993 assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()"); 4994 z_srak(Rix, Rcnt, log_min_vcnt); // calculate # vector loop iterations 4995 z_brz(VectorDone); // skip if none 4996 4997 z_sllg(Z_R0, Rix, log_min_vcnt); // remember #chars that will be processed by vector loop 4998 4999 bind(VectorLoop); 5000 z_vlm(Z_V20, Z_V21, 0, Rsrc); // get next 32 characters (single-byte) 5001 add2reg(Rsrc, min_vcnt); 5002 5003 z_vuplhb(Z_V22, Z_V20); // V2 <- (expand) V0(high) 5004 z_vupllb(Z_V23, Z_V20); // V3 <- (expand) V0(low) 5005 z_vuplhb(Z_V24, Z_V21); // V4 <- (expand) V1(high) 5006 z_vupllb(Z_V25, Z_V21); // V5 <- (expand) V1(low) 5007 z_vstm(Z_V22, Z_V25, 0, Rdst); // store next 32 bytes 5008 add2reg(Rdst, min_vcnt*2); 5009 5010 z_brct(Rix, VectorLoop); 5011 5012 bind(VectorDone); 5013 } 5014 5015 const int min_cnt = 8; // Minimum #characters required to use unrolled scalar loop. 5016 // Otherwise just do nothing in unrolled scalar mode. 5017 // Must be multiple of 8. 5018 { 5019 const int log_min_cnt = exact_log2(min_cnt); 5020 Label UnrolledLoop, UnrolledDone; 5021 5022 5023 if (VM_Version::has_DistinctOpnds()) { 5024 z_srk(Rix, Rcnt, Z_R0); // remaining # chars to process in unrolled loop 5025 } else { 5026 z_lr(Rix, Rcnt); 5027 z_sr(Rix, Z_R0); 5028 } 5029 z_sra(Rix, log_min_cnt); // unrolled loop count 5030 z_brz(UnrolledDone); 5031 5032 clear_reg(Z_R0); 5033 clear_reg(Z_R1); 5034 5035 bind(UnrolledLoop); 5036 z_icmh(Z_R0, 5, 0, Rsrc); 5037 z_icmh(Z_R1, 5, 4, Rsrc); 5038 z_icm(Z_R0, 5, 2, Rsrc); 5039 z_icm(Z_R1, 5, 6, Rsrc); 5040 add2reg(Rsrc, min_cnt); 5041 5042 z_stmg(Z_R0, Z_R1, 0, Rdst); 5043 5044 add2reg(Rdst, min_cnt*2); 5045 z_brct(Rix, UnrolledLoop); 5046 5047 bind(UnrolledDone); 5048 z_lgfr(Z_R0, Rcnt); // # chars left over after unrolled loop. 5049 z_nilf(Z_R0, min_cnt-1); 5050 z_brnz(ScalarShortcut); // if zero, there is nothing left to do for scalar loop. 5051 // Rix == 0 in all cases. 5052 z_sgfr(Z_R0, Rcnt); // negative # characters the ptrs have been advanced previously. 5053 z_agr(Rdst, Z_R0); // restore ptr, double the element count for Rdst restore. 5054 z_agr(Rdst, Z_R0); 5055 z_agr(Rsrc, Z_R0); // restore ptr. 5056 z_bru(AllDone); 5057 } 5058 5059 { 5060 bind(ScalarShortcut); 5061 // Z_R0 must contain remaining # characters as 64-bit signed int here. 5062 // register contents is preserved over scalar processing (for register fixup). 
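    // The CodeTable construct below is a Duff's-device-style computed jump:
    // seven LLC/STH pairs (6 + 4 = 10 bytes each, handling source offsets 6
    // down to 0) precede the CodeTable label, and the code branches to
    // CodeTable - 10*n for a remaining count of n. E.g. n == 3 enters at
    // CodeTable-30 and executes exactly the three pairs copying offsets 2, 1, 0.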
5063 5064 #if 0 // Sacrifice shortcuts for code compactness 5065 { 5066 Label ScalarDefault; 5067 z_chi(Rcnt, 2); 5068 z_brh(ScalarDefault); 5069 z_llc(Z_R0, 0, Z_R0, Rsrc); // 6 bytes 5070 z_sth(Z_R0, 0, Z_R0, Rdst); // 4 bytes 5071 z_brl(AllDone); 5072 z_llc(Z_R0, 1, Z_R0, Rsrc); // 6 bytes 5073 z_sth(Z_R0, 2, Z_R0, Rdst); // 4 bytes 5074 z_bru(AllDone); 5075 bind(ScalarDefault); 5076 } 5077 #endif 5078 5079 Label CodeTable; 5080 // Some comments on Rix calculation: 5081 // - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions). 5082 // - high word of both Rix and Rcnt may contain garbage 5083 // - the final lngfr takes care of that garbage, extending the sign to high word 5084 z_sllg(Rix, Z_R0, 2); // calculate 10*Rix = (4*Rix + Rix)*2 5085 z_ar(Rix, Z_R0); 5086 z_larl(Z_R1, CodeTable); 5087 z_sll(Rix, 1); 5088 z_lngfr(Rix, Rix); // ix range: [0..7], after inversion & mult: [-(7*12)..(0*12)]. 5089 z_bc(Assembler::bcondAlways, 0, Rix, Z_R1); 5090 5091 z_llc(Z_R1, 6, Z_R0, Rsrc); // 6 bytes 5092 z_sth(Z_R1, 12, Z_R0, Rdst); // 4 bytes 5093 5094 z_llc(Z_R1, 5, Z_R0, Rsrc); 5095 z_sth(Z_R1, 10, Z_R0, Rdst); 5096 5097 z_llc(Z_R1, 4, Z_R0, Rsrc); 5098 z_sth(Z_R1, 8, Z_R0, Rdst); 5099 5100 z_llc(Z_R1, 3, Z_R0, Rsrc); 5101 z_sth(Z_R1, 6, Z_R0, Rdst); 5102 5103 z_llc(Z_R1, 2, Z_R0, Rsrc); 5104 z_sth(Z_R1, 4, Z_R0, Rdst); 5105 5106 z_llc(Z_R1, 1, Z_R0, Rsrc); 5107 z_sth(Z_R1, 2, Z_R0, Rdst); 5108 5109 z_llc(Z_R1, 0, Z_R0, Rsrc); 5110 z_sth(Z_R1, 0, Z_R0, Rdst); 5111 bind(CodeTable); 5112 5113 z_chi(Rcnt, 8); // no fixup for small strings. Rdst, Rsrc were not modified. 5114 z_brl(AllDone); 5115 5116 z_sgfr(Z_R0, Rcnt); // # characters the ptrs have been advanced previously. 5117 z_agr(Rdst, Z_R0); // restore ptr, double the element count for Rdst restore. 5118 z_agr(Rdst, Z_R0); 5119 z_agr(Rsrc, Z_R0); // restore ptr. 5120 } 5121 bind(AllDone); 5122 5123 BLOCK_COMMENT("} string_inflate"); 5124 return offset() - block_start; 5125 } 5126 5127 // Inflate byte[] to char[], length known at compile time. 5128 // Restores: src, dst 5129 // Kills: tmp, Z_R0, Z_R1. 5130 // Note: 5131 // len is signed int. Counts # characters, not bytes. 5132 unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) { 5133 assert_different_registers(Z_R0, Z_R1, src, dst, tmp); 5134 5135 BLOCK_COMMENT("string_inflate_const {"); 5136 int block_start = offset(); 5137 5138 Register Rix = tmp; // loop index 5139 Register Rsrc = src; // addr(src array) 5140 Register Rdst = dst; // addr(dst array) 5141 Label ScalarShortcut, AllDone; 5142 int nprocessed = 0; 5143 int src_off = 0; // compensate for saved (optimized away) ptr advancement. 5144 int dst_off = 0; // compensate for saved (optimized away) ptr advancement. 5145 bool restore_inputs = false; 5146 bool workreg_clear = false; 5147 5148 if ((len >= 32) && VM_Version::has_VectorFacility()) { 5149 const int min_vcnt = 32; // Minimum #characters required to use vector instructions. 5150 // Otherwise just do nothing in vector mode. 5151 // Must be multiple of vector register length (16 bytes = 128 bits). 
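    // Worked example for the whole routine: len == 100 is decomposed into
    // 3 x 32 characters here (nprocessed becomes 96), no 16-character vector
    // step (4 < 16), no unrolled scalar step (4 <= 8), and a final switch on
    // the remaining 4 characters.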
    const int  log_min_vcnt = exact_log2(min_vcnt);
    const int  iterations   = (len - nprocessed) >> log_min_vcnt;
    nprocessed             += iterations << log_min_vcnt;
    Label      VectorLoop;

    if (iterations == 1) {
      z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);  // get next 32 characters (single-byte)
      z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
      z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
      z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
      z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 64 bytes (32 2-byte chars)

      src_off += min_vcnt;
      dst_off += min_vcnt*2;
    } else {
      restore_inputs = true;

      z_lgfi(Rix, len>>log_min_vcnt);
      bind(VectorLoop);
        z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
        add2reg(Rsrc, min_vcnt);

        z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
        z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
        z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
        z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
        z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 64 bytes (32 2-byte chars)
        add2reg(Rdst, min_vcnt*2);

        z_brct(Rix, VectorLoop);
    }
  }

  if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
    const int  min_vcnt     = 16;          // Minimum #characters required to use vector instructions.
                                           // Otherwise just do nothing in vector mode.
                                           // Must be multiple of vector register length (16 bytes = 128 bits).
    const int  log_min_vcnt = exact_log2(min_vcnt);
    const int  iterations   = (len - nprocessed) >> log_min_vcnt;
    nprocessed             += iterations << log_min_vcnt;
    assert(iterations == 1, "must be!");

    z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);    // get next 16 characters (single-byte)
    z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
    z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
    z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes

    src_off += min_vcnt;
    dst_off += min_vcnt*2;
  }

  if ((len-nprocessed) > 8) {
    const int  min_cnt     = 8;            // Minimum #characters required to use unrolled scalar loop.
                                           // Otherwise just do nothing in unrolled scalar mode.
                                           // Must be multiple of 8.
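    // The inflation below relies on the insert-under-mask semantics of ICM/ICMH:
    // mask 5 (0b0101) inserts two source bytes into byte positions 1 and 3 of
    // the addressed register word (ICMH: high word, ICM: low word), leaving
    // positions 0 and 2 untouched. With the work registers pre-cleared, every
    // source byte thus becomes a zero-extended 2-byte character.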
5208 const int log_min_cnt = exact_log2(min_cnt); 5209 const int iterations = (len - nprocessed) >> log_min_cnt; 5210 nprocessed += iterations << log_min_cnt; 5211 5212 //---< avoid loop overhead/ptr increment for small # iterations >--- 5213 if (iterations <= 2) { 5214 clear_reg(Z_R0); 5215 clear_reg(Z_R1); 5216 workreg_clear = true; 5217 5218 z_icmh(Z_R0, 5, 0+src_off, Rsrc); 5219 z_icmh(Z_R1, 5, 4+src_off, Rsrc); 5220 z_icm(Z_R0, 5, 2+src_off, Rsrc); 5221 z_icm(Z_R1, 5, 6+src_off, Rsrc); 5222 z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst); 5223 5224 src_off += min_cnt; 5225 dst_off += min_cnt*2; 5226 } 5227 5228 if (iterations == 2) { 5229 z_icmh(Z_R0, 5, 0+src_off, Rsrc); 5230 z_icmh(Z_R1, 5, 4+src_off, Rsrc); 5231 z_icm(Z_R0, 5, 2+src_off, Rsrc); 5232 z_icm(Z_R1, 5, 6+src_off, Rsrc); 5233 z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst); 5234 5235 src_off += min_cnt; 5236 dst_off += min_cnt*2; 5237 } 5238 5239 if (iterations > 2) { 5240 Label UnrolledLoop; 5241 restore_inputs = true; 5242 5243 clear_reg(Z_R0); 5244 clear_reg(Z_R1); 5245 workreg_clear = true; 5246 5247 z_lgfi(Rix, iterations); 5248 bind(UnrolledLoop); 5249 z_icmh(Z_R0, 5, 0, Rsrc); 5250 z_icmh(Z_R1, 5, 4, Rsrc); 5251 z_icm(Z_R0, 5, 2, Rsrc); 5252 z_icm(Z_R1, 5, 6, Rsrc); 5253 add2reg(Rsrc, min_cnt); 5254 5255 z_stmg(Z_R0, Z_R1, 0, Rdst); 5256 add2reg(Rdst, min_cnt*2); 5257 5258 z_brct(Rix, UnrolledLoop); 5259 } 5260 } 5261 5262 if ((len-nprocessed) > 0) { 5263 switch (len-nprocessed) { 5264 case 8: 5265 if (!workreg_clear) { 5266 clear_reg(Z_R0); 5267 clear_reg(Z_R1); 5268 } 5269 z_icmh(Z_R0, 5, 0+src_off, Rsrc); 5270 z_icmh(Z_R1, 5, 4+src_off, Rsrc); 5271 z_icm(Z_R0, 5, 2+src_off, Rsrc); 5272 z_icm(Z_R1, 5, 6+src_off, Rsrc); 5273 z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst); 5274 break; 5275 case 7: 5276 if (!workreg_clear) { 5277 clear_reg(Z_R0); 5278 clear_reg(Z_R1); 5279 } 5280 clear_reg(Rix); 5281 z_icm(Z_R0, 5, 0+src_off, Rsrc); 5282 z_icm(Z_R1, 5, 2+src_off, Rsrc); 5283 z_icm(Rix, 5, 4+src_off, Rsrc); 5284 z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); 5285 z_llc(Z_R0, 6+src_off, Z_R0, Rsrc); 5286 z_st(Rix, 8+dst_off, Z_R0, Rdst); 5287 z_sth(Z_R0, 12+dst_off, Z_R0, Rdst); 5288 break; 5289 case 6: 5290 if (!workreg_clear) { 5291 clear_reg(Z_R0); 5292 clear_reg(Z_R1); 5293 } 5294 clear_reg(Rix); 5295 z_icm(Z_R0, 5, 0+src_off, Rsrc); 5296 z_icm(Z_R1, 5, 2+src_off, Rsrc); 5297 z_icm(Rix, 5, 4+src_off, Rsrc); 5298 z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); 5299 z_st(Rix, 8+dst_off, Z_R0, Rdst); 5300 break; 5301 case 5: 5302 if (!workreg_clear) { 5303 clear_reg(Z_R0); 5304 clear_reg(Z_R1); 5305 } 5306 z_icm(Z_R0, 5, 0+src_off, Rsrc); 5307 z_icm(Z_R1, 5, 2+src_off, Rsrc); 5308 z_llc(Rix, 4+src_off, Z_R0, Rsrc); 5309 z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); 5310 z_sth(Rix, 8+dst_off, Z_R0, Rdst); 5311 break; 5312 case 4: 5313 if (!workreg_clear) { 5314 clear_reg(Z_R0); 5315 clear_reg(Z_R1); 5316 } 5317 z_icm(Z_R0, 5, 0+src_off, Rsrc); 5318 z_icm(Z_R1, 5, 2+src_off, Rsrc); 5319 z_stm(Z_R0, Z_R1, 0+dst_off, Rdst); 5320 break; 5321 case 3: 5322 if (!workreg_clear) { 5323 clear_reg(Z_R0); 5324 } 5325 z_llc(Z_R1, 2+src_off, Z_R0, Rsrc); 5326 z_icm(Z_R0, 5, 0+src_off, Rsrc); 5327 z_sth(Z_R1, 4+dst_off, Z_R0, Rdst); 5328 z_st(Z_R0, 0+dst_off, Rdst); 5329 break; 5330 case 2: 5331 z_llc(Z_R0, 0+src_off, Z_R0, Rsrc); 5332 z_llc(Z_R1, 1+src_off, Z_R0, Rsrc); 5333 z_sth(Z_R0, 0+dst_off, Z_R0, Rdst); 5334 z_sth(Z_R1, 2+dst_off, Z_R0, Rdst); 5335 break; 5336 case 1: 5337 z_llc(Z_R0, 0+src_off, Z_R0, Rsrc); 5338 z_sth(Z_R0, 0+dst_off, Z_R0, Rdst); 5339 break; 5340 default: 5341 
guarantee(false, "Impossible"); 5342 break; 5343 } 5344 src_off += len-nprocessed; 5345 dst_off += (len-nprocessed)*2; 5346 nprocessed = len; 5347 } 5348 5349 //---< restore modified input registers >--- 5350 if ((nprocessed > 0) && restore_inputs) { 5351 z_agfi(Rsrc, -(nprocessed-src_off)); 5352 if (nprocessed < 1000000000) { // avoid int overflow 5353 z_agfi(Rdst, -(nprocessed*2-dst_off)); 5354 } else { 5355 z_agfi(Rdst, -(nprocessed-dst_off)); 5356 z_agfi(Rdst, -nprocessed); 5357 } 5358 } 5359 5360 BLOCK_COMMENT("} string_inflate_const"); 5361 return offset() - block_start; 5362 } 5363 5364 // Kills src. 5365 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt, 5366 Register odd_reg, Register even_reg, Register tmp) { 5367 int block_start = offset(); 5368 Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone; 5369 const Register addr = src, mask = tmp; 5370 5371 BLOCK_COMMENT("has_negatives {"); 5372 5373 z_llgfr(Z_R1, cnt); // Number of bytes to read. (Must be a positive simm32.) 5374 z_llilf(mask, 0x80808080); 5375 z_lhi(result, 1); // Assume true. 5376 // Last possible addr for fast loop. 5377 z_lay(odd_reg, -16, Z_R1, src); 5378 z_chi(cnt, 16); 5379 z_brl(Lslow); 5380 5381 // ind1: index, even_reg: index increment, odd_reg: index limit 5382 z_iihf(mask, 0x80808080); 5383 z_lghi(even_reg, 16); 5384 5385 bind(Lloop1); // 16 bytes per iteration. 5386 z_lg(Z_R0, Address(addr)); 5387 z_lg(Z_R1, Address(addr, 8)); 5388 z_ogr(Z_R0, Z_R1); 5389 z_ngr(Z_R0, mask); 5390 z_brne(Ldone); // If found return 1. 5391 z_brxlg(addr, even_reg, Lloop1); 5392 5393 bind(Lslow); 5394 z_aghi(odd_reg, 16-1); // Last possible addr for slow loop. 5395 z_lghi(even_reg, 1); 5396 z_cgr(addr, odd_reg); 5397 z_brh(Lnotfound); 5398 5399 bind(Lloop2); // 1 byte per iteration. 5400 z_cli(Address(addr), 0x80); 5401 z_brnl(Ldone); // If found return 1. 5402 z_brxlg(addr, even_reg, Lloop2); 5403 5404 bind(Lnotfound); 5405 z_lhi(result, 0); 5406 5407 bind(Ldone); 5408 5409 BLOCK_COMMENT("} has_negatives"); 5410 5411 return offset() - block_start; 5412 } 5413 5414 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result 5415 unsigned int MacroAssembler::string_compare(Register str1, Register str2, 5416 Register cnt1, Register cnt2, 5417 Register odd_reg, Register even_reg, Register result, int ae) { 5418 int block_start = offset(); 5419 5420 assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result); 5421 assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result); 5422 5423 // If strings are equal up to min length, return the length difference. 5424 const Register diff = result, // Pre-set result with length difference. 5425 min = cnt1, // min number of bytes 5426 tmp = cnt2; 5427 5428 // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a) 5429 // we interchange str1 and str2 in the UL case and negate the result. 5430 // Like this, str1 is always latin1 encoded, except for the UU case. 5431 // In addition, we need 0 (or sign which is 0) extend when using 64 bit register. 5432 const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL); 5433 5434 BLOCK_COMMENT("string_compare {"); 5435 5436 if (used_as_LU) { 5437 z_srl(cnt2, 1); 5438 } 5439 5440 // See if the lengths are different, and calculate min in cnt1. 5441 // Save diff in case we need it for a tie-breaker. 
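  // Example: "abc" vs. "abcd" compare equal over min == 3 characters, so the
  // result is the pre-computed length difference 3 - 4 == -1.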
5442 5443 // diff = cnt1 - cnt2 5444 if (VM_Version::has_DistinctOpnds()) { 5445 z_srk(diff, cnt1, cnt2); 5446 } else { 5447 z_lr(diff, cnt1); 5448 z_sr(diff, cnt2); 5449 } 5450 if (str1 != str2) { 5451 if (VM_Version::has_LoadStoreConditional()) { 5452 z_locr(min, cnt2, Assembler::bcondHigh); 5453 } else { 5454 Label Lskip; 5455 z_brl(Lskip); // min ok if cnt1 < cnt2 5456 z_lr(min, cnt2); // min = cnt2 5457 bind(Lskip); 5458 } 5459 } 5460 5461 if (ae == StrIntrinsicNode::UU) { 5462 z_sra(diff, 1); 5463 } 5464 if (str1 != str2) { 5465 Label Ldone; 5466 if (used_as_LU) { 5467 // Loop which searches the first difference character by character. 5468 Label Lloop; 5469 const Register ind1 = Z_R1, 5470 ind2 = min; 5471 int stride1 = 1, stride2 = 2; // See comment above. 5472 5473 // ind1: index, even_reg: index increment, odd_reg: index limit 5474 z_llilf(ind1, (unsigned int)(-stride1)); 5475 z_lhi(even_reg, stride1); 5476 add2reg(odd_reg, -stride1, min); 5477 clear_reg(ind2); // kills min 5478 5479 bind(Lloop); 5480 z_brxh(ind1, even_reg, Ldone); 5481 z_llc(tmp, Address(str1, ind1)); 5482 z_llh(Z_R0, Address(str2, ind2)); 5483 z_ahi(ind2, stride2); 5484 z_sr(tmp, Z_R0); 5485 z_bre(Lloop); 5486 5487 z_lr(result, tmp); 5488 5489 } else { 5490 // Use clcle in fast loop (only for same encoding). 5491 z_lgr(Z_R0, str1); 5492 z_lgr(even_reg, str2); 5493 z_llgfr(Z_R1, min); 5494 z_llgfr(odd_reg, min); 5495 5496 if (ae == StrIntrinsicNode::LL) { 5497 compare_long_ext(Z_R0, even_reg, 0); 5498 } else { 5499 compare_long_uni(Z_R0, even_reg, 0); 5500 } 5501 z_bre(Ldone); 5502 z_lgr(Z_R1, Z_R0); 5503 if (ae == StrIntrinsicNode::LL) { 5504 z_llc(Z_R0, Address(even_reg)); 5505 z_llc(result, Address(Z_R1)); 5506 } else { 5507 z_llh(Z_R0, Address(even_reg)); 5508 z_llh(result, Address(Z_R1)); 5509 } 5510 z_sr(result, Z_R0); 5511 } 5512 5513 // Otherwise, return the difference between the first mismatched chars. 5514 bind(Ldone); 5515 } 5516 5517 if (ae == StrIntrinsicNode::UL) { 5518 z_lcr(result, result); // Negate result (see note above). 5519 } 5520 5521 BLOCK_COMMENT("} string_compare"); 5522 5523 return offset() - block_start; 5524 } 5525 5526 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit, 5527 Register odd_reg, Register even_reg, Register result, bool is_byte) { 5528 int block_start = offset(); 5529 5530 BLOCK_COMMENT("array_equals {"); 5531 5532 assert_different_registers(ary1, limit, odd_reg, even_reg); 5533 assert_different_registers(ary2, limit, odd_reg, even_reg); 5534 5535 Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template; 5536 int base_offset = 0; 5537 5538 if (ary1 != ary2) { 5539 if (is_array_equ) { 5540 base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 5541 5542 // Return true if the same array. 5543 compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true); 5544 5545 // Return false if one of them is NULL. 5546 compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5547 compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5548 5549 // Load the lengths of arrays. 5550 z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes())); 5551 5552 // Return false if the two arrays are not equal length. 
5553 z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes())); 5554 z_brne(Ldone_false); 5555 5556 // string len in bytes (right operand) 5557 if (!is_byte) { 5558 z_chi(odd_reg, 128); 5559 z_sll(odd_reg, 1); // preserves flags 5560 z_brh(Lclcle); 5561 } else { 5562 compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5563 } 5564 } else { 5565 z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value. 5566 compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5567 } 5568 5569 5570 // Use clc instruction for up to 256 bytes. 5571 { 5572 Register str1_reg = ary1, 5573 str2_reg = ary2; 5574 if (is_array_equ) { 5575 str1_reg = Z_R1; 5576 str2_reg = even_reg; 5577 add2reg(str1_reg, base_offset, ary1); // string addr (left operand) 5578 add2reg(str2_reg, base_offset, ary2); // string addr (right operand) 5579 } 5580 z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0. 5581 z_brl(Ldone_true); 5582 // Note: We could jump to the template if equal. 5583 5584 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5585 z_exrl(odd_reg, CLC_template); 5586 z_bre(Ldone_true); 5587 // fall through 5588 5589 bind(Ldone_false); 5590 clear_reg(result); 5591 z_bru(Ldone); 5592 5593 bind(CLC_template); 5594 z_clc(0, 0, str1_reg, 0, str2_reg); 5595 } 5596 5597 // Use clcle instruction. 5598 { 5599 bind(Lclcle); 5600 add2reg(even_reg, base_offset, ary2); // string addr (right operand) 5601 add2reg(Z_R0, base_offset, ary1); // string addr (left operand) 5602 5603 z_lgr(Z_R1, odd_reg); // string len in bytes (left operand) 5604 if (is_byte) { 5605 compare_long_ext(Z_R0, even_reg, 0); 5606 } else { 5607 compare_long_uni(Z_R0, even_reg, 0); 5608 } 5609 z_lghi(result, 0); // Preserve flags. 5610 z_brne(Ldone); 5611 } 5612 } 5613 // fall through 5614 5615 bind(Ldone_true); 5616 z_lghi(result, 1); // All characters are equal. 5617 bind(Ldone); 5618 5619 BLOCK_COMMENT("} array_equals"); 5620 5621 return offset() - block_start; 5622 } 5623 5624 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result 5625 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt, 5626 Register needle, Register needlecnt, int needlecntval, 5627 Register odd_reg, Register even_reg, int ae) { 5628 int block_start = offset(); 5629 5630 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite! 5631 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 5632 const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2; 5633 const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1; 5634 Label L_needle1, L_Found, L_NotFound; 5635 5636 BLOCK_COMMENT("string_indexof {"); 5637 5638 if (needle == haystack) { 5639 z_lhi(result, 0); 5640 } else { 5641 5642 // Load first character of needle (R0 used by search_string instructions). 5643 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } 5644 5645 // Compute last haystack addr to use if no match gets found. 5646 if (needlecnt != noreg) { // variable needlecnt 5647 z_ahi(needlecnt, -1); // Remaining characters after first one. 5648 z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare. 5649 if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes. 5650 } else { // constant needlecnt 5651 assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate"); 5652 // Compute index succeeding last element to compare. 
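      // (E.g. needlecntval == 3 adjusts haycnt by -2: the last start position
      // worth trying leaves exactly 3 haystack characters to match.)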
5653 if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); } 5654 } 5655 5656 z_llgfr(haycnt, haycnt); // Clear high half. 5657 z_lgr(result, haystack); // Final result will be computed from needle start pointer. 5658 if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes. 5659 z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)). 5660 5661 if (h_csize != n_csize) { 5662 assert(ae == StrIntrinsicNode::UL, "Invalid encoding"); 5663 5664 if (needlecnt != noreg || needlecntval != 1) { 5665 if (needlecnt != noreg) { 5666 compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1); 5667 } 5668 5669 // Main Loop: UL version (now we have at least 2 characters). 5670 Label L_OuterLoop, L_InnerLoop, L_Skip; 5671 bind(L_OuterLoop); // Search for 1st 2 characters. 5672 z_lgr(Z_R1, haycnt); 5673 MacroAssembler::search_string_uni(Z_R1, result); 5674 z_brc(Assembler::bcondNotFound, L_NotFound); 5675 z_lgr(result, Z_R1); 5676 5677 z_lghi(Z_R1, n_csize); 5678 z_lghi(even_reg, h_csize); 5679 bind(L_InnerLoop); 5680 z_llgc(odd_reg, Address(needle, Z_R1)); 5681 z_ch(odd_reg, Address(result, even_reg)); 5682 z_brne(L_Skip); 5683 if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); } 5684 z_brnl(L_Found); 5685 z_aghi(Z_R1, n_csize); 5686 z_aghi(even_reg, h_csize); 5687 z_bru(L_InnerLoop); 5688 5689 bind(L_Skip); 5690 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5691 z_bru(L_OuterLoop); 5692 } 5693 5694 } else { 5695 const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1); 5696 Label L_clcle; 5697 5698 if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) { 5699 if (needlecnt != noreg) { 5700 compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle); 5701 z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC) 5702 z_brl(L_needle1); 5703 } 5704 5705 // Main Loop: clc version (now we have at least 2 characters). 5706 Label L_OuterLoop, CLC_template; 5707 bind(L_OuterLoop); // Search for 1st 2 characters. 5708 z_lgr(Z_R1, haycnt); 5709 if (h_csize == 1) { 5710 MacroAssembler::search_string(Z_R1, result); 5711 } else { 5712 MacroAssembler::search_string_uni(Z_R1, result); 5713 } 5714 z_brc(Assembler::bcondNotFound, L_NotFound); 5715 z_lgr(result, Z_R1); 5716 5717 if (needlecnt != noreg) { 5718 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5719 z_exrl(needlecnt, CLC_template); 5720 } else { 5721 z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle); 5722 } 5723 z_bre(L_Found); 5724 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5725 z_bru(L_OuterLoop); 5726 5727 if (needlecnt != noreg) { 5728 bind(CLC_template); 5729 z_clc(h_csize, 0, Z_R1, n_csize, needle); 5730 } 5731 } 5732 5733 if (needlecnt != noreg || needle_bytes > 256) { 5734 bind(L_clcle); 5735 5736 // Main Loop: clcle version (now we have at least 256 bytes). 5737 Label L_OuterLoop, CLC_template; 5738 bind(L_OuterLoop); // Search for 1st 2 characters. 
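        // Strategy of this outer loop: SRST(U) scans for the first needle
        // character; on a candidate hit, CLCLE/CLCLU compares the full needle.
        // On a mismatch, the scan resumes one haystack character past the
        // candidate position.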
5739 z_lgr(Z_R1, haycnt); 5740 if (h_csize == 1) { 5741 MacroAssembler::search_string(Z_R1, result); 5742 } else { 5743 MacroAssembler::search_string_uni(Z_R1, result); 5744 } 5745 z_brc(Assembler::bcondNotFound, L_NotFound); 5746 5747 add2reg(Z_R0, n_csize, needle); 5748 add2reg(even_reg, h_csize, Z_R1); 5749 z_lgr(result, Z_R1); 5750 if (needlecnt != noreg) { 5751 z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand) 5752 z_llgfr(odd_reg, needlecnt); 5753 } else { 5754 load_const_optimized(Z_R1, needle_bytes); 5755 if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); } 5756 } 5757 if (h_csize == 1) { 5758 compare_long_ext(Z_R0, even_reg, 0); 5759 } else { 5760 compare_long_uni(Z_R0, even_reg, 0); 5761 } 5762 z_bre(L_Found); 5763 5764 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload. 5765 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5766 z_bru(L_OuterLoop); 5767 } 5768 } 5769 5770 if (needlecnt != noreg || needlecntval == 1) { 5771 bind(L_needle1); 5772 5773 // Single needle character version. 5774 if (h_csize == 1) { 5775 MacroAssembler::search_string(haycnt, result); 5776 } else { 5777 MacroAssembler::search_string_uni(haycnt, result); 5778 } 5779 z_lgr(result, haycnt); 5780 z_brc(Assembler::bcondFound, L_Found); 5781 } 5782 5783 bind(L_NotFound); 5784 add2reg(result, -1, haystack); // Return -1. 5785 5786 bind(L_Found); // Return index (or -1 in fallthrough case). 5787 z_sgr(result, haystack); 5788 if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); } 5789 } 5790 BLOCK_COMMENT("} string_indexof"); 5791 5792 return offset() - block_start; 5793 } 5794 5795 // early clobber: result 5796 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt, 5797 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) { 5798 int block_start = offset(); 5799 5800 BLOCK_COMMENT("string_indexof_char {"); 5801 5802 if (needle == haystack) { 5803 z_lhi(result, 0); 5804 } else { 5805 5806 Label Ldone; 5807 5808 z_llgfr(odd_reg, haycnt); // Preset loop ctr/searchrange end. 5809 if (needle == noreg) { 5810 load_const_optimized(Z_R0, (unsigned long)needleChar); 5811 } else { 5812 if (is_byte) { 5813 z_llgcr(Z_R0, needle); // First (and only) needle char. 5814 } else { 5815 z_llghr(Z_R0, needle); // First (and only) needle char. 5816 } 5817 } 5818 5819 if (!is_byte) { 5820 z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU. 5821 } 5822 5823 z_lgr(even_reg, haystack); // haystack addr 5824 z_agr(odd_reg, haystack); // First char after range end. 
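    // SRST/SRSTU register convention used below: Z_R0 holds the search
    // character, even_reg the scan start, odd_reg the end (exclusive). On a
    // hit, the match address comes back in odd_reg, from which the character
    // index is derived after the not-found guard.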
5825 z_lghi(result, -1); 5826 5827 if (is_byte) { 5828 MacroAssembler::search_string(odd_reg, even_reg); 5829 } else { 5830 MacroAssembler::search_string_uni(odd_reg, even_reg); 5831 } 5832 z_brc(Assembler::bcondNotFound, Ldone); 5833 if (is_byte) { 5834 if (VM_Version::has_DistinctOpnds()) { 5835 z_sgrk(result, odd_reg, haystack); 5836 } else { 5837 z_sgr(odd_reg, haystack); 5838 z_lgr(result, odd_reg); 5839 } 5840 } else { 5841 z_slgr(odd_reg, haystack); 5842 z_srlg(result, odd_reg, exact_log2(sizeof(jchar))); 5843 } 5844 5845 bind(Ldone); 5846 } 5847 BLOCK_COMMENT("} string_indexof_char"); 5848 5849 return offset() - block_start; 5850 } 5851 5852 5853 //------------------------------------------------- 5854 // Constants (scalar and oop) in constant pool 5855 //------------------------------------------------- 5856 5857 // Add a non-relocated constant to the CP. 5858 int MacroAssembler::store_const_in_toc(AddressLiteral& val) { 5859 long value = val.value(); 5860 address tocPos = long_constant(value); 5861 5862 if (tocPos != NULL) { 5863 int tocOffset = (int)(tocPos - code()->consts()->start()); 5864 return tocOffset; 5865 } 5866 // Address_constant returned NULL, so no constant entry has been created. 5867 // In that case, we return a "fatal" offset, just in case that subsequently 5868 // generated access code is executed. 5869 return -1; 5870 } 5871 5872 // Returns the TOC offset where the address is stored. 5873 // Add a relocated constant to the CP. 5874 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) { 5875 // Use RelocationHolder::none for the constant pool entry. 5876 // Otherwise we will end up with a failing NativeCall::verify(x), 5877 // where x is the address of the constant pool entry. 5878 address tocPos = address_constant((address)oop.value(), RelocationHolder::none); 5879 5880 if (tocPos != NULL) { 5881 int tocOffset = (int)(tocPos - code()->consts()->start()); 5882 RelocationHolder rsp = oop.rspec(); 5883 Relocation *rel = rsp.reloc(); 5884 5885 // Store toc_offset in relocation, used by call_far_patchable. 5886 if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) { 5887 ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset); 5888 } 5889 // Relocate at the load's pc. 5890 relocate(rsp); 5891 5892 return tocOffset; 5893 } 5894 // Address_constant returned NULL, so no constant entry has been created 5895 // in that case, we return a "fatal" offset, just in case that subsequently 5896 // generated access code is executed. 5897 return -1; 5898 } 5899 5900 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { 5901 int tocOffset = store_const_in_toc(a); 5902 if (tocOffset == -1) return false; 5903 address tocPos = tocOffset + code()->consts()->start(); 5904 assert((address)code()->consts()->start() != NULL, "Please add CP address"); 5905 5906 load_long_pcrelative(dst, tocPos); 5907 return true; 5908 } 5909 5910 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { 5911 int tocOffset = store_oop_in_toc(a); 5912 if (tocOffset == -1) return false; 5913 address tocPos = tocOffset + code()->consts()->start(); 5914 assert((address)code()->consts()->start() != NULL, "Please add CP address"); 5915 5916 load_addr_pcrelative(dst, tocPos); 5917 return true; 5918 } 5919 5920 // If the instruction sequence at the given pc is a load_const_from_toc 5921 // sequence, return the value currently stored at the referenced position 5922 // in the TOC. 
5923 intptr_t MacroAssembler::get_const_from_toc(address pc) { 5924 5925 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 5926 5927 long offset = get_load_const_from_toc_offset(pc); 5928 address dataLoc = NULL; 5929 if (is_load_const_from_toc_pcrelative(pc)) { 5930 dataLoc = pc + offset; 5931 } else { 5932 CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie. 5933 assert(cb && cb->is_nmethod(), "sanity"); 5934 nmethod* nm = (nmethod*)cb; 5935 dataLoc = nm->ctable_begin() + offset; 5936 } 5937 return *(intptr_t *)dataLoc; 5938 } 5939 5940 // If the instruction sequence at the given pc is a load_const_from_toc 5941 // sequence, copy the passed-in new_data value into the referenced 5942 // position in the TOC. 5943 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) { 5944 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 5945 5946 long offset = MacroAssembler::get_load_const_from_toc_offset(pc); 5947 address dataLoc = NULL; 5948 if (is_load_const_from_toc_pcrelative(pc)) { 5949 dataLoc = pc+offset; 5950 } else { 5951 nmethod* nm = CodeCache::find_nmethod(pc); 5952 assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob"); 5953 dataLoc = nm->ctable_begin() + offset; 5954 } 5955 if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary. 5956 *(unsigned long *)dataLoc = new_data; 5957 } 5958 } 5959 5960 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc 5961 // site. Verify by calling is_load_const_from_toc() before!! 5962 // Offset is +/- 2**32 -> use long. 5963 long MacroAssembler::get_load_const_from_toc_offset(address a) { 5964 assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load"); 5965 // expected code sequence: 5966 // z_lgrl(t, simm32); len = 6 5967 unsigned long inst; 5968 unsigned int len = get_instruction(a, &inst); 5969 return get_pcrel_offset(inst); 5970 } 5971 5972 //********************************************************************************** 5973 // inspection of generated instruction sequences for a particular pattern 5974 //********************************************************************************** 5975 5976 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) { 5977 #ifdef ASSERT 5978 unsigned long inst; 5979 unsigned int len = get_instruction(a+2, &inst); 5980 if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) { 5981 const int range = 128; 5982 Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl"); 5983 VM_Version::z_SIGSEGV(); 5984 } 5985 #endif 5986 // expected code sequence: 5987 // z_lgrl(t, relAddr32); len = 6 5988 //TODO: verify accessed data is in CP, if possible. 5989 return is_load_pcrelative_long(a); // TODO: might be too general. Currently, only lgrl is used. 
}

bool MacroAssembler::is_load_const_from_toc_call(address a) {
  return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
}

bool MacroAssembler::is_load_const_call(address a) {
  return is_load_const(a) && is_call_byregister(a + load_const_size());
}

//-------------------------------------------------
// Emitters for some really CISC instructions
//-------------------------------------------------
// These instructions may be interrupted by the CPU (partial completion,
// indicated by CC==3); hence the retry loop around each of them.

void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
  assert(dst->encoding()%2==0, "must be an even/odd register pair");
  assert(src->encoding()%2==0, "must be an even/odd register pair");
  assert(pad<256, "must be a padding BYTE");

  Label retry;
  bind(retry);
  Assembler::z_mvcle(dst, src, pad);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad<256, "must be a padding BYTE");

  Label retry;
  bind(retry);
  Assembler::z_clcle(left, right, pad, Z_R0);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
  assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
  assert(pad<=0xfff, "must be a padding HALFWORD");
  assert(VM_Version::has_ETF2(), "instruction must be available");

  Label retry;
  bind(retry);
  Assembler::z_clclu(left, right, pad, Z_R0);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::search_string(Register end, Register start) {
  assert(end->encoding() != 0, "end address must not be in R0");
  assert(start->encoding() != 0, "start address must not be in R0");

  Label retry;
  bind(retry);
  Assembler::z_srst(end, start);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::search_string_uni(Register end, Register start) {
  assert(end->encoding() != 0, "end address must not be in R0");
  assert(start->encoding() != 0, "start address must not be in R0");
  assert(VM_Version::has_ETF3(), "instruction must be available");

  Label retry;
  bind(retry);
  Assembler::z_srstu(end, start);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kmac(Register srcBuff) {
  assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kmac(Z_R0, srcBuff);
  Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
}

void MacroAssembler::kimd(Register srcBuff) {
  assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0");
  assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");

  Label retry;
  bind(retry);
  Assembler::z_kimd(Z_R0, srcBuff);
Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6077 } 6078 6079 void MacroAssembler::klmd(Register srcBuff) { 6080 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 6081 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 6082 6083 Label retry; 6084 bind(retry); 6085 Assembler::z_klmd(Z_R0, srcBuff); 6086 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6087 } 6088 6089 void MacroAssembler::km(Register dstBuff, Register srcBuff) { 6090 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 6091 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 6092 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 6093 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 6094 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 6095 6096 Label retry; 6097 bind(retry); 6098 Assembler::z_km(dstBuff, srcBuff); 6099 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6100 } 6101 6102 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) { 6103 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 6104 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 6105 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 6106 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 6107 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 6108 6109 Label retry; 6110 bind(retry); 6111 Assembler::z_kmc(dstBuff, srcBuff); 6112 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6113 } 6114 6115 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) { 6116 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 6117 6118 Label retry; 6119 bind(retry); 6120 Assembler::z_cksm(crcBuff, srcBuff); 6121 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6122 } 6123 6124 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) { 6125 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 6126 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6127 6128 Label retry; 6129 bind(retry); 6130 Assembler::z_troo(r1, r2, m3); 6131 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6132 } 6133 6134 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) { 6135 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 6136 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6137 6138 Label retry; 6139 bind(retry); 6140 Assembler::z_trot(r1, r2, m3); 6141 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6142 } 6143 6144 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) { 6145 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 6146 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6147 6148 Label retry; 6149 bind(retry); 6150 Assembler::z_trto(r1, r2, m3); 6151 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6152 } 6153 6154 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) { 6155 assert(r1->encoding() % 2 == 0, "dst 
addr/src len must be an even/odd register pair"); 6156 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 6157 6158 Label retry; 6159 bind(retry); 6160 Assembler::z_trtt(r1, r2, m3); 6161 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 6162 } 6163 6164 6165 void MacroAssembler::generate_type_profiling(const Register Rdata, 6166 const Register Rreceiver_klass, 6167 const Register Rwanted_receiver_klass, 6168 const Register Rmatching_row, 6169 bool is_virtual_call) { 6170 const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) - 6171 in_bytes(ReceiverTypeData::receiver_offset(0)); 6172 const int num_rows = ReceiverTypeData::row_limit(); 6173 NearLabel found_free_row; 6174 NearLabel do_increment; 6175 NearLabel found_no_slot; 6176 6177 BLOCK_COMMENT("type profiling {"); 6178 6179 // Search for: 6180 // a) The type given in Rwanted_receiver_klass. 6181 // b) The *first* empty row. 6182 6183 // First search for a) only, just skipping over b) without regard. 6184 // This is possible because 6185 // wanted_receiver_class == receiver_class && wanted_receiver_class == 0 6186 // is never true (receiver_class can't be zero). 6187 for (int row_num = 0; row_num < num_rows; row_num++) { 6188 // row_offset should be a well-behaved positive number. The generated code relies 6189 // on that to have constant code size. add2reg could handle all row_offset values, but 6190 // the generated code size would then vary. 6191 int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num)); 6192 assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code"); 6193 6194 // Is Rwanted_receiver_klass in this row? 6195 if (VM_Version::has_CompareBranch()) { 6196 z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata); 6197 // Rmatching_row = Rdata + row_offset; 6198 add2reg(Rmatching_row, row_offset, Rdata); 6199 // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot; 6200 compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment); 6201 } else { 6202 add2reg(Rmatching_row, row_offset, Rdata); 6203 z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata); 6204 z_bre(do_increment); 6205 } 6206 } 6207 6208 // Now that we did not find a match, let's search for b). 6209 6210 // We could save the first calculation of Rmatching_row if we would search for a) in reverse order. 6211 // We would then end up here with Rmatching_row containing the value for row_num == 0. 6212 // We would not see much benefit, if any at all, because the CPU can schedule 6213 // two instructions together with a branch anyway. 6214 for (int row_num = 0; row_num < num_rows; row_num++) { 6215 int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num)); 6216 6217 // Does this row have a zero receiver_klass, i.e. is it empty? 6218 if (VM_Version::has_CompareBranch()) { 6219 z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata); 6220 // Rmatching_row = Rdata + row_offset 6221 add2reg(Rmatching_row, row_offset, Rdata); 6222 // if (*row_recv == (intptr_t) 0) goto found_free_row 6223 compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row); 6224 } else { 6225 add2reg(Rmatching_row, row_offset, Rdata); 6226 load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset)); 6227 z_bre(found_free_row); // zero -> Found a free row. 6228 } 6229 } 6230 6231 // No match, no empty row found. 6232 // Increment total counter to indicate polymorphic case.
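  // For orientation, the two search passes above plus the fall-through code
  // below behave like this illustrative sketch (not literally generated):
  //
  //   for (row = 0; row < num_rows; row++)      // pass a): search matching type
  //     if (rows[row].klass == receiver_klass) { rows[row].count++; goto done; }
  //   for (row = 0; row < num_rows; row++)      // pass b): search first empty row
  //     if (rows[row].klass == 0) { rows[row].klass = receiver_klass;
  //                                 rows[row].count++; goto done; }
  //   if (is_virtual_call) total_count++;       // polymorphic case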
6233 if (is_virtual_call) { 6234 add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row); 6235 } 6236 z_bru(found_no_slot); 6237 6238 // Here we found an empty row, but we have not found Rwanted_receiver_klass. 6239 // Rmatching_row holds the address to the first empty row. 6240 bind(found_free_row); 6241 // Store receiver_klass into empty slot. 6242 z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row); 6243 6244 // Increment the counter of Rmatching_row. 6245 bind(do_increment); 6246 ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0); 6247 add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata); 6248 6249 bind(found_no_slot); 6250 6251 BLOCK_COMMENT("} type profiling"); 6252 } 6253 6254 //--------------------------------------- 6255 // Helpers for Intrinsic Emitters 6256 //--------------------------------------- 6257 6258 /** 6259 * uint32_t crc; 6260 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 6261 */ 6262 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) { 6263 assert_different_registers(crc, table, tmp); 6264 assert_different_registers(val, table); 6265 if (crc == val) { // Must rotate first to use the unmodified value. 6266 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 6267 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 6268 } else { 6269 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 6270 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 6271 } 6272 z_x(crc, Address(table, tmp, 0)); 6273 } 6274 6275 /** 6276 * uint32_t crc; 6277 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 6278 */ 6279 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 6280 fold_byte_crc32(crc, crc, table, tmp); 6281 } 6282 6283 /** 6284 * Emits code to update CRC-32 with a byte value according to constants in table. 6285 * 6286 * @param [in,out]crc Register containing the crc. 6287 * @param [in]val Register containing the byte to fold into the CRC. 6288 * @param [in]table Register containing the table of crc constants. 6289 * 6290 * uint32_t crc; 6291 * val = crc_table[(val ^ crc) & 0xFF]; 6292 * crc = val ^ (crc >> 8); 6293 */ 6294 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 6295 z_xr(val, crc); 6296 fold_byte_crc32(crc, val, table, val); 6297 } 6298 6299 6300 /** 6301 * @param crc register containing existing CRC (32-bit) 6302 * @param buf register pointing to input byte buffer (byte*) 6303 * @param len register containing number of bytes 6304 * @param table register pointing to CRC table 6305 */ 6306 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) { 6307 assert_different_registers(crc, buf, len, table, data); 6308 6309 Label L_mainLoop, L_done; 6310 const int mainLoop_stepping = 1; 6311 6312 // Process all bytes in a single-byte loop. 6313 z_ltr(len, len); 6314 z_brnh(L_done); 6315 6316 bind(L_mainLoop); 6317 z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 6318 add2reg(buf, mainLoop_stepping); // Advance buffer position. 6319 update_byte_crc32(crc, data, table); 6320 z_brct(len, L_mainLoop); // Iterate. 
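  // The loop above computes, in effect (illustrative C sketch of the classic
  // table-driven byte update, cf. update_byte_crc32() and fold_byte_crc32()):
  //
  //   while (len-- > 0) {
  //     crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
  //   }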
6321 6322 bind(L_done); 6323 } 6324 6325 /** 6326 * Emits code to update CRC-32 with a 4-byte value according to constants in table. 6327 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c. 6328 * 6329 */ 6330 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, 6331 Register t0, Register t1, Register t2, Register t3) { 6332 // This is what we implement (the DOBIG4 part): 6333 // 6334 // #define DOBIG4 c ^= *++buf4; \ 6335 // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ 6336 // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] 6337 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 6338 // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian. 6339 const int ix0 = 4*(4*CRC32_COLUMN_SIZE); 6340 const int ix1 = 5*(4*CRC32_COLUMN_SIZE); 6341 const int ix2 = 6*(4*CRC32_COLUMN_SIZE); 6342 const int ix3 = 7*(4*CRC32_COLUMN_SIZE); 6343 6344 // XOR crc with next four bytes of buffer. 6345 lgr_if_needed(t0, crc); 6346 z_x(t0, Address(buf, bufDisp)); 6347 if (bufInc != 0) { 6348 add2reg(buf, bufInc); 6349 } 6350 6351 // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices. 6352 rotate_then_insert(t3, t0, 56-2, 63-2, 2, true); // ((c >> 0) & 0xff) << 2 6353 rotate_then_insert(t2, t0, 56-2, 63-2, 2-8, true); // ((c >> 8) & 0xff) << 2 6354 rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2 6355 rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2 6356 6357 // XOR indexed table values to calculate updated crc. 6358 z_ly(t2, Address(table, t2, (intptr_t)ix1)); 6359 z_ly(t0, Address(table, t0, (intptr_t)ix3)); 6360 z_xy(t2, Address(table, t3, (intptr_t)ix0)); 6361 z_xy(t0, Address(table, t1, (intptr_t)ix2)); 6362 z_xr(t0, t2); // Now t0 contains the updated CRC value. 6363 lgr_if_needed(crc, t0); 6364 } 6365 6366 /** 6367 * @param crc register containing existing CRC (32-bit) 6368 * @param buf register pointing to input byte buffer (byte*) 6369 * @param len register containing number of bytes 6370 * @param table register pointing to CRC table 6371 * 6372 * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller! 6373 */ 6374 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table, 6375 Register t0, Register t1, Register t2, Register t3, 6376 bool invertCRC) { 6377 assert_different_registers(crc, buf, len, table); 6378 6379 Label L_mainLoop, L_tail; 6380 Register data = t0; 6381 Register ctr = Z_R0; 6382 const int mainLoop_stepping = 8; 6383 const int tailLoop_stepping = 1; 6384 const int log_stepping = exact_log2(mainLoop_stepping); 6385 6386 // Don't test for len <= 0 here. This pathological case should not occur anyway. 6387 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. 6388 // The situation itself is detected and handled correctly by the conditional branches 6389 // following aghi(len, -stepping) and aghi(len, +stepping). 6390 6391 if (invertCRC) { 6392 not_(crc, noreg, false); // 1s complement of crc 6393 } 6394 6395 #if 0 6396 { 6397 // Pre-mainLoop alignment did not show any positive effect on performance. 6398 // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment. 6399 6400 z_cghi(len, mainLoop_stepping); // Alignment is useless for short data streams. 6401 z_brnh(L_tail); 6402 6403 // Align buf to word (4-byte) boundary.
z_lcr(ctr, buf); 6405 rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc 6406 z_sgfr(len, ctr); // Remaining len after alignment. 6407 6408 update_byteLoop_crc32(crc, buf, ctr, table, data); 6409 } 6410 #endif 6411 6412 // Check for short (<mainLoop_stepping bytes) buffer. 6413 z_srag(ctr, len, log_stepping); 6414 z_brnh(L_tail); 6415 6416 z_lrvr(crc, crc); // Reverse byte order because we are dealing with big-endian data. 6417 rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop 6418 6419 BIND(L_mainLoop); 6420 update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3); 6421 update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3); 6422 z_brct(ctr, L_mainLoop); // Iterate. 6423 6424 z_lrvr(crc, crc); // Restore original byte order. 6425 6426 // Process last few (<8) bytes of buffer. 6427 BIND(L_tail); 6428 update_byteLoop_crc32(crc, buf, len, table, data); 6429 6430 if (invertCRC) { 6431 not_(crc, noreg, false); // 1s complement of crc 6432 } 6433 } 6434 6435 /** 6436 * @param crc register containing existing CRC (32-bit) 6437 * @param buf register pointing to input byte buffer (byte*) 6438 * @param len register containing number of bytes 6439 * @param table register pointing to CRC table 6440 * 6441 * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller! 6442 */ 6443 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table, 6444 Register t0, Register t1, Register t2, Register t3, 6445 bool invertCRC) { 6446 assert_different_registers(crc, buf, len, table); 6447 6448 Label L_mainLoop, L_tail; 6449 Register data = t0; 6450 Register ctr = Z_R0; 6451 const int mainLoop_stepping = 4; 6452 const int log_stepping = exact_log2(mainLoop_stepping); 6453 6454 // Don't test for len <= 0 here. This pathological case should not occur anyway. 6455 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. 6456 // The situation itself is detected and handled correctly by the conditional branches 6457 // following aghi(len, -stepping) and aghi(len, +stepping). 6458 6459 if (invertCRC) { 6460 not_(crc, noreg, false); // 1s complement of crc 6461 } 6462 6463 // Check for short (<4 bytes) buffer. 6464 z_srag(ctr, len, log_stepping); 6465 z_brnh(L_tail); 6466 6467 z_lrvr(crc, crc); // Reverse byte order because we are dealing with big-endian data. 6468 rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop 6469 6470 BIND(L_mainLoop); 6471 update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3); 6472 z_brct(ctr, L_mainLoop); // Iterate. 6473 6474 z_lrvr(crc, crc); // Restore original byte order. 6475 6476 // Process last few (<4) bytes of buffer.
6477 BIND(L_tail); 6478 update_byteLoop_crc32(crc, buf, len, table, data); 6479 6480 if (invertCRC) { 6481 not_(crc, noreg, false); // 1s complement of crc 6482 } 6483 } 6484 6485 /** 6486 * @param crc register containing existing CRC (32-bit) 6487 * @param buf register pointing to input byte buffer (byte*) 6488 * @param len register containing number of bytes 6489 * @param table register pointing to CRC table 6490 */ 6491 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, 6492 Register t0, Register t1, Register t2, Register t3, 6493 bool invertCRC) { 6494 assert_different_registers(crc, buf, len, table); 6495 Register data = t0; 6496 6497 if (invertCRC) { 6498 not_(crc, noreg, false); // 1s complement of crc 6499 } 6500 6501 update_byteLoop_crc32(crc, buf, len, table, data); 6502 6503 if (invertCRC) { 6504 not_(crc, noreg, false); // 1s complement of crc 6505 } 6506 } 6507 6508 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, 6509 bool invertCRC) { 6510 assert_different_registers(crc, buf, len, table, tmp); 6511 6512 if (invertCRC) { 6513 not_(crc, noreg, false); // 1s complement of crc 6514 } 6515 6516 z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 6517 update_byte_crc32(crc, tmp, table); 6518 6519 if (invertCRC) { 6520 not_(crc, noreg, false); // 1s complement of crc 6521 } 6522 } 6523 6524 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table, 6525 bool invertCRC) { 6526 assert_different_registers(crc, val, table); 6527 6528 if (invertCRC) { 6529 not_(crc, noreg, false); // 1s complement of crc 6530 } 6531 6532 update_byte_crc32(crc, val, table); 6533 6534 if (invertCRC) { 6535 not_(crc, noreg, false); // 1s complement of crc 6536 } 6537 } 6538 6539 // 6540 // Code for BigInteger::multiplyToLen() intrinsic. 6541 // 6542 6543 // dest_lo += src1 + src2 6544 // dest_hi += carry1 + carry2 6545 // Z_R7 is destroyed ! 6546 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, 6547 Register src1, Register src2) { 6548 clear_reg(Z_R7); 6549 z_algr(dest_lo, src1); 6550 z_alcgr(dest_hi, Z_R7); 6551 z_algr(dest_lo, src2); 6552 z_alcgr(dest_hi, Z_R7); 6553 } 6554 6555 // Multiply 64 bit by 64 bit first loop. 6556 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, 6557 Register x_xstart, 6558 Register y, Register y_idx, 6559 Register z, 6560 Register carry, 6561 Register product, 6562 Register idx, Register kdx) { 6563 // jlong carry, x[], y[], z[]; 6564 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 6565 // huge_128 product = y[idx] * x[xstart] + carry; 6566 // z[kdx] = (jlong)product; 6567 // carry = (jlong)(product >>> 64); 6568 // } 6569 // z[xstart] = carry; 6570 6571 Label L_first_loop, L_first_loop_exit; 6572 Label L_one_x, L_one_y, L_multiply; 6573 6574 z_aghi(xstart, -1); 6575 z_brl(L_one_x); // Special case: length of x is 1. 6576 6577 // Load next two integers of x. 6578 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6579 mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0)); 6580 6581 6582 bind(L_first_loop); 6583 6584 z_aghi(idx, -1); 6585 z_brl(L_first_loop_exit); 6586 z_aghi(idx, -1); 6587 z_brl(L_one_y); 6588 6589 // Load next two integers of y. 
6590 z_sllg(Z_R1_scratch, idx, LogBytesPerInt); 6591 mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0)); 6592 6593 6594 bind(L_multiply); 6595 6596 Register multiplicand = product->successor(); 6597 Register product_low = multiplicand; 6598 6599 lgr_if_needed(multiplicand, x_xstart); 6600 z_mlgr(product, y_idx); // multiplicand * y_idx -> product::multiplicand 6601 clear_reg(Z_R7); 6602 z_algr(product_low, carry); // Add carry to result. 6603 z_alcgr(product, Z_R7); // Add carry of the last addition. 6604 add2reg(kdx, -2); 6605 6606 // Store result. 6607 z_sllg(Z_R7, kdx, LogBytesPerInt); 6608 reg2mem_opt(product_low, Address(z, Z_R7, 0)); 6609 lgr_if_needed(carry, product); 6610 z_bru(L_first_loop); 6611 6612 6613 bind(L_one_y); // Load one 32 bit portion of y as (0,value). 6614 6615 clear_reg(y_idx); 6616 mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false); 6617 z_bru(L_multiply); 6618 6619 6620 bind(L_one_x); // Load one 32 bit portion of x as (0,value). 6621 6622 clear_reg(x_xstart); 6623 mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false); 6624 z_bru(L_first_loop); 6625 6626 bind(L_first_loop_exit); 6627 } 6628 6629 // Multiply 64 bit by 64 bit and add 128 bit. 6630 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, 6631 Register z, 6632 Register yz_idx, Register idx, 6633 Register carry, Register product, 6634 int offset) { 6635 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6636 // z[kdx] = (jlong)product; 6637 6638 Register multiplicand = product->successor(); 6639 Register product_low = multiplicand; 6640 6641 z_sllg(Z_R7, idx, LogBytesPerInt); 6642 mem2reg_opt(yz_idx, Address(y, Z_R7, offset)); 6643 6644 lgr_if_needed(multiplicand, x_xstart); 6645 z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand 6646 mem2reg_opt(yz_idx, Address(z, Z_R7, offset)); 6647 6648 add2_with_carry(product, product_low, carry, yz_idx); 6649 6650 z_sllg(Z_R7, idx, LogBytesPerInt); 6651 reg2mem_opt(product_low, Address(z, Z_R7, offset)); 6652 6653 } 6654 6655 // Multiply 128 bit by 128 bit. Unrolled inner loop. 
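// For reference (illustrative sketch, assuming a compiler with unsigned
// __int128 support): z_mlgr(product, yz_idx) multiplies the odd register of
// the even/odd pair by yz_idx and leaves the 128-bit result in the pair:
//
//   unsigned __int128 p = (unsigned __int128)multiplicand * yz_idx;
//   product     = (unsigned long)(p >> 64);  // even register: high 64 bits
//   product_low = (unsigned long)p;          // odd register:  low 64 bits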
6656 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, 6657 Register y, Register z, 6658 Register yz_idx, Register idx, 6659 Register jdx, 6660 Register carry, Register product, 6661 Register carry2) { 6662 // jlong carry, x[], y[], z[]; 6663 // int kdx = ystart+1; 6664 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6665 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6666 // z[kdx+idx+1] = (jlong)product; 6667 // jlong carry2 = (jlong)(product >>> 64); 6668 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6669 // z[kdx+idx] = (jlong)product; 6670 // carry = (jlong)(product >>> 64); 6671 // } 6672 // idx += 2; 6673 // if (idx > 0) { 6674 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6675 // z[kdx+idx] = (jlong)product; 6676 // carry = (jlong)(product >>> 64); 6677 // } 6678 6679 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6680 6681 // Scale the index. 6682 lgr_if_needed(jdx, idx); 6683 and_imm(jdx, 0xfffffffffffffffcL); 6684 rshift(jdx, 2); 6685 6686 6687 bind(L_third_loop); 6688 6689 z_aghi(jdx, -1); 6690 z_brl(L_third_loop_exit); 6691 add2reg(idx, -4); 6692 6693 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6694 lgr_if_needed(carry2, product); 6695 6696 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6697 lgr_if_needed(carry, product); 6698 z_bru(L_third_loop); 6699 6700 6701 bind(L_third_loop_exit); // Handle any left-over operand parts. 6702 6703 and_imm(idx, 0x3); 6704 z_brz(L_post_third_loop_done); 6705 6706 Label L_check_1; 6707 6708 z_aghi(idx, -2); 6709 z_brl(L_check_1); 6710 6711 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6712 lgr_if_needed(carry, product); 6713 6714 6715 bind(L_check_1); 6716 6717 add2reg(idx, 0x2); 6718 and_imm(idx, 0x1); 6719 z_aghi(idx, -1); 6720 z_brl(L_post_third_loop_done); 6721 6722 Register multiplicand = product->successor(); 6723 Register product_low = multiplicand; 6724 6725 z_sllg(Z_R7, idx, LogBytesPerInt); 6726 clear_reg(yz_idx); 6727 mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false); 6728 lgr_if_needed(multiplicand, x_xstart); 6729 z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand 6730 clear_reg(yz_idx); 6731 mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false); 6732 6733 add2_with_carry(product, product_low, yz_idx, carry); 6734 6735 z_sllg(Z_R7, idx, LogBytesPerInt); 6736 reg2mem_opt(product_low, Address(z, Z_R7, 0), false); 6737 rshift(product_low, 32); 6738 6739 lshift(product, 32); 6740 z_ogr(product_low, product); 6741 lgr_if_needed(carry, product_low); 6742 6743 bind(L_post_third_loop_done); 6744 } 6745 6746 void MacroAssembler::multiply_to_len(Register x, Register xlen, 6747 Register y, Register ylen, 6748 Register z, 6749 Register tmp1, Register tmp2, 6750 Register tmp3, Register tmp4, 6751 Register tmp5) { 6752 ShortBranchVerifier sbv(this); 6753 6754 assert_different_registers(x, xlen, y, ylen, z, 6755 tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7); 6756 assert_different_registers(x, xlen, y, ylen, z, 6757 tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8); 6758 6759 z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP); 6760 6761 // In OpenJDK, the argument is stored to its stack slot as a 32-bit value. 6762 Address zlen(Z_SP, _z_abi(remaining_cargs)); // Int in long on big endian.
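  // Illustrative note (assuming the standard big-endian s390x ABI): a 32-bit
  // integer passed in an 8-byte stack slot occupies the slot's least-significant,
  // i.e. high-address, half. Hence the value is picked up below with a 4-byte
  // zero-extending load (z_llgf) instead of a 64-bit load.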
6763 6764 const Register idx = tmp1; 6765 const Register kdx = tmp2; 6766 const Register xstart = tmp3; 6767 6768 const Register y_idx = tmp4; 6769 const Register carry = tmp5; 6770 const Register product = Z_R0_scratch; 6771 const Register x_xstart = Z_R8; 6772 6773 // First Loop. 6774 // 6775 // final static long LONG_MASK = 0xffffffffL; 6776 // int xstart = xlen - 1; 6777 // int ystart = ylen - 1; 6778 // long carry = 0; 6779 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 6780 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 6781 // z[kdx] = (int)product; 6782 // carry = product >>> 32; 6783 // } 6784 // z[xstart] = (int)carry; 6785 // 6786 6787 lgr_if_needed(idx, ylen); // idx = ylen 6788 z_llgf(kdx, zlen); // C2 does not respect int to long conversion for stub calls, thus load zero-extended. 6789 clear_reg(carry); // carry = 0 6790 6791 Label L_done; 6792 6793 lgr_if_needed(xstart, xlen); 6794 z_aghi(xstart, -1); 6795 z_brl(L_done); 6796 6797 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 6798 6799 NearLabel L_second_loop; 6800 compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop); 6801 6802 NearLabel L_carry; 6803 z_aghi(kdx, -1); 6804 z_brz(L_carry); 6805 6806 // Store lower 32 bits of carry. 6807 z_sllg(Z_R1_scratch, kdx, LogBytesPerInt); 6808 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6809 rshift(carry, 32); 6810 z_aghi(kdx, -1); 6811 6812 6813 bind(L_carry); 6814 6815 // Store upper 32 bits of carry. 6816 z_sllg(Z_R1_scratch, kdx, LogBytesPerInt); 6817 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6818 6819 // Second and third (nested) loops. 6820 // 6821 // for (int i = xstart-1; i >= 0; i--) { // Second loop 6822 // carry = 0; 6823 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 6824 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 6825 // (z[k] & LONG_MASK) + carry; 6826 // z[k] = (int)product; 6827 // carry = product >>> 32; 6828 // } 6829 // z[i] = (int)carry; 6830 // } 6831 // 6832 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart 6833 6834 const Register jdx = tmp1; 6835 6836 bind(L_second_loop); 6837 6838 clear_reg(carry); // carry = 0; 6839 lgr_if_needed(jdx, ylen); // j = ystart+1 6840 6841 z_aghi(xstart, -1); // i = xstart-1; 6842 z_brl(L_done); 6843 6844 // Use free slots in the current stackframe instead of push/pop. 6845 Address zsave(Z_SP, _z_abi(carg_1)); 6846 reg2mem_opt(z, zsave); 6847 6848 6849 Label L_last_x; 6850 6851 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6852 load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j 6853 z_aghi(xstart, -1); // i = xstart-1; 6854 z_brl(L_last_x); 6855 6856 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6857 mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0)); 6858 6859 6860 Label L_third_loop_prologue; 6861 6862 bind(L_third_loop_prologue); 6863 6864 Address xsave(Z_SP, _z_abi(carg_2)); 6865 Address xlensave(Z_SP, _z_abi(carg_3)); 6866 Address ylensave(Z_SP, _z_abi(carg_4)); 6867 6868 reg2mem_opt(x, xsave); 6869 reg2mem_opt(xstart, xlensave); 6870 reg2mem_opt(ylen, ylensave); 6871 6872 6873 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 6874 6875 mem2reg_opt(z, zsave); 6876 mem2reg_opt(x, xsave); 6877 mem2reg_opt(xlen, xlensave); // This is the decrement of the loop counter!
6878 mem2reg_opt(ylen, ylensave); 6879 6880 add2reg(tmp3, 1, xlen); 6881 z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt); 6882 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6883 z_aghi(tmp3, -1); 6884 z_brl(L_done); 6885 6886 rshift(carry, 32); 6887 z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt); 6888 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6889 z_bru(L_second_loop); 6890 6891 // Next infrequent code is moved outside loops. 6892 bind(L_last_x); 6893 6894 clear_reg(x_xstart); 6895 mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false); 6896 z_bru(L_third_loop_prologue); 6897 6898 bind(L_done); 6899 6900 z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP); 6901 } 6902 6903 #ifndef PRODUCT 6904 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false). 6905 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) { 6906 Label ok; 6907 if (check_equal) { 6908 z_bre(ok); 6909 } else { 6910 z_brne(ok); 6911 } 6912 stop(msg, id); 6913 bind(ok); 6914 } 6915 6916 // Assert if CC indicates "low". 6917 void MacroAssembler::asm_assert_low(const char *msg, int id) { 6918 Label ok; 6919 z_brnl(ok); 6920 stop(msg, id); 6921 bind(ok); 6922 } 6923 6924 // Assert if CC indicates "high". 6925 void MacroAssembler::asm_assert_high(const char *msg, int id) { 6926 Label ok; 6927 z_brnh(ok); 6928 stop(msg, id); 6929 bind(ok); 6930 } 6931 6932 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false); 6933 // generates non-relocatable code. 6934 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) { 6935 Label ok; 6936 if (check_equal) { z_bre(ok); } 6937 else { z_brne(ok); } 6938 stop_static(msg, id); 6939 bind(ok); 6940 } 6941 6942 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset, 6943 Register mem_base, const char* msg, int id) { 6944 switch (size) { 6945 case 4: 6946 load_and_test_int(Z_R0, Address(mem_base, mem_offset)); 6947 break; 6948 case 8: 6949 load_and_test_long(Z_R0, Address(mem_base, mem_offset)); 6950 break; 6951 default: 6952 ShouldNotReachHere(); 6953 } 6954 if (allow_relocation) { asm_assert(check_equal, msg, id); } 6955 else { asm_assert_static(check_equal, msg, id); } 6956 } 6957 6958 // Check the condition 6959 // expected_size == FP - SP 6960 // after transformation: 6961 // expected_size - FP + SP == 0 6962 // Destroys Register expected_size if no tmp register is passed. 6963 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) { 6964 if (tmp == noreg) { 6965 tmp = expected_size; 6966 } else if (tmp != expected_size) { 6967 z_lgr(tmp, expected_size); 6968 } 6969 z_algr(tmp, Z_SP); 6970 z_slg(tmp, 0, Z_R0, Z_SP); 6971 asm_assert_eq(msg, id); 6972 } 6973 6974 #endif // !PRODUCT 6975 6976 6977 void MacroAssembler::verify_thread() { 6978 if (VerifyThread) { 6979 unimplemented("", 117); 6980 } 6981 } 6982 6983 // Plausibility check for oops.
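// Typical use in code generators looks like this (illustrative; the emitted
// check is a no-op unless the VM runs with -XX:+VerifyOops):
//
//   __ verify_oop(Z_ARG1, "broken oop in arg1");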
6984 void MacroAssembler::verify_oop(Register oop, const char* msg) { 6985 if (!VerifyOops) return; 6986 6987 BLOCK_COMMENT("verify_oop {"); 6988 Register tmp = Z_R0; 6989 unsigned int nbytes_save = 5*BytesPerWord; 6990 address entry = StubRoutines::verify_oop_subroutine_entry_address(); 6991 6992 save_return_pc(); 6993 push_frame_abi160(nbytes_save); 6994 z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP); 6995 6996 z_lgr(Z_ARG2, oop); 6997 load_const(Z_ARG1, (address) msg); 6998 load_const(Z_R1, entry); 6999 z_lg(Z_R1, 0, Z_R1); 7000 call_c(Z_R1); 7001 7002 z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP); 7003 pop_frame(); 7004 restore_return_pc(); 7005 7006 BLOCK_COMMENT("} verify_oop "); 7007 } 7008 7009 const char* MacroAssembler::stop_types[] = { 7010 "stop", 7011 "untested", 7012 "unimplemented", 7013 "shouldnotreachhere" 7014 }; 7015 7016 static void stop_on_request(const char* tp, const char* msg) { 7017 tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg); 7018 guarantee(false, "Z assembly code requires stop: %s", msg); 7019 } 7020 7021 void MacroAssembler::stop(int type, const char* msg, int id) { 7022 BLOCK_COMMENT(err_msg("stop: %s {", msg)); 7023 7024 // Set up arguments. 7025 load_const(Z_ARG1, (void*) stop_types[type%stop_end]); 7026 load_const(Z_ARG2, (void*) msg); 7027 get_PC(Z_R14); // Following code pushes a frame without entering a new function. Use current pc as return address. 7028 save_return_pc(); // Saves return pc Z_R14. 7029 push_frame_abi160(0); 7030 call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); 7031 // The plain disassembler does not recognize illtrap. It instead displays 7032 // a 32-bit value. Issuing two illtraps ensures the disassembler finds 7033 // the proper beginning of the next instruction. 7034 z_illtrap(); // Illegal instruction. 7035 z_illtrap(); // Illegal instruction. 7036 7037 BLOCK_COMMENT(" } stop"); 7038 } 7039 7040 // Special version of stop() for code size reduction. 7041 // Reuses the previously generated call sequence, if any. 7042 // Generates the call sequence on its own, if necessary. 7043 // Note: This code will work only in non-relocatable code! 7044 // The relative address of the data elements (arg1, arg2) must not change. 7045 // The reentry point must not move relative to its users. This prerequisite 7046 // holds for "hand-written" code, if all chain calls are in the same code blob. 7047 // Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe. 7048 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) { 7049 BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg)); 7050 7051 // Set up arguments. 7052 if (allow_relocation) { 7053 // Relocatable version (for comparison purposes). Remove after some time. 7054 load_const(Z_ARG1, (void*) stop_types[type%stop_end]); 7055 load_const(Z_ARG2, (void*) msg); 7056 } else { 7057 load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]); 7058 load_absolute_address(Z_ARG2, (address)msg); 7059 } 7060 if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) { 7061 BLOCK_COMMENT("branch to reentry point:"); 7062 z_brc(bcondAlways, reentry); 7063 } else { 7064 BLOCK_COMMENT("reentry point:"); 7065 reentry = pc(); // Re-entry point for subsequent stop calls. 7066 save_return_pc(); // Saves return pc Z_R14.
7067 push_frame_abi160(0); 7068 if (allow_relocation) { 7069 reentry = NULL; // Prevent reentry if code relocation is allowed. 7070 call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); 7071 } else { 7072 call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); 7073 } 7074 z_illtrap(); // Illegal instruction as emergency stop, should the above call return. 7075 } 7076 BLOCK_COMMENT(" } stop_chain"); 7077 7078 return reentry; 7079 } 7080 7081 // Special version of stop() for code size reduction. 7082 // Assumes constant relative addresses for data and runtime call. 7083 void MacroAssembler::stop_static(int type, const char* msg, int id) { 7084 stop_chain(NULL, type, msg, id, false); 7085 } 7086 7087 void MacroAssembler::stop_subroutine() { 7088 unimplemented("stop_subroutine", 710); 7089 } 7090 7091 // Prints msg to stdout from within generated code. 7092 void MacroAssembler::warn(const char* msg) { 7093 RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14); 7094 load_absolute_address(Z_R1, (address) warning); 7095 load_absolute_address(Z_ARG1, (address) msg); 7096 (void) call(Z_R1); 7097 RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers); 7098 } 7099 7100 #ifndef PRODUCT 7101 7102 // Write pattern 0x0101010101010101 in region [low-before, high+after]. 7103 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) { 7104 if (!ZapEmptyStackFields) return; 7105 BLOCK_COMMENT("zap memory region {"); 7106 load_const_optimized(val, 0x0101010101010101); 7107 int size = before + after; 7108 if (low == high && size < 5 && size > 0) { 7109 int offset = -before*BytesPerWord; 7110 for (int i = 0; i < size; ++i) { 7111 z_stg(val, Address(low, offset)); 7112 offset += (1*BytesPerWord); 7113 } 7114 } else { 7115 add2reg(addr, -before*BytesPerWord, low); 7116 if (after) { 7117 #ifdef ASSERT 7118 jlong check = after * BytesPerWord; 7119 assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable!"); 7120 #endif 7121 add2reg(high, after * BytesPerWord); 7122 } 7123 NearLabel loop; 7124 bind(loop); 7125 z_stg(val, Address(addr)); 7126 add2reg(addr, 8); 7127 compare64_and_branch(addr, high, bcondNotHigh, loop); 7128 if (after) { 7129 add2reg(high, -after * BytesPerWord); 7130 } 7131 } 7132 BLOCK_COMMENT("} zap memory region"); 7133 } 7134 #endif // !PRODUCT 7135 7136 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) { 7137 _masm = masm; 7138 _masm->load_absolute_address(_rscratch, (address)flag_addr); 7139 _masm->load_and_test_int(_rscratch, Address(_rscratch)); 7140 if (value) { 7141 _masm->z_brne(_label); // Skip if true, i.e. != 0. 7142 } else { 7143 _masm->z_bre(_label); // Skip if false, i.e. == 0. 7144 } 7145 } 7146 7147 SkipIfEqual::~SkipIfEqual() { 7148 _masm->bind(_label); 7149 }
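// SkipIfEqual usage (illustrative): the constructor emits the flag test and
// the conditional branch, the destructor binds the branch target. Code
// emitted within the object's scope is skipped whenever *flag_addr == value:
//
//   { SkipIfEqual skip(masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // Code emitted here executes only if DTraceMethodProbes is true.
//   }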