1 /* 2 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016 SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "asm/codeBuffer.hpp" 28 #include "asm/macroAssembler.inline.hpp" 29 #include "compiler/disassembler.hpp" 30 #include "gc/shared/collectedHeap.inline.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "gc/shared/cardTableModRefBS.hpp" 33 #include "memory/resourceArea.hpp" 34 #include "memory/universe.hpp" 35 #include "oops/klass.inline.hpp" 36 #include "opto/compile.hpp" 37 #include "opto/intrinsicnode.hpp" 38 #include "opto/matcher.hpp" 39 #include "prims/methodHandles.hpp" 40 #include "registerSaver_s390.hpp" 41 #include "runtime/biasedLocking.hpp" 42 #include "runtime/icache.hpp" 43 #include "runtime/interfaceSupport.hpp" 44 #include "runtime/objectMonitor.hpp" 45 #include "runtime/os.hpp" 46 #include "runtime/sharedRuntime.hpp" 47 #include "runtime/stubRoutines.hpp" 48 #include "utilities/events.hpp" 49 #include "utilities/macros.hpp" 50 #if INCLUDE_ALL_GCS 51 #include "gc/g1/g1CollectedHeap.inline.hpp" 52 #include "gc/g1/g1SATBCardTableModRefBS.hpp" 53 #include "gc/g1/heapRegion.hpp" 54 #endif 55 56 #include <ucontext.h> 57 58 #define BLOCK_COMMENT(str) block_comment(str) 59 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 60 61 // Move 32-bit register if destination and source are different. 62 void MacroAssembler::lr_if_needed(Register rd, Register rs) { 63 if (rs != rd) { z_lr(rd, rs); } 64 } 65 66 // Move register if destination and source are different. 67 void MacroAssembler::lgr_if_needed(Register rd, Register rs) { 68 if (rs != rd) { z_lgr(rd, rs); } 69 } 70 71 // Zero-extend 32-bit register into 64-bit register if destination and source are different. 72 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) { 73 if (rs != rd) { z_llgfr(rd, rs); } 74 } 75 76 // Move float register if destination and source are different. 77 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) { 78 if (rs != rd) { z_ldr(rd, rs); } 79 } 80 81 // Move integer register if destination and source are different. 82 // It is assumed that shorter-than-int types are already 83 // appropriately sign-extended. 84 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src, 85 BasicType src_type) { 86 assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types"); 87 assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types"); 88 89 if (dst_type == src_type) { 90 lgr_if_needed(dst, src); // Just move all 64 bits. 91 return; 92 } 93 94 switch (dst_type) { 95 // Do not support these types for now. 96 // case T_BOOLEAN: 97 case T_BYTE: // signed byte 98 switch (src_type) { 99 case T_INT: 100 z_lgbr(dst, src); 101 break; 102 default: 103 ShouldNotReachHere(); 104 } 105 return; 106 107 case T_CHAR: 108 case T_SHORT: 109 switch (src_type) { 110 case T_INT: 111 if (dst_type == T_CHAR) { 112 z_llghr(dst, src); 113 } else { 114 z_lghr(dst, src); 115 } 116 break; 117 default: 118 ShouldNotReachHere(); 119 } 120 return; 121 122 case T_INT: 123 switch (src_type) { 124 case T_BOOLEAN: 125 case T_BYTE: 126 case T_CHAR: 127 case T_SHORT: 128 case T_INT: 129 case T_LONG: 130 case T_OBJECT: 131 case T_ARRAY: 132 case T_VOID: 133 case T_ADDRESS: 134 lr_if_needed(dst, src); 135 // llgfr_if_needed(dst, src); // zero-extend (in case we need to find a bug). 136 return; 137 138 default: 139 assert(false, "non-integer src type"); 140 return; 141 } 142 case T_LONG: 143 switch (src_type) { 144 case T_BOOLEAN: 145 case T_BYTE: 146 case T_CHAR: 147 case T_SHORT: 148 case T_INT: 149 z_lgfr(dst, src); // sign extension 150 return; 151 152 case T_LONG: 153 case T_OBJECT: 154 case T_ARRAY: 155 case T_VOID: 156 case T_ADDRESS: 157 lgr_if_needed(dst, src); 158 return; 159 160 default: 161 assert(false, "non-integer src type"); 162 return; 163 } 164 return; 165 case T_OBJECT: 166 case T_ARRAY: 167 case T_VOID: 168 case T_ADDRESS: 169 switch (src_type) { 170 // These types don't make sense to be converted to pointers: 171 // case T_BOOLEAN: 172 // case T_BYTE: 173 // case T_CHAR: 174 // case T_SHORT: 175 176 case T_INT: 177 z_llgfr(dst, src); // zero extension 178 return; 179 180 case T_LONG: 181 case T_OBJECT: 182 case T_ARRAY: 183 case T_VOID: 184 case T_ADDRESS: 185 lgr_if_needed(dst, src); 186 return; 187 188 default: 189 assert(false, "non-integer src type"); 190 return; 191 } 192 return; 193 default: 194 assert(false, "non-integer dst type"); 195 return; 196 } 197 } 198 199 // Move float register if destination and source are different. 200 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type, 201 FloatRegister src, BasicType src_type) { 202 assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types"); 203 assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types"); 204 if (dst_type == src_type) { 205 ldr_if_needed(dst, src); // Just move all 64 bits. 206 } else { 207 switch (dst_type) { 208 case T_FLOAT: 209 assert(src_type == T_DOUBLE, "invalid float type combination"); 210 z_ledbr(dst, src); 211 return; 212 case T_DOUBLE: 213 assert(src_type == T_FLOAT, "invalid float type combination"); 214 z_ldebr(dst, src); 215 return; 216 default: 217 assert(false, "non-float dst type"); 218 return; 219 } 220 } 221 } 222 223 // Optimized emitter for reg to mem operations. 224 // Uses modern instructions if running on modern hardware, classic instructions 225 // otherwise. Prefers (usually shorter) classic instructions if applicable. 226 // Data register (reg) cannot be used as work register. 227 // 228 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default). 229 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs! 230 void MacroAssembler::freg2mem_opt(FloatRegister reg, 231 int64_t disp, 232 Register index, 233 Register base, 234 void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register), 235 void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register), 236 Register scratch) { 237 index = (index == noreg) ? Z_R0 : index; 238 if (Displacement::is_shortDisp(disp)) { 239 (this->*classic)(reg, disp, index, base); 240 } else { 241 if (Displacement::is_validDisp(disp)) { 242 (this->*modern)(reg, disp, index, base); 243 } else { 244 if (scratch != Z_R0 && scratch != Z_R1) { 245 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 246 } else { 247 if (scratch != Z_R0) { // scratch == Z_R1 248 if ((scratch == index) || (index == base)) { 249 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 250 } else { 251 add2reg(scratch, disp, base); 252 (this->*classic)(reg, 0, index, scratch); 253 if (base == scratch) { 254 add2reg(base, -disp); // Restore base. 255 } 256 } 257 } else { // scratch == Z_R0 258 z_lgr(scratch, base); 259 add2reg(base, disp); 260 (this->*classic)(reg, 0, index, base); 261 z_lgr(base, scratch); // Restore base. 262 } 263 } 264 } 265 } 266 } 267 268 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) { 269 if (is_double) { 270 freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std)); 271 } else { 272 freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste)); 273 } 274 } 275 276 // Optimized emitter for mem to reg operations. 277 // Uses modern instructions if running on modern hardware, classic instructions 278 // otherwise. Prefers (usually shorter) classic instructions if applicable. 279 // data register (reg) cannot be used as work register. 280 // 281 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default). 282 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs! 283 void MacroAssembler::mem2freg_opt(FloatRegister reg, 284 int64_t disp, 285 Register index, 286 Register base, 287 void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register), 288 void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register), 289 Register scratch) { 290 index = (index == noreg) ? Z_R0 : index; 291 if (Displacement::is_shortDisp(disp)) { 292 (this->*classic)(reg, disp, index, base); 293 } else { 294 if (Displacement::is_validDisp(disp)) { 295 (this->*modern)(reg, disp, index, base); 296 } else { 297 if (scratch != Z_R0 && scratch != Z_R1) { 298 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 299 } else { 300 if (scratch != Z_R0) { // scratch == Z_R1 301 if ((scratch == index) || (index == base)) { 302 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 303 } else { 304 add2reg(scratch, disp, base); 305 (this->*classic)(reg, 0, index, scratch); 306 if (base == scratch) { 307 add2reg(base, -disp); // Restore base. 308 } 309 } 310 } else { // scratch == Z_R0 311 z_lgr(scratch, base); 312 add2reg(base, disp); 313 (this->*classic)(reg, 0, index, base); 314 z_lgr(base, scratch); // Restore base. 315 } 316 } 317 } 318 } 319 } 320 321 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) { 322 if (is_double) { 323 mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld)); 324 } else { 325 mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le)); 326 } 327 } 328 329 // Optimized emitter for reg to mem operations. 330 // Uses modern instructions if running on modern hardware, classic instructions 331 // otherwise. Prefers (usually shorter) classic instructions if applicable. 332 // Data register (reg) cannot be used as work register. 333 // 334 // Don't rely on register locking, instead pass a scratch register 335 // (Z_R0 by default) 336 // CAUTION! passing registers >= Z_R2 may produce bad results on old CPUs! 337 void MacroAssembler::reg2mem_opt(Register reg, 338 int64_t disp, 339 Register index, 340 Register base, 341 void (MacroAssembler::*modern) (Register, int64_t, Register, Register), 342 void (MacroAssembler::*classic)(Register, int64_t, Register, Register), 343 Register scratch) { 344 index = (index == noreg) ? Z_R0 : index; 345 if (Displacement::is_shortDisp(disp)) { 346 (this->*classic)(reg, disp, index, base); 347 } else { 348 if (Displacement::is_validDisp(disp)) { 349 (this->*modern)(reg, disp, index, base); 350 } else { 351 if (scratch != Z_R0 && scratch != Z_R1) { 352 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 353 } else { 354 if (scratch != Z_R0) { // scratch == Z_R1 355 if ((scratch == index) || (index == base)) { 356 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 357 } else { 358 add2reg(scratch, disp, base); 359 (this->*classic)(reg, 0, index, scratch); 360 if (base == scratch) { 361 add2reg(base, -disp); // Restore base. 362 } 363 } 364 } else { // scratch == Z_R0 365 if ((scratch == reg) || (scratch == base) || (reg == base)) { 366 (this->*modern)(reg, disp, index, base); // Will fail with disp out of range. 367 } else { 368 z_lgr(scratch, base); 369 add2reg(base, disp); 370 (this->*classic)(reg, 0, index, base); 371 z_lgr(base, scratch); // Restore base. 372 } 373 } 374 } 375 } 376 } 377 } 378 379 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) { 380 int store_offset = offset(); 381 if (is_double) { 382 reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg)); 383 } else { 384 reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st)); 385 } 386 return store_offset; 387 } 388 389 // Optimized emitter for mem to reg operations. 390 // Uses modern instructions if running on modern hardware, classic instructions 391 // otherwise. Prefers (usually shorter) classic instructions if applicable. 392 // Data register (reg) will be used as work register where possible. 393 void MacroAssembler::mem2reg_opt(Register reg, 394 int64_t disp, 395 Register index, 396 Register base, 397 void (MacroAssembler::*modern) (Register, int64_t, Register, Register), 398 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) { 399 index = (index == noreg) ? Z_R0 : index; 400 if (Displacement::is_shortDisp(disp)) { 401 (this->*classic)(reg, disp, index, base); 402 } else { 403 if (Displacement::is_validDisp(disp)) { 404 (this->*modern)(reg, disp, index, base); 405 } else { 406 if ((reg == index) && (reg == base)) { 407 z_sllg(reg, reg, 1); 408 add2reg(reg, disp); 409 (this->*classic)(reg, 0, noreg, reg); 410 } else if ((reg == index) && (reg != Z_R0)) { 411 add2reg(reg, disp); 412 (this->*classic)(reg, 0, reg, base); 413 } else if (reg == base) { 414 add2reg(reg, disp); 415 (this->*classic)(reg, 0, index, reg); 416 } else if (reg != Z_R0) { 417 add2reg(reg, disp, base); 418 (this->*classic)(reg, 0, index, reg); 419 } else { // reg == Z_R0 && reg != base here 420 add2reg(base, disp); 421 (this->*classic)(reg, 0, index, base); 422 add2reg(base, -disp); 423 } 424 } 425 } 426 } 427 428 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) { 429 if (is_double) { 430 z_lg(reg, a); 431 } else { 432 mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l)); 433 } 434 } 435 436 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) { 437 mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf)); 438 } 439 440 void MacroAssembler::and_imm(Register r, long mask, 441 Register tmp /* = Z_R0 */, 442 bool wide /* = false */) { 443 assert(wide || Immediate::is_simm32(mask), "mask value too large"); 444 445 if (!wide) { 446 z_nilf(r, mask); 447 return; 448 } 449 450 assert(r != tmp, " need a different temporary register !"); 451 load_const_optimized(tmp, mask); 452 z_ngr(r, tmp); 453 } 454 455 // Calculate the 1's complement. 456 // Note: The condition code is neither preserved nor correctly set by this code!!! 457 // Note: (wide == false) does not protect the high order half of the target register 458 // from alteration. It only serves as optimization hint for 32-bit results. 459 void MacroAssembler::not_(Register r1, Register r2, bool wide) { 460 461 if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place. 462 z_xilf(r1, -1); 463 if (wide) { 464 z_xihf(r1, -1); 465 } 466 } else { // Distinct src and dst registers. 467 if (VM_Version::has_DistinctOpnds()) { 468 load_const_optimized(r1, -1); 469 z_xgrk(r1, r2, r1); 470 } else { 471 if (wide) { 472 z_lgr(r1, r2); 473 z_xilf(r1, -1); 474 z_xihf(r1, -1); 475 } else { 476 z_lr(r1, r2); 477 z_xilf(r1, -1); 478 } 479 } 480 } 481 } 482 483 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) { 484 assert(lBitPos >= 0, "zero is leftmost bit position"); 485 assert(rBitPos <= 63, "63 is rightmost bit position"); 486 assert(lBitPos <= rBitPos, "inverted selection interval"); 487 return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1)); 488 } 489 490 // Helper function for the "Rotate_then_<logicalOP>" emitters. 491 // Rotate src, then mask register contents such that only bits in range survive. 492 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range. 493 // For oneBits == true, all bits not in range are set to 1. Useful for preserving all bits outside range. 494 // The caller must ensure that the selected range only contains bits with defined value. 495 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos, 496 int nRotate, bool src32bit, bool dst32bit, bool oneBits) { 497 assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination"); 498 bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G). 499 bool srl4rll = (nRotate < 0) && (-nRotate <= lBitPos); // Substitute SRL(G) for RLL(G). 500 // Pre-determine which parts of dst will be zero after shift/rotate. 501 bool llZero = sll4rll && (nRotate >= 16); 502 bool lhZero = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48)); 503 bool lfZero = llZero && lhZero; 504 bool hlZero = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32)); 505 bool hhZero = (srl4rll && (nRotate <= -16)); 506 bool hfZero = hlZero && hhZero; 507 508 // rotate then mask src operand. 509 // if oneBits == true, all bits outside selected range are 1s. 510 // if oneBits == false, all bits outside selected range are 0s. 511 if (src32bit) { // There might be garbage in the upper 32 bits which will get masked away. 512 if (dst32bit) { 513 z_rll(dst, src, nRotate); // Copy and rotate, upper half of reg remains undisturbed. 514 } else { 515 if (sll4rll) { z_sllg(dst, src, nRotate); } 516 else if (srl4rll) { z_srlg(dst, src, -nRotate); } 517 else { z_rllg(dst, src, nRotate); } 518 } 519 } else { 520 if (sll4rll) { z_sllg(dst, src, nRotate); } 521 else if (srl4rll) { z_srlg(dst, src, -nRotate); } 522 else { z_rllg(dst, src, nRotate); } 523 } 524 525 unsigned long range_mask = create_mask(lBitPos, rBitPos); 526 unsigned int range_mask_h = (unsigned int)(range_mask >> 32); 527 unsigned int range_mask_l = (unsigned int)range_mask; 528 unsigned short range_mask_hh = (unsigned short)(range_mask >> 48); 529 unsigned short range_mask_hl = (unsigned short)(range_mask >> 32); 530 unsigned short range_mask_lh = (unsigned short)(range_mask >> 16); 531 unsigned short range_mask_ll = (unsigned short)range_mask; 532 // Works for z9 and newer H/W. 533 if (oneBits) { 534 if ((~range_mask_l) != 0) { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s. 535 if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); } 536 } else { 537 // All bits outside range become 0s 538 if (((~range_mask_l) != 0) && !lfZero) { 539 z_nilf(dst, range_mask_l); 540 } 541 if (((~range_mask_h) != 0) && !dst32bit && !hfZero) { 542 z_nihf(dst, range_mask_h); 543 } 544 } 545 } 546 547 // Rotate src, then insert selected range from rotated src into dst. 548 // Clear dst before, if requested. 549 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos, 550 int nRotate, bool clear_dst) { 551 // This version does not depend on src being zero-extended int2long. 552 nRotate &= 0x003f; // For risbg, pretend it's an unsigned value. 553 z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest. 554 } 555 556 // Rotate src, then and selected range from rotated src into dst. 557 // Set condition code only if so requested. Otherwise it is unpredictable. 558 // See performance note in macroAssembler_s390.hpp for important information. 559 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos, 560 int nRotate, bool test_only) { 561 guarantee(!test_only, "Emitter not fit for test_only instruction variant."); 562 // This version does not depend on src being zero-extended int2long. 563 nRotate &= 0x003f; // For risbg, pretend it's an unsigned value. 564 z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected. 565 } 566 567 // Rotate src, then or selected range from rotated src into dst. 568 // Set condition code only if so requested. Otherwise it is unpredictable. 569 // See performance note in macroAssembler_s390.hpp for important information. 570 void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos, 571 int nRotate, bool test_only) { 572 guarantee(!test_only, "Emitter not fit for test_only instruction variant."); 573 // This version does not depend on src being zero-extended int2long. 574 nRotate &= 0x003f; // For risbg, pretend it's an unsigned value. 575 z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected. 576 } 577 578 // Rotate src, then xor selected range from rotated src into dst. 579 // Set condition code only if so requested. Otherwise it is unpredictable. 580 // See performance note in macroAssembler_s390.hpp for important information. 581 void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos, 582 int nRotate, bool test_only) { 583 guarantee(!test_only, "Emitter not fit for test_only instruction variant."); 584 // This version does not depend on src being zero-extended int2long. 585 nRotate &= 0x003f; // For risbg, pretend it's an unsigned value. 586 z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected. 587 } 588 589 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) { 590 if (inc.is_register()) { 591 z_agr(r1, inc.as_register()); 592 } else { // constant 593 intptr_t imm = inc.as_constant(); 594 add2reg(r1, imm); 595 } 596 } 597 // Helper function to multiply the 64bit contents of a register by a 16bit constant. 598 // The optimization tries to avoid the mghi instruction, since it uses the FPU for 599 // calculation and is thus rather slow. 600 // 601 // There is no handling for special cases, e.g. cval==0 or cval==1. 602 // 603 // Returns len of generated code block. 604 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) { 605 int block_start = offset(); 606 607 bool sign_flip = cval < 0; 608 cval = sign_flip ? -cval : cval; 609 610 BLOCK_COMMENT("Reg64*Con16 {"); 611 612 int bit1 = cval & -cval; 613 if (bit1 == cval) { 614 z_sllg(rval, rval, exact_log2(bit1)); 615 if (sign_flip) { z_lcgr(rval, rval); } 616 } else { 617 int bit2 = (cval-bit1) & -(cval-bit1); 618 if ((bit1+bit2) == cval) { 619 z_sllg(work, rval, exact_log2(bit1)); 620 z_sllg(rval, rval, exact_log2(bit2)); 621 z_agr(rval, work); 622 if (sign_flip) { z_lcgr(rval, rval); } 623 } else { 624 if (sign_flip) { z_mghi(rval, -cval); } 625 else { z_mghi(rval, cval); } 626 } 627 } 628 BLOCK_COMMENT("} Reg64*Con16"); 629 630 int block_end = offset(); 631 return block_end - block_start; 632 } 633 634 // Generic operation r1 := r2 + imm. 635 // 636 // Should produce the best code for each supported CPU version. 637 // r2 == noreg yields r1 := r1 + imm 638 // imm == 0 emits either no instruction or r1 := r2 ! 639 // NOTES: 1) Don't use this function where fixed sized 640 // instruction sequences are required!!! 641 // 2) Don't use this function if condition code 642 // setting is required! 643 // 3) Despite being declared as int64_t, the parameter imm 644 // must be a simm_32 value (= signed 32-bit integer). 645 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) { 646 assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong"); 647 648 if (r2 == noreg) { r2 = r1; } 649 650 // Handle special case imm == 0. 651 if (imm == 0) { 652 lgr_if_needed(r1, r2); 653 // Nothing else to do. 654 return; 655 } 656 657 if (!PreferLAoverADD || (r2 == Z_R0)) { 658 bool distinctOpnds = VM_Version::has_DistinctOpnds(); 659 660 // Can we encode imm in 16 bits signed? 661 if (Immediate::is_simm16(imm)) { 662 if (r1 == r2) { 663 z_aghi(r1, imm); 664 return; 665 } 666 if (distinctOpnds) { 667 z_aghik(r1, r2, imm); 668 return; 669 } 670 z_lgr(r1, r2); 671 z_aghi(r1, imm); 672 return; 673 } 674 } else { 675 // Can we encode imm in 12 bits unsigned? 676 if (Displacement::is_shortDisp(imm)) { 677 z_la(r1, imm, r2); 678 return; 679 } 680 // Can we encode imm in 20 bits signed? 681 if (Displacement::is_validDisp(imm)) { 682 // Always use LAY instruction, so we don't need the tmp register. 683 z_lay(r1, imm, r2); 684 return; 685 } 686 687 } 688 689 // Can handle it (all possible values) with long immediates. 690 lgr_if_needed(r1, r2); 691 z_agfi(r1, imm); 692 } 693 694 // Generic operation r := b + x + d 695 // 696 // Addition of several operands with address generation semantics - sort of: 697 // - no restriction on the registers. Any register will do for any operand. 698 // - x == noreg: operand will be disregarded. 699 // - b == noreg: will use (contents of) result reg as operand (r := r + d). 700 // - x == Z_R0: just disregard 701 // - b == Z_R0: use as operand. This is not address generation semantics!!! 702 // 703 // The same restrictions as on add2reg() are valid!!! 704 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) { 705 assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong"); 706 707 if (x == noreg) { x = Z_R0; } 708 if (b == noreg) { b = r; } 709 710 // Handle special case x == R0. 711 if (x == Z_R0) { 712 // Can simply add the immediate value to the base register. 713 add2reg(r, d, b); 714 return; 715 } 716 717 if (!PreferLAoverADD || (b == Z_R0)) { 718 bool distinctOpnds = VM_Version::has_DistinctOpnds(); 719 // Handle special case d == 0. 720 if (d == 0) { 721 if (b == x) { z_sllg(r, b, 1); return; } 722 if (r == x) { z_agr(r, b); return; } 723 if (r == b) { z_agr(r, x); return; } 724 if (distinctOpnds) { z_agrk(r, x, b); return; } 725 z_lgr(r, b); 726 z_agr(r, x); 727 } else { 728 if (x == b) { z_sllg(r, x, 1); } 729 else if (r == x) { z_agr(r, b); } 730 else if (r == b) { z_agr(r, x); } 731 else if (distinctOpnds) { z_agrk(r, x, b); } 732 else { 733 z_lgr(r, b); 734 z_agr(r, x); 735 } 736 add2reg(r, d); 737 } 738 } else { 739 // Can we encode imm in 12 bits unsigned? 740 if (Displacement::is_shortDisp(d)) { 741 z_la(r, d, x, b); 742 return; 743 } 744 // Can we encode imm in 20 bits signed? 745 if (Displacement::is_validDisp(d)) { 746 z_lay(r, d, x, b); 747 return; 748 } 749 z_la(r, 0, x, b); 750 add2reg(r, d); 751 } 752 } 753 754 // Generic emitter (32bit) for direct memory increment. 755 // For optimal code, do not specify Z_R0 as temp register. 756 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) { 757 if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) { 758 z_asi(a, imm); 759 } else { 760 z_lgf(tmp, a); 761 add2reg(tmp, imm); 762 z_st(tmp, a); 763 } 764 } 765 766 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) { 767 if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) { 768 z_agsi(a, imm); 769 } else { 770 z_lg(tmp, a); 771 add2reg(tmp, imm); 772 z_stg(tmp, a); 773 } 774 } 775 776 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 777 switch (size_in_bytes) { 778 case 8: z_lg(dst, src); break; 779 case 4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break; 780 case 2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break; 781 case 1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break; 782 default: ShouldNotReachHere(); 783 } 784 } 785 786 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) { 787 switch (size_in_bytes) { 788 case 8: z_stg(src, dst); break; 789 case 4: z_st(src, dst); break; 790 case 2: z_sth(src, dst); break; 791 case 1: z_stc(src, dst); break; 792 default: ShouldNotReachHere(); 793 } 794 } 795 796 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and 797 // a high-order summand in register tmp. 798 // 799 // return value: < 0: No split required, si20 actually has property uimm12. 800 // >= 0: Split performed. Use return value as uimm12 displacement and 801 // tmp as index register. 802 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) { 803 assert(Immediate::is_simm20(si20_offset), "sanity"); 804 int lg_off = (int)si20_offset & 0x0fff; // Punch out low-order 12 bits, always positive. 805 int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero. 806 assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) || 807 !Displacement::is_shortDisp(si20_offset), "unexpected offset values"); 808 assert((lg_off+ll_off) == si20_offset, "offset splitup error"); 809 810 Register work = accumulate? Z_R0 : tmp; 811 812 if (fixed_codelen) { // Len of code = 10 = 4 + 6. 813 z_lghi(work, ll_off>>12); // Implicit sign extension. 814 z_slag(work, work, 12); 815 } else { // Len of code = 0..10. 816 if (ll_off == 0) { return -1; } 817 // ll_off has 8 significant bits (at most) plus sign. 818 if ((ll_off & 0x0000f000) == 0) { // Non-zero bits only in upper halfbyte. 819 z_llilh(work, ll_off >> 16); 820 if (ll_off < 0) { // Sign-extension required. 821 z_lgfr(work, work); 822 } 823 } else { 824 if ((ll_off & 0x000f0000) == 0) { // Non-zero bits only in lower halfbyte. 825 z_llill(work, ll_off); 826 } else { // Non-zero bits in both halfbytes. 827 z_lghi(work, ll_off>>12); // Implicit sign extension. 828 z_slag(work, work, 12); 829 } 830 } 831 } 832 if (accumulate) { z_algr(tmp, work); } // len of code += 4 833 return lg_off; 834 } 835 836 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) { 837 if (Displacement::is_validDisp(si20)) { 838 z_ley(t, si20, a); 839 } else { 840 // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset 841 // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant 842 // pool loads). 843 bool accumulate = true; 844 bool fixed_codelen = true; 845 Register work; 846 847 if (fixed_codelen) { 848 z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen. 849 } else { 850 accumulate = (a == tmp); 851 } 852 work = tmp; 853 854 int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate); 855 if (disp12 < 0) { 856 z_le(t, si20, work); 857 } else { 858 if (accumulate) { 859 z_le(t, disp12, work); 860 } else { 861 z_le(t, disp12, work, a); 862 } 863 } 864 } 865 } 866 867 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) { 868 if (Displacement::is_validDisp(si20)) { 869 z_ldy(t, si20, a); 870 } else { 871 // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset 872 // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant 873 // pool loads). 874 bool accumulate = true; 875 bool fixed_codelen = true; 876 Register work; 877 878 if (fixed_codelen) { 879 z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen. 880 } else { 881 accumulate = (a == tmp); 882 } 883 work = tmp; 884 885 int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate); 886 if (disp12 < 0) { 887 z_ld(t, si20, work); 888 } else { 889 if (accumulate) { 890 z_ld(t, disp12, work); 891 } else { 892 z_ld(t, disp12, work, a); 893 } 894 } 895 } 896 } 897 898 // PCrelative TOC access. 899 // Returns distance (in bytes) from current position to start of consts section. 900 // Returns 0 (zero) if no consts section exists or if it has size zero. 901 long MacroAssembler::toc_distance() { 902 CodeSection* cs = code()->consts(); 903 return (long)((cs != NULL) ? cs->start()-pc() : 0); 904 } 905 906 // Implementation on x86/sparc assumes that constant and instruction section are 907 // adjacent, but this doesn't hold. Two special situations may occur, that we must 908 // be able to handle: 909 // 1. const section may be located apart from the inst section. 910 // 2. const section may be empty 911 // In both cases, we use the const section's start address to compute the "TOC", 912 // this seems to occur only temporarily; in the final step we always seem to end up 913 // with the pc-relatice variant. 914 // 915 // PC-relative offset could be +/-2**32 -> use long for disp 916 // Furthermore: makes no sense to have special code for 917 // adjacent const and inst sections. 918 void MacroAssembler::load_toc(Register Rtoc) { 919 // Simply use distance from start of const section (should be patched in the end). 920 long disp = toc_distance(); 921 922 RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp); 923 relocate(rspec); 924 z_larl(Rtoc, RelAddr::pcrel_off32(disp)); // Offset is in halfwords. 925 } 926 927 // PCrelative TOC access. 928 // Load from anywhere pcrelative (with relocation of load instr) 929 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) { 930 address pc = this->pc(); 931 ptrdiff_t total_distance = dataLocation - pc; 932 RelocationHolder rspec = internal_word_Relocation::spec(dataLocation); 933 934 assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory"); 935 assert(total_distance != 0, "sanity"); 936 937 // Some extra safety net. 938 if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) { 939 guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away"); 940 } 941 942 (this)->relocate(rspec, relocInfo::pcrel_addr_format); 943 z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance)); 944 } 945 946 947 // PCrelative TOC access. 948 // Load from anywhere pcrelative (with relocation of load instr) 949 // loaded addr has to be relocated when added to constant pool. 950 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) { 951 address pc = this->pc(); 952 ptrdiff_t total_distance = addrLocation - pc; 953 RelocationHolder rspec = internal_word_Relocation::spec(addrLocation); 954 955 assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory"); 956 957 // Some extra safety net. 958 if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) { 959 guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away"); 960 } 961 962 (this)->relocate(rspec, relocInfo::pcrel_addr_format); 963 z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance)); 964 } 965 966 // Generic operation: load a value from memory and test. 967 // CondCode indicates the sign (<0, ==0, >0) of the loaded value. 968 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) { 969 z_lb(dst, a); 970 z_ltr(dst, dst); 971 } 972 973 void MacroAssembler::load_and_test_short(Register dst, const Address &a) { 974 int64_t disp = a.disp20(); 975 if (Displacement::is_shortDisp(disp)) { 976 z_lh(dst, a); 977 } else if (Displacement::is_longDisp(disp)) { 978 z_lhy(dst, a); 979 } else { 980 guarantee(false, "displacement out of range"); 981 } 982 z_ltr(dst, dst); 983 } 984 985 void MacroAssembler::load_and_test_int(Register dst, const Address &a) { 986 z_lt(dst, a); 987 } 988 989 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) { 990 z_ltgf(dst, a); 991 } 992 993 void MacroAssembler::load_and_test_long(Register dst, const Address &a) { 994 z_ltg(dst, a); 995 } 996 997 // Test a bit in memory. 998 void MacroAssembler::testbit(const Address &a, unsigned int bit) { 999 assert(a.index() == noreg, "no index reg allowed in testbit"); 1000 if (bit <= 7) { 1001 z_tm(a.disp() + 3, a.base(), 1 << bit); 1002 } else if (bit <= 15) { 1003 z_tm(a.disp() + 2, a.base(), 1 << (bit - 8)); 1004 } else if (bit <= 23) { 1005 z_tm(a.disp() + 1, a.base(), 1 << (bit - 16)); 1006 } else if (bit <= 31) { 1007 z_tm(a.disp() + 0, a.base(), 1 << (bit - 24)); 1008 } else { 1009 ShouldNotReachHere(); 1010 } 1011 } 1012 1013 // Test a bit in a register. Result is reflected in CC. 1014 void MacroAssembler::testbit(Register r, unsigned int bitPos) { 1015 if (bitPos < 16) { 1016 z_tmll(r, 1U<<bitPos); 1017 } else if (bitPos < 32) { 1018 z_tmlh(r, 1U<<(bitPos-16)); 1019 } else if (bitPos < 48) { 1020 z_tmhl(r, 1U<<(bitPos-32)); 1021 } else if (bitPos < 64) { 1022 z_tmhh(r, 1U<<(bitPos-48)); 1023 } else { 1024 ShouldNotReachHere(); 1025 } 1026 } 1027 1028 // Clear a register, i.e. load const zero into reg. 1029 // Return len (in bytes) of generated instruction(s). 1030 // whole_reg: Clear 64 bits if true, 32 bits otherwise. 1031 // set_cc: Use instruction that sets the condition code, if true. 1032 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) { 1033 unsigned int start_off = offset(); 1034 if (whole_reg) { 1035 set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0); 1036 } else { // Only 32bit register. 1037 set_cc ? z_xr(r, r) : z_lhi(r, 0); 1038 } 1039 return offset() - start_off; 1040 } 1041 1042 #ifdef ASSERT 1043 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) { 1044 switch (pattern_len) { 1045 case 1: 1046 pattern = (pattern & 0x000000ff) | ((pattern & 0x000000ff)<<8); 1047 case 2: 1048 pattern = (pattern & 0x0000ffff) | ((pattern & 0x0000ffff)<<16); 1049 case 4: 1050 pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32); 1051 case 8: 1052 return load_const_optimized_rtn_len(r, pattern, true); 1053 break; 1054 default: 1055 guarantee(false, "preset_reg: bad len"); 1056 } 1057 return 0; 1058 } 1059 #endif 1060 1061 // addr: Address descriptor of memory to clear index register will not be used ! 1062 // size: Number of bytes to clear. 1063 // !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!! 1064 // !!! Use store_const() instead !!! 1065 void MacroAssembler::clear_mem(const Address& addr, unsigned size) { 1066 guarantee(size <= 256, "MacroAssembler::clear_mem: size too large"); 1067 1068 if (size == 1) { 1069 z_mvi(addr, 0); 1070 return; 1071 } 1072 1073 switch (size) { 1074 case 2: z_mvhhi(addr, 0); 1075 return; 1076 case 4: z_mvhi(addr, 0); 1077 return; 1078 case 8: z_mvghi(addr, 0); 1079 return; 1080 default: ; // Fallthru to xc. 1081 } 1082 1083 z_xc(addr, size, addr); 1084 } 1085 1086 void MacroAssembler::align(int modulus) { 1087 while (offset() % modulus != 0) z_nop(); 1088 } 1089 1090 // Special version for non-relocateable code if required alignment 1091 // is larger than CodeEntryAlignment. 1092 void MacroAssembler::align_address(int modulus) { 1093 while ((uintptr_t)pc() % modulus != 0) z_nop(); 1094 } 1095 1096 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 1097 Register temp_reg, 1098 int64_t extra_slot_offset) { 1099 // On Z, we can have index and disp in an Address. So don't call argument_offset, 1100 // which issues an unnecessary add instruction. 1101 int stackElementSize = Interpreter::stackElementSize; 1102 int64_t offset = extra_slot_offset * stackElementSize; 1103 const Register argbase = Z_esp; 1104 if (arg_slot.is_constant()) { 1105 offset += arg_slot.as_constant() * stackElementSize; 1106 return Address(argbase, offset); 1107 } 1108 // else 1109 assert(temp_reg != noreg, "must specify"); 1110 assert(temp_reg != Z_ARG1, "base and index are conflicting"); 1111 z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3 1112 return Address(argbase, temp_reg, offset); 1113 } 1114 1115 1116 //=================================================================== 1117 //=== START C O N S T A N T S I N C O D E S T R E A M === 1118 //=================================================================== 1119 //=== P A T CH A B L E C O N S T A N T S === 1120 //=================================================================== 1121 1122 1123 //--------------------------------------------------- 1124 // Load (patchable) constant into register 1125 //--------------------------------------------------- 1126 1127 1128 // Load absolute address (and try to optimize). 1129 // Note: This method is usable only for position-fixed code, 1130 // referring to a position-fixed target location. 1131 // If not so, relocations and patching must be used. 1132 void MacroAssembler::load_absolute_address(Register d, address addr) { 1133 assert(addr != NULL, "should not happen"); 1134 BLOCK_COMMENT("load_absolute_address:"); 1135 if (addr == NULL) { 1136 z_larl(d, pc()); // Dummy emit for size calc. 1137 return; 1138 } 1139 1140 if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) { 1141 z_larl(d, addr); 1142 return; 1143 } 1144 1145 load_const_optimized(d, (long)addr); 1146 } 1147 1148 // Load a 64bit constant. 1149 // Patchable code sequence, but not atomically patchable. 1150 // Make sure to keep code size constant -> no value-dependent optimizations. 1151 // Do not kill condition code. 1152 void MacroAssembler::load_const(Register t, long x) { 1153 Assembler::z_iihf(t, (int)(x >> 32)); 1154 Assembler::z_iilf(t, (int)(x & 0xffffffff)); 1155 } 1156 1157 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend. 1158 // Patchable code sequence, but not atomically patchable. 1159 // Make sure to keep code size constant -> no value-dependent optimizations. 1160 // Do not kill condition code. 1161 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) { 1162 if (sign_extend) { Assembler::z_lgfi(t, x); } 1163 else { Assembler::z_llilf(t, x); } 1164 } 1165 1166 // Load narrow oop constant, no decompression. 1167 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) { 1168 assert(UseCompressedOops, "must be on to call this method"); 1169 load_const_32to64(t, a, false /*sign_extend*/); 1170 } 1171 1172 // Load narrow klass constant, compression required. 1173 void MacroAssembler::load_narrow_klass(Register t, Klass* k) { 1174 assert(UseCompressedClassPointers, "must be on to call this method"); 1175 narrowKlass encoded_k = Klass::encode_klass(k); 1176 load_const_32to64(t, encoded_k, false /*sign_extend*/); 1177 } 1178 1179 //------------------------------------------------------ 1180 // Compare (patchable) constant with register. 1181 //------------------------------------------------------ 1182 1183 // Compare narrow oop in reg with narrow oop constant, no decompression. 1184 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) { 1185 assert(UseCompressedOops, "must be on to call this method"); 1186 1187 Assembler::z_clfi(oop1, oop2); 1188 } 1189 1190 // Compare narrow oop in reg with narrow oop constant, no decompression. 1191 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) { 1192 assert(UseCompressedClassPointers, "must be on to call this method"); 1193 narrowKlass encoded_k = Klass::encode_klass(klass2); 1194 1195 Assembler::z_clfi(klass1, encoded_k); 1196 } 1197 1198 //---------------------------------------------------------- 1199 // Check which kind of load_constant we have here. 1200 //---------------------------------------------------------- 1201 1202 // Detection of CPU version dependent load_const sequence. 1203 // The detection is valid only for code sequences generated by load_const, 1204 // not load_const_optimized. 1205 bool MacroAssembler::is_load_const(address a) { 1206 unsigned long inst1, inst2; 1207 unsigned int len1, len2; 1208 1209 len1 = get_instruction(a, &inst1); 1210 len2 = get_instruction(a + len1, &inst2); 1211 1212 return is_z_iihf(inst1) && is_z_iilf(inst2); 1213 } 1214 1215 // Detection of CPU version dependent load_const_32to64 sequence. 1216 // Mostly used for narrow oops and narrow Klass pointers. 1217 // The detection is valid only for code sequences generated by load_const_32to64. 1218 bool MacroAssembler::is_load_const_32to64(address pos) { 1219 unsigned long inst1, inst2; 1220 unsigned int len1; 1221 1222 len1 = get_instruction(pos, &inst1); 1223 return is_z_llilf(inst1); 1224 } 1225 1226 // Detection of compare_immediate_narrow sequence. 1227 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop. 1228 bool MacroAssembler::is_compare_immediate32(address pos) { 1229 return is_equal(pos, CLFI_ZOPC, RIL_MASK); 1230 } 1231 1232 // Detection of compare_immediate_narrow sequence. 1233 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop. 1234 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) { 1235 return is_compare_immediate32(pos); 1236 } 1237 1238 // Detection of compare_immediate_narrow sequence. 1239 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass. 1240 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) { 1241 return is_compare_immediate32(pos); 1242 } 1243 1244 //----------------------------------- 1245 // patch the load_constant 1246 //----------------------------------- 1247 1248 // CPU-version dependend patching of load_const. 1249 void MacroAssembler::patch_const(address a, long x) { 1250 assert(is_load_const(a), "not a load of a constant"); 1251 set_imm32((address)a, (int) ((x >> 32) & 0xffffffff)); 1252 set_imm32((address)(a + 6), (int)(x & 0xffffffff)); 1253 } 1254 1255 // Patching the value of CPU version dependent load_const_32to64 sequence. 1256 // The passed ptr MUST be in compressed format! 1257 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) { 1258 assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)"); 1259 1260 set_imm32(pos, np); 1261 return 6; 1262 } 1263 1264 // Patching the value of CPU version dependent compare_immediate_narrow sequence. 1265 // The passed ptr MUST be in compressed format! 1266 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) { 1267 assert(is_compare_immediate32(pos), "not a compressed ptr compare"); 1268 1269 set_imm32(pos, np); 1270 return 6; 1271 } 1272 1273 // Patching the immediate value of CPU version dependent load_narrow_oop sequence. 1274 // The passed ptr must NOT be in compressed format! 1275 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) { 1276 assert(UseCompressedOops, "Can only patch compressed oops"); 1277 1278 narrowOop no = oopDesc::encode_heap_oop(o); 1279 return patch_load_const_32to64(pos, no); 1280 } 1281 1282 // Patching the immediate value of CPU version dependent load_narrow_klass sequence. 1283 // The passed ptr must NOT be in compressed format! 1284 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) { 1285 assert(UseCompressedClassPointers, "Can only patch compressed klass pointers"); 1286 1287 narrowKlass nk = Klass::encode_klass(k); 1288 return patch_load_const_32to64(pos, nk); 1289 } 1290 1291 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence. 1292 // The passed ptr must NOT be in compressed format! 1293 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) { 1294 assert(UseCompressedOops, "Can only patch compressed oops"); 1295 1296 narrowOop no = oopDesc::encode_heap_oop(o); 1297 return patch_compare_immediate_32(pos, no); 1298 } 1299 1300 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence. 1301 // The passed ptr must NOT be in compressed format! 1302 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) { 1303 assert(UseCompressedClassPointers, "Can only patch compressed klass pointers"); 1304 1305 narrowKlass nk = Klass::encode_klass(k); 1306 return patch_compare_immediate_32(pos, nk); 1307 } 1308 1309 //------------------------------------------------------------------------ 1310 // Extract the constant from a load_constant instruction stream. 1311 //------------------------------------------------------------------------ 1312 1313 // Get constant from a load_const sequence. 1314 long MacroAssembler::get_const(address a) { 1315 assert(is_load_const(a), "not a load of a constant"); 1316 unsigned long x; 1317 x = (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32); 1318 x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff))); 1319 return (long) x; 1320 } 1321 1322 //-------------------------------------- 1323 // Store a constant in memory. 1324 //-------------------------------------- 1325 1326 // General emitter to move a constant to memory. 1327 // The store is atomic. 1328 // o Address must be given in RS format (no index register) 1329 // o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported. 1330 // o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned. 1331 // o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned. 1332 // o Memory slot must be at least as wide as constant, will assert otherwise. 1333 // o Signed constants will sign-extend, unsigned constants will zero-extend to slot width. 1334 int MacroAssembler::store_const(const Address &dest, long imm, 1335 unsigned int lm, unsigned int lc, 1336 Register scratch) { 1337 int64_t disp = dest.disp(); 1338 Register base = dest.base(); 1339 assert(!dest.has_index(), "not supported"); 1340 assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory length not supported"); 1341 assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported"); 1342 assert(lm>=lc, "memory slot too small"); 1343 assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range"); 1344 assert(Displacement::is_validDisp(disp), "displacement out of range"); 1345 1346 bool is_shortDisp = Displacement::is_shortDisp(disp); 1347 int store_offset = -1; 1348 1349 // For target len == 1 it's easy. 1350 if (lm == 1) { 1351 store_offset = offset(); 1352 if (is_shortDisp) { 1353 z_mvi(disp, base, imm); 1354 return store_offset; 1355 } else { 1356 z_mviy(disp, base, imm); 1357 return store_offset; 1358 } 1359 } 1360 1361 // All the "good stuff" takes an unsigned displacement. 1362 if (is_shortDisp) { 1363 // NOTE: Cannot use clear_mem for imm==0, because it is not atomic. 1364 1365 store_offset = offset(); 1366 switch (lm) { 1367 case 2: // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening. 1368 z_mvhhi(disp, base, imm); 1369 return store_offset; 1370 case 4: 1371 if (Immediate::is_simm16(imm)) { 1372 z_mvhi(disp, base, imm); 1373 return store_offset; 1374 } 1375 break; 1376 case 8: 1377 if (Immediate::is_simm16(imm)) { 1378 z_mvghi(disp, base, imm); 1379 return store_offset; 1380 } 1381 break; 1382 default: 1383 ShouldNotReachHere(); 1384 break; 1385 } 1386 } 1387 1388 // Can't optimize, so load value and store it. 1389 guarantee(scratch != noreg, " need a scratch register here !"); 1390 if (imm != 0) { 1391 load_const_optimized(scratch, imm); // Preserves CC anyway. 1392 } else { 1393 // Leave CC alone!! 1394 (void) clear_reg(scratch, true, false); // Indicate unused result. 1395 } 1396 1397 store_offset = offset(); 1398 if (is_shortDisp) { 1399 switch (lm) { 1400 case 2: 1401 z_sth(scratch, disp, Z_R0, base); 1402 return store_offset; 1403 case 4: 1404 z_st(scratch, disp, Z_R0, base); 1405 return store_offset; 1406 case 8: 1407 z_stg(scratch, disp, Z_R0, base); 1408 return store_offset; 1409 default: 1410 ShouldNotReachHere(); 1411 break; 1412 } 1413 } else { 1414 switch (lm) { 1415 case 2: 1416 z_sthy(scratch, disp, Z_R0, base); 1417 return store_offset; 1418 case 4: 1419 z_sty(scratch, disp, Z_R0, base); 1420 return store_offset; 1421 case 8: 1422 z_stg(scratch, disp, Z_R0, base); 1423 return store_offset; 1424 default: 1425 ShouldNotReachHere(); 1426 break; 1427 } 1428 } 1429 return -1; // should not reach here 1430 } 1431 1432 //=================================================================== 1433 //=== N O T P A T CH A B L E C O N S T A N T S === 1434 //=================================================================== 1435 1436 // Load constant x into register t with a fast instrcution sequence 1437 // depending on the bits in x. Preserves CC under all circumstances. 1438 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) { 1439 if (x == 0) { 1440 int len; 1441 if (emit) { 1442 len = clear_reg(t, true, false); 1443 } else { 1444 len = 4; 1445 } 1446 return len; 1447 } 1448 1449 if (Immediate::is_simm16(x)) { 1450 if (emit) { z_lghi(t, x); } 1451 return 4; 1452 } 1453 1454 // 64 bit value: | part1 | part2 | part3 | part4 | 1455 // At least one part is not zero! 1456 int part1 = ((x >> 32) & 0xffff0000) >> 16; 1457 int part2 = (x >> 32) & 0x0000ffff; 1458 int part3 = (x & 0xffff0000) >> 16; 1459 int part4 = (x & 0x0000ffff); 1460 1461 // Lower word only (unsigned). 1462 if ((part1 == 0) && (part2 == 0)) { 1463 if (part3 == 0) { 1464 if (emit) z_llill(t, part4); 1465 return 4; 1466 } 1467 if (part4 == 0) { 1468 if (emit) z_llilh(t, part3); 1469 return 4; 1470 } 1471 if (emit) z_llilf(t, (int)(x & 0xffffffff)); 1472 return 6; 1473 } 1474 1475 // Upper word only. 1476 if ((part3 == 0) && (part4 == 0)) { 1477 if (part1 == 0) { 1478 if (emit) z_llihl(t, part2); 1479 return 4; 1480 } 1481 if (part2 == 0) { 1482 if (emit) z_llihh(t, part1); 1483 return 4; 1484 } 1485 if (emit) z_llihf(t, (int)(x >> 32)); 1486 return 6; 1487 } 1488 1489 // Lower word only (signed). 1490 if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) { 1491 if (emit) z_lgfi(t, (int)(x & 0xffffffff)); 1492 return 6; 1493 } 1494 1495 int len = 0; 1496 1497 if ((part1 == 0) || (part2 == 0)) { 1498 if (part1 == 0) { 1499 if (emit) z_llihl(t, part2); 1500 len += 4; 1501 } else { 1502 if (emit) z_llihh(t, part1); 1503 len += 4; 1504 } 1505 } else { 1506 if (emit) z_llihf(t, (int)(x >> 32)); 1507 len += 6; 1508 } 1509 1510 if ((part3 == 0) || (part4 == 0)) { 1511 if (part3 == 0) { 1512 if (emit) z_iill(t, part4); 1513 len += 4; 1514 } else { 1515 if (emit) z_iilh(t, part3); 1516 len += 4; 1517 } 1518 } else { 1519 if (emit) z_iilf(t, (int)(x & 0xffffffff)); 1520 len += 6; 1521 } 1522 return len; 1523 } 1524 1525 //===================================================================== 1526 //=== H I G H E R L E V E L B R A N C H E M I T T E R S === 1527 //===================================================================== 1528 1529 // Note: In the worst case, one of the scratch registers is destroyed!!! 1530 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) { 1531 // Right operand is constant. 1532 if (x2.is_constant()) { 1533 jlong value = x2.as_constant(); 1534 compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true); 1535 return; 1536 } 1537 1538 // Right operand is in register. 1539 compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true); 1540 } 1541 1542 // Note: In the worst case, one of the scratch registers is destroyed!!! 1543 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) { 1544 // Right operand is constant. 1545 if (x2.is_constant()) { 1546 jlong value = x2.as_constant(); 1547 compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false); 1548 return; 1549 } 1550 1551 // Right operand is in register. 1552 compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false); 1553 } 1554 1555 // Note: In the worst case, one of the scratch registers is destroyed!!! 1556 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) { 1557 // Right operand is constant. 1558 if (x2.is_constant()) { 1559 jlong value = x2.as_constant(); 1560 compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true); 1561 return; 1562 } 1563 1564 // Right operand is in register. 1565 compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true); 1566 } 1567 1568 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) { 1569 // Right operand is constant. 1570 if (x2.is_constant()) { 1571 jlong value = x2.as_constant(); 1572 compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false); 1573 return; 1574 } 1575 1576 // Right operand is in register. 1577 compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false); 1578 } 1579 1580 // Generate an optimal branch to the branch target. 1581 // Optimal means that a relative branch (brc or brcl) is used if the 1582 // branch distance is short enough. Loading the target address into a 1583 // register and branching via reg is used as fallback only. 1584 // 1585 // Used registers: 1586 // Z_R1 - work reg. Holds branch target address. 1587 // Used in fallback case only. 1588 // 1589 // This version of branch_optimized is good for cases where the target address is known 1590 // and constant, i.e. is never changed (no relocation, no patching). 1591 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) { 1592 address branch_origin = pc(); 1593 1594 if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) { 1595 z_brc(cond, branch_addr); 1596 } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) { 1597 z_brcl(cond, branch_addr); 1598 } else { 1599 load_const_optimized(Z_R1, branch_addr); // CC must not get killed by load_const_optimized. 1600 z_bcr(cond, Z_R1); 1601 } 1602 } 1603 1604 // This version of branch_optimized is good for cases where the target address 1605 // is potentially not yet known at the time the code is emitted. 1606 // 1607 // One very common case is a branch to an unbound label which is handled here. 1608 // The caller might know (or hope) that the branch distance is short enough 1609 // to be encoded in a 16bit relative address. In this case he will pass a 1610 // NearLabel branch_target. 1611 // Care must be taken with unbound labels. Each call to target(label) creates 1612 // an entry in the patch queue for that label to patch all references of the label 1613 // once it gets bound. Those recorded patch locations must be patchable. Otherwise, 1614 // an assertion fires at patch time. 1615 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) { 1616 if (branch_target.is_bound()) { 1617 address branch_addr = target(branch_target); 1618 branch_optimized(cond, branch_addr); 1619 } else { 1620 z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time. 1621 } 1622 } 1623 1624 // Generate an optimal compare and branch to the branch target. 1625 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the 1626 // branch distance is short enough. Loading the target address into a 1627 // register and branching via reg is used as fallback only. 1628 // 1629 // Input: 1630 // r1 - left compare operand 1631 // r2 - right compare operand 1632 void MacroAssembler::compare_and_branch_optimized(Register r1, 1633 Register r2, 1634 Assembler::branch_condition cond, 1635 address branch_addr, 1636 bool len64, 1637 bool has_sign) { 1638 unsigned int casenum = (len64?2:0)+(has_sign?0:1); 1639 1640 address branch_origin = pc(); 1641 if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) { 1642 switch (casenum) { 1643 case 0: z_crj( r1, r2, cond, branch_addr); break; 1644 case 1: z_clrj (r1, r2, cond, branch_addr); break; 1645 case 2: z_cgrj(r1, r2, cond, branch_addr); break; 1646 case 3: z_clgrj(r1, r2, cond, branch_addr); break; 1647 default: ShouldNotReachHere(); break; 1648 } 1649 } else { 1650 switch (casenum) { 1651 case 0: z_cr( r1, r2); break; 1652 case 1: z_clr(r1, r2); break; 1653 case 2: z_cgr(r1, r2); break; 1654 case 3: z_clgr(r1, r2); break; 1655 default: ShouldNotReachHere(); break; 1656 } 1657 branch_optimized(cond, branch_addr); 1658 } 1659 } 1660 1661 // Generate an optimal compare and branch to the branch target. 1662 // Optimal means that a relative branch (clgij, brc or brcl) is used if the 1663 // branch distance is short enough. Loading the target address into a 1664 // register and branching via reg is used as fallback only. 1665 // 1666 // Input: 1667 // r1 - left compare operand (in register) 1668 // x2 - right compare operand (immediate) 1669 void MacroAssembler::compare_and_branch_optimized(Register r1, 1670 jlong x2, 1671 Assembler::branch_condition cond, 1672 Label& branch_target, 1673 bool len64, 1674 bool has_sign) { 1675 address branch_origin = pc(); 1676 bool x2_imm8 = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2)); 1677 bool is_RelAddr16 = (branch_target.is_bound() && 1678 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin)); 1679 unsigned int casenum = (len64?2:0)+(has_sign?0:1); 1680 1681 if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) { 1682 switch (casenum) { 1683 case 0: z_cij( r1, x2, cond, branch_target); break; 1684 case 1: z_clij(r1, x2, cond, branch_target); break; 1685 case 2: z_cgij(r1, x2, cond, branch_target); break; 1686 case 3: z_clgij(r1, x2, cond, branch_target); break; 1687 default: ShouldNotReachHere(); break; 1688 } 1689 return; 1690 } 1691 1692 if (x2 == 0) { 1693 switch (casenum) { 1694 case 0: z_ltr(r1, r1); break; 1695 case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication! 1696 case 2: z_ltgr(r1, r1); break; 1697 case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication! 1698 default: ShouldNotReachHere(); break; 1699 } 1700 } else { 1701 if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) { 1702 switch (casenum) { 1703 case 0: z_chi(r1, x2); break; 1704 case 1: z_chi(r1, x2); break; // positive immediate < 2**15 1705 case 2: z_cghi(r1, x2); break; 1706 case 3: z_cghi(r1, x2); break; // positive immediate < 2**15 1707 default: break; 1708 } 1709 } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) { 1710 switch (casenum) { 1711 case 0: z_cfi( r1, x2); break; 1712 case 1: z_clfi(r1, x2); break; 1713 case 2: z_cgfi(r1, x2); break; 1714 case 3: z_clgfi(r1, x2); break; 1715 default: ShouldNotReachHere(); break; 1716 } 1717 } else { 1718 // No instruction with immediate operand possible, so load into register. 1719 Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1; 1720 load_const_optimized(scratch, x2); 1721 switch (casenum) { 1722 case 0: z_cr( r1, scratch); break; 1723 case 1: z_clr(r1, scratch); break; 1724 case 2: z_cgr(r1, scratch); break; 1725 case 3: z_clgr(r1, scratch); break; 1726 default: ShouldNotReachHere(); break; 1727 } 1728 } 1729 } 1730 branch_optimized(cond, branch_target); 1731 } 1732 1733 // Generate an optimal compare and branch to the branch target. 1734 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the 1735 // branch distance is short enough. Loading the target address into a 1736 // register and branching via reg is used as fallback only. 1737 // 1738 // Input: 1739 // r1 - left compare operand 1740 // r2 - right compare operand 1741 void MacroAssembler::compare_and_branch_optimized(Register r1, 1742 Register r2, 1743 Assembler::branch_condition cond, 1744 Label& branch_target, 1745 bool len64, 1746 bool has_sign) { 1747 unsigned int casenum = (len64?2:0)+(has_sign?0:1); 1748 1749 if (branch_target.is_bound()) { 1750 address branch_addr = target(branch_target); 1751 compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign); 1752 } else { 1753 { 1754 switch (casenum) { 1755 case 0: z_cr( r1, r2); break; 1756 case 1: z_clr(r1, r2); break; 1757 case 2: z_cgr(r1, r2); break; 1758 case 3: z_clgr(r1, r2); break; 1759 default: ShouldNotReachHere(); break; 1760 } 1761 branch_optimized(cond, branch_target); 1762 } 1763 } 1764 } 1765 1766 //=========================================================================== 1767 //=== END H I G H E R L E V E L B R A N C H E M I T T E R S === 1768 //=========================================================================== 1769 1770 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) { 1771 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1772 int index = oop_recorder()->allocate_metadata_index(obj); 1773 RelocationHolder rspec = metadata_Relocation::spec(index); 1774 return AddressLiteral((address)obj, rspec); 1775 } 1776 1777 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) { 1778 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1779 int index = oop_recorder()->find_index(obj); 1780 RelocationHolder rspec = metadata_Relocation::spec(index); 1781 return AddressLiteral((address)obj, rspec); 1782 } 1783 1784 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) { 1785 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1786 int oop_index = oop_recorder()->allocate_oop_index(obj); 1787 return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); 1788 } 1789 1790 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) { 1791 assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); 1792 int oop_index = oop_recorder()->find_index(obj); 1793 return AddressLiteral(address(obj), oop_Relocation::spec(oop_index)); 1794 } 1795 1796 // NOTE: destroys r 1797 void MacroAssembler::c2bool(Register r, Register t) { 1798 z_lcr(t, r); // t = -r 1799 z_or(r, t); // r = -r OR r 1800 z_srl(r, 31); // Yields 0 if r was 0, 1 otherwise. 1801 } 1802 1803 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 1804 Register tmp, 1805 int offset) { 1806 intptr_t value = *delayed_value_addr; 1807 if (value != 0) { 1808 return RegisterOrConstant(value + offset); 1809 } 1810 1811 BLOCK_COMMENT("delayed_value {"); 1812 // Load indirectly to solve generation ordering problem. 1813 load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a; 1814 z_lg(tmp, 0, tmp); // tmp = *tmp; 1815 1816 #ifdef ASSERT 1817 NearLabel L; 1818 compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L); 1819 z_illtrap(); 1820 bind(L); 1821 #endif 1822 1823 if (offset != 0) { 1824 z_agfi(tmp, offset); // tmp = tmp + offset; 1825 } 1826 1827 BLOCK_COMMENT("} delayed_value"); 1828 return RegisterOrConstant(tmp); 1829 } 1830 1831 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos' 1832 // and return the resulting instruction. 1833 // Dest_pos and inst_pos are 32 bit only. These parms can only designate 1834 // relative positions. 1835 // Use correct argument types. Do not pre-calculate distance. 1836 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) { 1837 int c = 0; 1838 unsigned long patched_inst = 0; 1839 if (is_call_pcrelative_short(inst) || 1840 is_branch_pcrelative_short(inst) || 1841 is_branchoncount_pcrelative_short(inst) || 1842 is_branchonindex32_pcrelative_short(inst)) { 1843 c = 1; 1844 int m = fmask(15, 0); // simm16(-1, 16, 32); 1845 int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32); 1846 patched_inst = (inst & ~m) | v; 1847 } else if (is_compareandbranch_pcrelative_short(inst)) { 1848 c = 2; 1849 long m = fmask(31, 16); // simm16(-1, 16, 48); 1850 long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48); 1851 patched_inst = (inst & ~m) | v; 1852 } else if (is_branchonindex64_pcrelative_short(inst)) { 1853 c = 3; 1854 long m = fmask(31, 16); // simm16(-1, 16, 48); 1855 long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48); 1856 patched_inst = (inst & ~m) | v; 1857 } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) { 1858 c = 4; 1859 long m = fmask(31, 0); // simm32(-1, 16, 48); 1860 long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48); 1861 patched_inst = (inst & ~m) | v; 1862 } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions. 1863 c = 5; 1864 long m = fmask(31, 0); // simm32(-1, 16, 48); 1865 long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48); 1866 patched_inst = (inst & ~m) | v; 1867 } else { 1868 print_dbg_msg(tty, inst, "not a relative branch", 0); 1869 dump_code_range(tty, inst_pos, 32, "not a pcrelative branch"); 1870 ShouldNotReachHere(); 1871 } 1872 1873 long new_off = get_pcrel_offset(patched_inst); 1874 if (new_off != (dest_pos-inst_pos)) { 1875 tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off); 1876 print_dbg_msg(tty, inst, "<- original instruction: branch patching error", 0); 1877 print_dbg_msg(tty, patched_inst, "<- patched instruction: branch patching error", 0); 1878 #ifdef LUCY_DBG 1879 VM_Version::z_SIGSEGV(); 1880 #endif 1881 ShouldNotReachHere(); 1882 } 1883 return patched_inst; 1884 } 1885 1886 // Only called when binding labels (share/vm/asm/assembler.cpp) 1887 // Pass arguments as intended. Do not pre-calculate distance. 1888 void MacroAssembler::pd_patch_instruction(address branch, address target) { 1889 unsigned long stub_inst; 1890 int inst_len = get_instruction(branch, &stub_inst); 1891 1892 set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len); 1893 } 1894 1895 1896 // Extract relative address (aka offset). 1897 // inv_simm16 works for 4-byte instructions only. 1898 // compare and branch instructions are 6-byte and have a 16bit offset "in the middle". 1899 long MacroAssembler::get_pcrel_offset(unsigned long inst) { 1900 1901 if (MacroAssembler::is_pcrelative_short(inst)) { 1902 if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) { 1903 return RelAddr::inv_pcrel_off16(inv_simm16(inst)); 1904 } else { 1905 return RelAddr::inv_pcrel_off16(inv_simm16_48(inst)); 1906 } 1907 } 1908 1909 if (MacroAssembler::is_pcrelative_long(inst)) { 1910 return RelAddr::inv_pcrel_off32(inv_simm32(inst)); 1911 } 1912 1913 print_dbg_msg(tty, inst, "not a pcrelative instruction", 6); 1914 #ifdef LUCY_DBG 1915 VM_Version::z_SIGSEGV(); 1916 #else 1917 ShouldNotReachHere(); 1918 #endif 1919 return -1; 1920 } 1921 1922 long MacroAssembler::get_pcrel_offset(address pc) { 1923 unsigned long inst; 1924 unsigned int len = get_instruction(pc, &inst); 1925 1926 #ifdef ASSERT 1927 long offset; 1928 if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) { 1929 offset = get_pcrel_offset(inst); 1930 } else { 1931 offset = -1; 1932 } 1933 1934 if (offset == -1) { 1935 dump_code_range(tty, pc, 32, "not a pcrelative instruction"); 1936 #ifdef LUCY_DBG 1937 VM_Version::z_SIGSEGV(); 1938 #else 1939 ShouldNotReachHere(); 1940 #endif 1941 } 1942 return offset; 1943 #else 1944 return get_pcrel_offset(inst); 1945 #endif // ASSERT 1946 } 1947 1948 // Get target address from pc-relative instructions. 1949 address MacroAssembler::get_target_addr_pcrel(address pc) { 1950 assert(is_pcrelative_long(pc), "not a pcrelative instruction"); 1951 return pc + get_pcrel_offset(pc); 1952 } 1953 1954 // Patch pc relative load address. 1955 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) { 1956 unsigned long inst; 1957 // Offset is +/- 2**32 -> use long. 1958 ptrdiff_t distance = con - pc; 1959 1960 get_instruction(pc, &inst); 1961 1962 if (is_pcrelative_short(inst)) { 1963 *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc); // Instructions are at least 2-byte aligned, no test required. 1964 1965 // Some extra safety net. 1966 if (!RelAddr::is_in_range_of_RelAddr16(distance)) { 1967 print_dbg_msg(tty, inst, "distance out of range (16bit)", 4); 1968 dump_code_range(tty, pc, 32, "distance out of range (16bit)"); 1969 guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16"); 1970 } 1971 return; 1972 } 1973 1974 if (is_pcrelative_long(inst)) { 1975 *(int *)(pc+2) = RelAddr::pcrel_off32(con, pc); 1976 1977 // Some Extra safety net. 1978 if (!RelAddr::is_in_range_of_RelAddr32(distance)) { 1979 print_dbg_msg(tty, inst, "distance out of range (32bit)", 6); 1980 dump_code_range(tty, pc, 32, "distance out of range (32bit)"); 1981 guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32"); 1982 } 1983 return; 1984 } 1985 1986 guarantee(false, "not a pcrelative instruction to patch!"); 1987 } 1988 1989 // "Current PC" here means the address just behind the basr instruction. 1990 address MacroAssembler::get_PC(Register result) { 1991 z_basr(result, Z_R0); // Don't branch, just save next instruction address in result. 1992 return pc(); 1993 } 1994 1995 // Get current PC + offset. 1996 // Offset given in bytes, must be even! 1997 // "Current PC" here means the address of the larl instruction plus the given offset. 1998 address MacroAssembler::get_PC(Register result, int64_t offset) { 1999 address here = pc(); 2000 z_larl(result, offset/2); // Save target instruction address in result. 2001 return here + offset; 2002 } 2003 2004 // Resize_frame with SP(new) = SP(old) - [offset]. 2005 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp) 2006 { 2007 assert_different_registers(offset, fp, Z_SP); 2008 if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } 2009 2010 z_sgr(Z_SP, offset); 2011 z_stg(fp, _z_abi(callers_sp), Z_SP); 2012 } 2013 2014 // Resize_frame with SP(new) = [addr]. 2015 void MacroAssembler::resize_frame_absolute(Register addr, Register fp, bool load_fp) { 2016 assert_different_registers(addr, fp, Z_SP); 2017 if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } 2018 2019 if (addr != Z_R0) { 2020 // Minimize stalls by not using Z_SP immediately after update. 2021 z_stg(fp, _z_abi(callers_sp), addr); 2022 z_lgr(Z_SP, addr); 2023 } else { 2024 z_lgr(Z_SP, addr); 2025 z_stg(fp, _z_abi(callers_sp), Z_SP); 2026 } 2027 } 2028 2029 // Resize_frame with SP(new) = SP(old) + offset. 2030 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) { 2031 assert_different_registers(fp, Z_SP); 2032 if (load_fp) z_lg(fp, _z_abi(callers_sp), Z_SP); 2033 2034 if (Displacement::is_validDisp((int)_z_abi(callers_sp) + offset.constant_or_zero())) { 2035 // Minimize stalls by first using, then updating Z_SP. 2036 // Do that only if we have a small positive offset or if ExtImm are available. 2037 z_stg(fp, Address(Z_SP, offset, _z_abi(callers_sp))); 2038 add64(Z_SP, offset); 2039 } else { 2040 add64(Z_SP, offset); 2041 z_stg(fp, _z_abi(callers_sp), Z_SP); 2042 } 2043 } 2044 2045 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) { 2046 #ifdef ASSERT 2047 assert_different_registers(bytes, old_sp, Z_SP); 2048 if (!copy_sp) { 2049 z_cgr(old_sp, Z_SP); 2050 asm_assert_eq("[old_sp]!=[Z_SP]", 0x211); 2051 } 2052 #endif 2053 if (copy_sp) { z_lgr(old_sp, Z_SP); } 2054 if (bytes_with_inverted_sign) { 2055 z_stg(old_sp, 0, bytes, Z_SP); 2056 add2reg_with_index(Z_SP, 0, bytes, Z_SP); 2057 } else { 2058 z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster. 2059 z_stg(old_sp, 0, Z_SP); 2060 } 2061 } 2062 2063 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) { 2064 long offset = Assembler::align(bytes, frame::alignment_in_bytes); 2065 2066 if (Displacement::is_validDisp(-offset)) { 2067 // Minimize stalls by first using, then updating Z_SP. 2068 // Do that only if we have ExtImm available. 2069 z_stg(Z_SP, -offset, Z_SP); 2070 add2reg(Z_SP, -offset); 2071 } else { 2072 if (scratch != Z_R0 && scratch != Z_R1) { 2073 z_stg(Z_SP, -offset, Z_SP); 2074 add2reg(Z_SP, -offset); 2075 } else { // scratch == Z_R0 || scratch == Z_R1 2076 z_lgr(scratch, Z_SP); 2077 add2reg(Z_SP, -offset); 2078 z_stg(scratch, 0, Z_SP); 2079 } 2080 } 2081 return offset; 2082 } 2083 2084 // Push a frame of size `bytes' plus abi160 on top. 2085 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) { 2086 BLOCK_COMMENT("push_frame_abi160 {"); 2087 unsigned int res = push_frame(bytes + frame::z_abi_160_size); 2088 BLOCK_COMMENT("} push_frame_abi160"); 2089 return res; 2090 } 2091 2092 // Pop current C frame. 2093 void MacroAssembler::pop_frame() { 2094 BLOCK_COMMENT("pop_frame:"); 2095 Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP); 2096 } 2097 2098 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) { 2099 if (allow_relocation) { 2100 call_c(entry_point); 2101 } else { 2102 call_c_static(entry_point); 2103 } 2104 } 2105 2106 void MacroAssembler::call_VM_leaf_base(address entry_point) { 2107 bool allow_relocation = true; 2108 call_VM_leaf_base(entry_point, allow_relocation); 2109 } 2110 2111 void MacroAssembler::call_VM_base(Register oop_result, 2112 Register last_java_sp, 2113 address entry_point, 2114 bool allow_relocation, 2115 bool check_exceptions) { // Defaults to true. 2116 // Allow_relocation indicates, if true, that the generated code shall 2117 // be fit for code relocation or referenced data relocation. In other 2118 // words: all addresses must be considered variable. PC-relative addressing 2119 // is not possible then. 2120 // On the other hand, if (allow_relocation == false), addresses and offsets 2121 // may be considered stable, enabling us to take advantage of some PC-relative 2122 // addressing tweaks. These might improve performance and reduce code size. 2123 2124 // Determine last_java_sp register. 2125 if (!last_java_sp->is_valid()) { 2126 last_java_sp = Z_SP; // Load Z_SP as SP. 2127 } 2128 2129 set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation); 2130 2131 // ARG1 must hold thread address. 2132 z_lgr(Z_ARG1, Z_thread); 2133 2134 address return_pc = NULL; 2135 if (allow_relocation) { 2136 return_pc = call_c(entry_point); 2137 } else { 2138 return_pc = call_c_static(entry_point); 2139 } 2140 2141 reset_last_Java_frame(allow_relocation); 2142 2143 // C++ interp handles this in the interpreter. 2144 check_and_handle_popframe(Z_thread); 2145 check_and_handle_earlyret(Z_thread); 2146 2147 // Check for pending exceptions. 2148 if (check_exceptions) { 2149 // Check for pending exceptions (java_thread is set upon return). 2150 load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset())); 2151 2152 // This used to conditionally jump to forward_exception however it is 2153 // possible if we relocate that the branch will not reach. So we must jump 2154 // around so we can always reach. 2155 2156 Label ok; 2157 z_bre(ok); // Bcondequal is the same as bcondZero. 2158 call_stub(StubRoutines::forward_exception_entry()); 2159 bind(ok); 2160 } 2161 2162 // Get oop result if there is one and reset the value in the thread. 2163 if (oop_result->is_valid()) { 2164 get_vm_result(oop_result); 2165 } 2166 2167 _last_calls_return_pc = return_pc; // Wipe out other (error handling) calls. 2168 } 2169 2170 void MacroAssembler::call_VM_base(Register oop_result, 2171 Register last_java_sp, 2172 address entry_point, 2173 bool check_exceptions) { // Defaults to true. 2174 bool allow_relocation = true; 2175 call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions); 2176 } 2177 2178 // VM calls without explicit last_java_sp. 2179 2180 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { 2181 // Call takes possible detour via InterpreterMacroAssembler. 2182 call_VM_base(oop_result, noreg, entry_point, true, check_exceptions); 2183 } 2184 2185 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) { 2186 // Z_ARG1 is reserved for the thread. 2187 lgr_if_needed(Z_ARG2, arg_1); 2188 call_VM(oop_result, entry_point, check_exceptions); 2189 } 2190 2191 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { 2192 // Z_ARG1 is reserved for the thread. 2193 lgr_if_needed(Z_ARG2, arg_1); 2194 assert(arg_2 != Z_ARG2, "smashed argument"); 2195 lgr_if_needed(Z_ARG3, arg_2); 2196 call_VM(oop_result, entry_point, check_exceptions); 2197 } 2198 2199 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, 2200 Register arg_3, bool check_exceptions) { 2201 // Z_ARG1 is reserved for the thread. 2202 lgr_if_needed(Z_ARG2, arg_1); 2203 assert(arg_2 != Z_ARG2, "smashed argument"); 2204 lgr_if_needed(Z_ARG3, arg_2); 2205 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2206 lgr_if_needed(Z_ARG4, arg_3); 2207 call_VM(oop_result, entry_point, check_exceptions); 2208 } 2209 2210 // VM static calls without explicit last_java_sp. 2211 2212 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) { 2213 // Call takes possible detour via InterpreterMacroAssembler. 2214 call_VM_base(oop_result, noreg, entry_point, false, check_exceptions); 2215 } 2216 2217 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2, 2218 Register arg_3, bool check_exceptions) { 2219 // Z_ARG1 is reserved for the thread. 2220 lgr_if_needed(Z_ARG2, arg_1); 2221 assert(arg_2 != Z_ARG2, "smashed argument"); 2222 lgr_if_needed(Z_ARG3, arg_2); 2223 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2224 lgr_if_needed(Z_ARG4, arg_3); 2225 call_VM_static(oop_result, entry_point, check_exceptions); 2226 } 2227 2228 // VM calls with explicit last_java_sp. 2229 2230 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) { 2231 // Call takes possible detour via InterpreterMacroAssembler. 2232 call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions); 2233 } 2234 2235 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) { 2236 // Z_ARG1 is reserved for the thread. 2237 lgr_if_needed(Z_ARG2, arg_1); 2238 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2239 } 2240 2241 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, 2242 Register arg_2, bool check_exceptions) { 2243 // Z_ARG1 is reserved for the thread. 2244 lgr_if_needed(Z_ARG2, arg_1); 2245 assert(arg_2 != Z_ARG2, "smashed argument"); 2246 lgr_if_needed(Z_ARG3, arg_2); 2247 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2248 } 2249 2250 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, 2251 Register arg_2, Register arg_3, bool check_exceptions) { 2252 // Z_ARG1 is reserved for the thread. 2253 lgr_if_needed(Z_ARG2, arg_1); 2254 assert(arg_2 != Z_ARG2, "smashed argument"); 2255 lgr_if_needed(Z_ARG3, arg_2); 2256 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2257 lgr_if_needed(Z_ARG4, arg_3); 2258 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2259 } 2260 2261 // VM leaf calls. 2262 2263 void MacroAssembler::call_VM_leaf(address entry_point) { 2264 // Call takes possible detour via InterpreterMacroAssembler. 2265 call_VM_leaf_base(entry_point, true); 2266 } 2267 2268 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { 2269 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2270 call_VM_leaf(entry_point); 2271 } 2272 2273 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { 2274 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2275 assert(arg_2 != Z_ARG1, "smashed argument"); 2276 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2277 call_VM_leaf(entry_point); 2278 } 2279 2280 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { 2281 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2282 assert(arg_2 != Z_ARG1, "smashed argument"); 2283 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2284 assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument"); 2285 if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3); 2286 call_VM_leaf(entry_point); 2287 } 2288 2289 // Static VM leaf calls. 2290 // Really static VM leaf calls are never patched. 2291 2292 void MacroAssembler::call_VM_leaf_static(address entry_point) { 2293 // Call takes possible detour via InterpreterMacroAssembler. 2294 call_VM_leaf_base(entry_point, false); 2295 } 2296 2297 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) { 2298 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2299 call_VM_leaf_static(entry_point); 2300 } 2301 2302 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) { 2303 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2304 assert(arg_2 != Z_ARG1, "smashed argument"); 2305 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2306 call_VM_leaf_static(entry_point); 2307 } 2308 2309 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) { 2310 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2311 assert(arg_2 != Z_ARG1, "smashed argument"); 2312 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2313 assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument"); 2314 if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3); 2315 call_VM_leaf_static(entry_point); 2316 } 2317 2318 // Don't use detour via call_c(reg). 2319 address MacroAssembler::call_c(address function_entry) { 2320 load_const(Z_R1, function_entry); 2321 return call(Z_R1); 2322 } 2323 2324 // Variant for really static (non-relocatable) calls which are never patched. 2325 address MacroAssembler::call_c_static(address function_entry) { 2326 load_absolute_address(Z_R1, function_entry); 2327 #if 0 // def ASSERT 2328 // Verify that call site did not move. 2329 load_const_optimized(Z_R0, function_entry); 2330 z_cgr(Z_R1, Z_R0); 2331 z_brc(bcondEqual, 3); 2332 z_illtrap(0xba); 2333 #endif 2334 return call(Z_R1); 2335 } 2336 2337 address MacroAssembler::call_c_opt(address function_entry) { 2338 bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */); 2339 _last_calls_return_pc = success ? pc() : NULL; 2340 return _last_calls_return_pc; 2341 } 2342 2343 // Identify a call_far_patchable instruction: LARL + LG + BASR 2344 // 2345 // nop ; optionally, if required for alignment 2346 // lgrl rx,A(TOC entry) ; PC-relative access into constant pool 2347 // basr Z_R14,rx ; end of this instruction must be aligned to a word boundary 2348 // 2349 // Code pattern will eventually get patched into variant2 (see below for detection code). 2350 // 2351 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) { 2352 address iaddr = instruction_addr; 2353 2354 // Check for the actual load instruction. 2355 if (!is_load_const_from_toc(iaddr)) { return false; } 2356 iaddr += load_const_from_toc_size(); 2357 2358 // Check for the call (BASR) instruction, finally. 2359 assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch"); 2360 return is_call_byregister(iaddr); 2361 } 2362 2363 // Identify a call_far_patchable instruction: BRASL 2364 // 2365 // Code pattern to suits atomic patching: 2366 // nop ; Optionally, if required for alignment. 2367 // nop ... ; Multiple filler nops to compensate for size difference (variant0 is longer). 2368 // nop ; For code pattern detection: Prepend each BRASL with a nop. 2369 // brasl Z_R14,<reladdr> ; End of code must be 4-byte aligned ! 2370 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) { 2371 const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size()); 2372 2373 // Check for correct number of leading nops. 2374 address iaddr; 2375 for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) { 2376 if (!is_z_nop(iaddr)) { return false; } 2377 } 2378 assert(iaddr == call_addr, "sanity"); 2379 2380 // --> Check for call instruction. 2381 if (is_call_far_pcrelative(call_addr)) { 2382 assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch"); 2383 return true; 2384 } 2385 2386 return false; 2387 } 2388 2389 // Emit a NOT mt-safely patchable 64 bit absolute call. 2390 // If toc_offset == -2, then the destination of the call (= target) is emitted 2391 // to the constant pool and a runtime_call relocation is added 2392 // to the code buffer. 2393 // If toc_offset != -2, target must already be in the constant pool at 2394 // _ctableStart+toc_offset (a caller can retrieve toc_offset 2395 // from the runtime_call relocation). 2396 // Special handling of emitting to scratch buffer when there is no constant pool. 2397 // Slightly changed code pattern. We emit an additional nop if we would 2398 // not end emitting at a word aligned address. This is to ensure 2399 // an atomically patchable displacement in brasl instructions. 2400 // 2401 // A call_far_patchable comes in different flavors: 2402 // - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register) 2403 // - LGRL(CP) / BR (address in constant pool, pc-relative accesss) 2404 // - BRASL (relative address of call target coded in instruction) 2405 // All flavors occupy the same amount of space. Length differences are compensated 2406 // by leading nops, such that the instruction sequence always ends at the same 2407 // byte offset. This is required to keep the return offset constant. 2408 // Furthermore, the return address (the end of the instruction sequence) is forced 2409 // to be on a 4-byte boundary. This is required for atomic patching, should we ever 2410 // need to patch the call target of the BRASL flavor. 2411 // RETURN value: false, if no constant pool entry could be allocated, true otherwise. 2412 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) { 2413 // Get current pc and ensure word alignment for end of instr sequence. 2414 const address start_pc = pc(); 2415 const intptr_t start_off = offset(); 2416 assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address"); 2417 const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop. 2418 const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit(); 2419 const bool emit_relative_call = !emit_target_to_pool && 2420 RelAddr::is_in_range_of_RelAddr32(dist) && 2421 ReoptimizeCallSequences && 2422 !code_section()->scratch_emit(); 2423 2424 if (emit_relative_call) { 2425 // Add padding to get the same size as below. 2426 const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size(); 2427 unsigned int current_padding; 2428 for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); } 2429 assert(current_padding == padding, "sanity"); 2430 2431 // relative call: len = 2(nop) + 6 (brasl) 2432 // CodeBlob resize cannot occur in this case because 2433 // this call is emitted into pre-existing space. 2434 z_nop(); // Prepend each BRASL with a nop. 2435 z_brasl(Z_R14, target); 2436 } else { 2437 // absolute call: Get address from TOC. 2438 // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8} 2439 if (emit_target_to_pool) { 2440 // When emitting the call for the first time, we do not need to use 2441 // the pc-relative version. It will be patched anyway, when the code 2442 // buffer is copied. 2443 // Relocation is not needed when !ReoptimizeCallSequences. 2444 relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none; 2445 AddressLiteral dest(target, rt); 2446 // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills 2447 // inst_mark(). Reset if possible. 2448 bool reset_mark = (inst_mark() == pc()); 2449 tocOffset = store_oop_in_toc(dest); 2450 if (reset_mark) { set_inst_mark(); } 2451 if (tocOffset == -1) { 2452 return false; // Couldn't create constant pool entry. 2453 } 2454 } 2455 assert(offset() == start_off, "emit no code before this point!"); 2456 2457 address tocPos = pc() + tocOffset; 2458 if (emit_target_to_pool) { 2459 tocPos = code()->consts()->start() + tocOffset; 2460 } 2461 load_long_pcrelative(Z_R14, tocPos); 2462 z_basr(Z_R14, Z_R14); 2463 } 2464 2465 #ifdef ASSERT 2466 // Assert that we can identify the emitted call. 2467 assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call"); 2468 assert(offset() == start_off+call_far_patchable_size(), "wrong size"); 2469 2470 if (emit_target_to_pool) { 2471 assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target, 2472 "wrong encoding of dest address"); 2473 } 2474 #endif 2475 return true; // success 2476 } 2477 2478 // Identify a call_far_patchable instruction. 2479 // For more detailed information see header comment of call_far_patchable. 2480 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) { 2481 return is_call_far_patchable_variant2_at(instruction_addr) || // short version: BRASL 2482 is_call_far_patchable_variant0_at(instruction_addr); // long version LARL + LG + BASR 2483 } 2484 2485 // Does the call_far_patchable instruction use a pc-relative encoding 2486 // of the call destination? 2487 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) { 2488 // Variant 2 is pc-relative. 2489 return is_call_far_patchable_variant2_at(instruction_addr); 2490 } 2491 2492 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) { 2493 // Prepend each BRASL with a nop. 2494 return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required. 2495 } 2496 2497 // Set destination address of a call_far_patchable instruction. 2498 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) { 2499 ResourceMark rm; 2500 2501 // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit). 2502 int code_size = MacroAssembler::call_far_patchable_size(); 2503 CodeBuffer buf(instruction_addr, code_size); 2504 MacroAssembler masm(&buf); 2505 masm.call_far_patchable(dest, tocOffset); 2506 ICache::invalidate_range(instruction_addr, code_size); // Empty on z. 2507 } 2508 2509 // Get dest address of a call_far_patchable instruction. 2510 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) { 2511 // Dynamic TOC: absolute address in constant pool. 2512 // Check variant2 first, it is more frequent. 2513 2514 // Relative address encoded in call instruction. 2515 if (is_call_far_patchable_variant2_at(instruction_addr)) { 2516 return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop. 2517 2518 // Absolute address in constant pool. 2519 } else if (is_call_far_patchable_variant0_at(instruction_addr)) { 2520 address iaddr = instruction_addr; 2521 2522 long tocOffset = get_load_const_from_toc_offset(iaddr); 2523 address tocLoc = iaddr + tocOffset; 2524 return *(address *)(tocLoc); 2525 } else { 2526 fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr); 2527 fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n", 2528 *(unsigned long*)instruction_addr, 2529 *(unsigned long*)(instruction_addr+8), 2530 call_far_patchable_size()); 2531 Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size()); 2532 ShouldNotReachHere(); 2533 return NULL; 2534 } 2535 } 2536 2537 void MacroAssembler::align_call_far_patchable(address pc) { 2538 if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); } 2539 } 2540 2541 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 2542 } 2543 2544 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 2545 } 2546 2547 // Read from the polling page. 2548 // Use TM or TMY instruction, depending on read offset. 2549 // offset = 0: Use TM, safepoint polling. 2550 // offset < 0: Use TMY, profiling safepoint polling. 2551 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) { 2552 if (Immediate::is_uimm12(offset)) { 2553 z_tm(offset, polling_page_address, mask_safepoint); 2554 } else { 2555 z_tmy(offset, polling_page_address, mask_profiling); 2556 } 2557 } 2558 2559 // Check whether z_instruction is a read access to the polling page 2560 // which was emitted by load_from_polling_page(..). 2561 bool MacroAssembler::is_load_from_polling_page(address instr_loc) { 2562 unsigned long z_instruction; 2563 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2564 2565 if (ilen == 2) { return false; } // It's none of the allowed instructions. 2566 2567 if (ilen == 4) { 2568 if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail. 2569 2570 int ms = inv_mask(z_instruction,8,32); // mask 2571 int ra = inv_reg(z_instruction,16,32); // base register 2572 int ds = inv_uimm12(z_instruction); // displacement 2573 2574 if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) { 2575 return false; // It's not a z_tm(0, ra, mask_safepoint). Fail. 2576 } 2577 2578 } else { /* if (ilen == 6) */ 2579 2580 assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y)."); 2581 2582 if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail. 2583 2584 int ms = inv_mask(z_instruction,8,48); // mask 2585 int ra = inv_reg(z_instruction,16,48); // base register 2586 int ds = inv_simm20(z_instruction); // displacement 2587 } 2588 2589 return true; 2590 } 2591 2592 // Extract poll address from instruction and ucontext. 2593 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) { 2594 assert(ucontext != NULL, "must have ucontext"); 2595 ucontext_t* uc = (ucontext_t*) ucontext; 2596 unsigned long z_instruction; 2597 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2598 2599 if (ilen == 4 && is_z_tm(z_instruction)) { 2600 int ra = inv_reg(z_instruction, 16, 32); // base register 2601 int ds = inv_uimm12(z_instruction); // displacement 2602 address addr = (address)uc->uc_mcontext.gregs[ra]; 2603 return addr + ds; 2604 } else if (ilen == 6 && is_z_tmy(z_instruction)) { 2605 int ra = inv_reg(z_instruction, 16, 48); // base register 2606 int ds = inv_simm20(z_instruction); // displacement 2607 address addr = (address)uc->uc_mcontext.gregs[ra]; 2608 return addr + ds; 2609 } 2610 2611 ShouldNotReachHere(); 2612 return NULL; 2613 } 2614 2615 // Extract poll register from instruction. 2616 uint MacroAssembler::get_poll_register(address instr_loc) { 2617 unsigned long z_instruction; 2618 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2619 2620 if (ilen == 4 && is_z_tm(z_instruction)) { 2621 return (uint)inv_reg(z_instruction, 16, 32); // base register 2622 } else if (ilen == 6 && is_z_tmy(z_instruction)) { 2623 return (uint)inv_reg(z_instruction, 16, 48); // base register 2624 } 2625 2626 ShouldNotReachHere(); 2627 return 0; 2628 } 2629 2630 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) { 2631 ShouldNotCallThis(); 2632 return false; 2633 } 2634 2635 // Write serialization page so VM thread can do a pseudo remote membar 2636 // We use the current thread pointer to calculate a thread specific 2637 // offset to write to within the page. This minimizes bus traffic 2638 // due to cache line collision. 2639 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { 2640 assert_different_registers(tmp1, tmp2); 2641 z_sllg(tmp2, thread, os::get_serialize_page_shift_count()); 2642 load_const_optimized(tmp1, (long) os::get_memory_serialize_page()); 2643 2644 int mask = os::get_serialize_page_mask(); 2645 if (Immediate::is_uimm16(mask)) { 2646 z_nill(tmp2, mask); 2647 z_llghr(tmp2, tmp2); 2648 } else { 2649 z_nilf(tmp2, mask); 2650 z_llgfr(tmp2, tmp2); 2651 } 2652 2653 z_release(); 2654 z_st(Z_R0, 0, tmp2, tmp1); 2655 } 2656 2657 // Don't rely on register locking, always use Z_R1 as scratch register instead. 2658 void MacroAssembler::bang_stack_with_offset(int offset) { 2659 // Stack grows down, caller passes positive offset. 2660 assert(offset > 0, "must bang with positive offset"); 2661 if (Displacement::is_validDisp(-offset)) { 2662 z_tmy(-offset, Z_SP, mask_stackbang); 2663 } else { 2664 add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!! 2665 z_tm(0, Z_R1, mask_stackbang); // Just banging. 2666 } 2667 } 2668 2669 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 2670 void MacroAssembler::tlab_allocate(Register obj, 2671 Register var_size_in_bytes, 2672 int con_size_in_bytes, 2673 Register t1, 2674 Label& slow_case) { 2675 assert_different_registers(obj, var_size_in_bytes, t1); 2676 Register end = t1; 2677 Register thread = Z_thread; 2678 2679 z_lg(obj, Address(thread, JavaThread::tlab_top_offset())); 2680 if (var_size_in_bytes == noreg) { 2681 z_lay(end, Address(obj, con_size_in_bytes)); 2682 } else { 2683 z_lay(end, Address(obj, var_size_in_bytes)); 2684 } 2685 z_cg(end, Address(thread, JavaThread::tlab_end_offset())); 2686 branch_optimized(bcondHigh, slow_case); 2687 2688 // Update the tlab top pointer. 2689 z_stg(end, Address(thread, JavaThread::tlab_top_offset())); 2690 2691 // Recover var_size_in_bytes if necessary. 2692 if (var_size_in_bytes == end) { 2693 z_sgr(var_size_in_bytes, obj); 2694 } 2695 } 2696 2697 // Emitter for interface method lookup. 2698 // input: recv_klass, intf_klass, itable_index 2699 // output: method_result 2700 // kills: itable_index, temp1_reg, Z_R0, Z_R1 2701 // TODO: Temp2_reg is unused. we may use this emitter also in the itable stubs. 2702 // If the register is still not needed then, remove it. 2703 void MacroAssembler::lookup_interface_method(Register recv_klass, 2704 Register intf_klass, 2705 RegisterOrConstant itable_index, 2706 Register method_result, 2707 Register temp1_reg, 2708 Register temp2_reg, 2709 Label& no_such_interface) { 2710 2711 const Register vtable_len = temp1_reg; // Used to compute itable_entry_addr. 2712 const Register itable_entry_addr = Z_R1_scratch; 2713 const Register itable_interface = Z_R0_scratch; 2714 2715 BLOCK_COMMENT("lookup_interface_method {"); 2716 2717 // Load start of itable entries into itable_entry_addr. 2718 z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset())); 2719 z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes())); 2720 2721 // Loop over all itable entries until desired interfaceOop(Rinterface) found. 2722 const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset()); 2723 2724 add2reg_with_index(itable_entry_addr, 2725 vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), 2726 recv_klass, vtable_len); 2727 2728 const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize; 2729 Label search; 2730 2731 bind(search); 2732 2733 // Handle IncompatibleClassChangeError. 2734 // If the entry is NULL then we've reached the end of the table 2735 // without finding the expected interface, so throw an exception. 2736 load_and_test_long(itable_interface, Address(itable_entry_addr)); 2737 z_bre(no_such_interface); 2738 2739 add2reg(itable_entry_addr, itable_offset_search_inc); 2740 z_cgr(itable_interface, intf_klass); 2741 z_brne(search); 2742 2743 // Entry found and itable_entry_addr points to it, get offset of vtable for interface. 2744 2745 const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() - 2746 itableOffsetEntry::interface_offset_in_bytes()) - 2747 itable_offset_search_inc; 2748 2749 // Compute itableMethodEntry and get method and entry point 2750 // we use addressing with index and displacement, since the formula 2751 // for computing the entry's offset has a fixed and a dynamic part, 2752 // the latter depending on the matched interface entry and on the case, 2753 // that the itable index has been passed as a register, not a constant value. 2754 int method_offset = itableMethodEntry::method_offset_in_bytes(); 2755 // Fixed part (displacement), common operand. 2756 Register itable_offset; // Dynamic part (index register). 2757 2758 if (itable_index.is_register()) { 2759 // Compute the method's offset in that register, for the formula, see the 2760 // else-clause below. 2761 itable_offset = itable_index.as_register(); 2762 2763 z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize)); 2764 z_agf(itable_offset, vtable_offset_offset, itable_entry_addr); 2765 } else { 2766 itable_offset = Z_R1_scratch; 2767 // Displacement increases. 2768 method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant(); 2769 2770 // Load index from itable. 2771 z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr); 2772 } 2773 2774 // Finally load the method's oop. 2775 z_lg(method_result, method_offset, itable_offset, recv_klass); 2776 BLOCK_COMMENT("} lookup_interface_method"); 2777 } 2778 2779 // Lookup for virtual method invocation. 2780 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2781 RegisterOrConstant vtable_index, 2782 Register method_result) { 2783 assert_different_registers(recv_klass, vtable_index.register_or_noreg()); 2784 assert(vtableEntry::size() * wordSize == wordSize, 2785 "else adjust the scaling in the code below"); 2786 2787 BLOCK_COMMENT("lookup_virtual_method {"); 2788 2789 const int base = in_bytes(Klass::vtable_start_offset()); 2790 2791 if (vtable_index.is_constant()) { 2792 // Load with base + disp. 2793 Address vtable_entry_addr(recv_klass, 2794 vtable_index.as_constant() * wordSize + 2795 base + 2796 vtableEntry::method_offset_in_bytes()); 2797 2798 z_lg(method_result, vtable_entry_addr); 2799 } else { 2800 // Shift index properly and load with base + index + disp. 2801 Register vindex = vtable_index.as_register(); 2802 Address vtable_entry_addr(recv_klass, vindex, 2803 base + vtableEntry::method_offset_in_bytes()); 2804 2805 z_sllg(vindex, vindex, exact_log2(wordSize)); 2806 z_lg(method_result, vtable_entry_addr); 2807 } 2808 BLOCK_COMMENT("} lookup_virtual_method"); 2809 } 2810 2811 // Factor out code to call ic_miss_handler. 2812 // Generate code to call the inline cache miss handler. 2813 // 2814 // In most cases, this code will be generated out-of-line. 2815 // The method parameters are intended to provide some variability. 2816 // ICM - Label which has to be bound to the start of useful code (past any traps). 2817 // trapMarker - Marking byte for the generated illtrap instructions (if any). 2818 // Any value except 0x00 is supported. 2819 // = 0x00 - do not generate illtrap instructions. 2820 // use nops to fill ununsed space. 2821 // requiredSize - required size of the generated code. If the actually 2822 // generated code is smaller, use padding instructions to fill up. 2823 // = 0 - no size requirement, no padding. 2824 // scratch - scratch register to hold branch target address. 2825 // 2826 // The method returns the code offset of the bound label. 2827 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) { 2828 intptr_t startOffset = offset(); 2829 2830 // Prevent entry at content_begin(). 2831 if (trapMarker != 0) { 2832 z_illtrap(trapMarker); 2833 } 2834 2835 // Load address of inline cache miss code into scratch register 2836 // and branch to cache miss handler. 2837 BLOCK_COMMENT("IC miss handler {"); 2838 BIND(ICM); 2839 unsigned int labelOffset = offset(); 2840 AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub()); 2841 2842 load_const_optimized(scratch, icmiss); 2843 z_br(scratch); 2844 2845 // Fill unused space. 2846 if (requiredSize > 0) { 2847 while ((offset() - startOffset) < requiredSize) { 2848 if (trapMarker == 0) { 2849 z_nop(); 2850 } else { 2851 z_illtrap(trapMarker); 2852 } 2853 } 2854 } 2855 BLOCK_COMMENT("} IC miss handler"); 2856 return labelOffset; 2857 } 2858 2859 void MacroAssembler::nmethod_UEP(Label& ic_miss) { 2860 Register ic_reg = as_Register(Matcher::inline_cache_reg_encode()); 2861 int klass_offset = oopDesc::klass_offset_in_bytes(); 2862 if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) { 2863 if (VM_Version::has_CompareBranch()) { 2864 z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss); 2865 } else { 2866 z_ltgr(Z_ARG1, Z_ARG1); 2867 z_bre(ic_miss); 2868 } 2869 } 2870 // Compare cached class against klass from receiver. 2871 compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false); 2872 z_brne(ic_miss); 2873 } 2874 2875 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 2876 Register super_klass, 2877 Register temp1_reg, 2878 Label* L_success, 2879 Label* L_failure, 2880 Label* L_slow_path, 2881 RegisterOrConstant super_check_offset) { 2882 2883 const int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2884 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2885 2886 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 2887 bool need_slow_path = (must_load_sco || 2888 super_check_offset.constant_or_zero() == sc_offset); 2889 2890 // Input registers must not overlap. 2891 assert_different_registers(sub_klass, super_klass, temp1_reg); 2892 if (super_check_offset.is_register()) { 2893 assert_different_registers(sub_klass, super_klass, 2894 super_check_offset.as_register()); 2895 } else if (must_load_sco) { 2896 assert(temp1_reg != noreg, "supply either a temp or a register offset"); 2897 } 2898 2899 const Register Rsuper_check_offset = temp1_reg; 2900 2901 NearLabel L_fallthrough; 2902 int label_nulls = 0; 2903 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2904 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2905 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2906 assert(label_nulls <= 1 || 2907 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2908 "at most one NULL in the batch, usually"); 2909 2910 BLOCK_COMMENT("check_klass_subtype_fast_path {"); 2911 // If the pointers are equal, we are done (e.g., String[] elements). 2912 // This self-check enables sharing of secondary supertype arrays among 2913 // non-primary types such as array-of-interface. Otherwise, each such 2914 // type would need its own customized SSA. 2915 // We move this check to the front of the fast path because many 2916 // type checks are in fact trivially successful in this manner, 2917 // so we get a nicely predicted branch right at the start of the check. 2918 compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success); 2919 2920 // Check the supertype display, which is uint. 2921 if (must_load_sco) { 2922 z_llgf(Rsuper_check_offset, sco_offset, super_klass); 2923 super_check_offset = RegisterOrConstant(Rsuper_check_offset); 2924 } 2925 Address super_check_addr(sub_klass, super_check_offset, 0); 2926 z_cg(super_klass, super_check_addr); // compare w/ displayed supertype 2927 2928 // This check has worked decisively for primary supers. 2929 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2930 // (Secondary supers are interfaces and very deeply nested subtypes.) 2931 // This works in the same check above because of a tricky aliasing 2932 // between the super_cache and the primary super display elements. 2933 // (The 'super_check_addr' can address either, as the case requires.) 2934 // Note that the cache is updated below if it does not help us find 2935 // what we need immediately. 2936 // So if it was a primary super, we can just fail immediately. 2937 // Otherwise, it's the slow path for us (no success at this point). 2938 2939 // Hacked jmp, which may only be used just before L_fallthrough. 2940 #define final_jmp(label) \ 2941 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 2942 else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/ 2943 2944 if (super_check_offset.is_register()) { 2945 branch_optimized(Assembler::bcondEqual, *L_success); 2946 z_cfi(super_check_offset.as_register(), sc_offset); 2947 if (L_failure == &L_fallthrough) { 2948 branch_optimized(Assembler::bcondEqual, *L_slow_path); 2949 } else { 2950 branch_optimized(Assembler::bcondNotEqual, *L_failure); 2951 final_jmp(*L_slow_path); 2952 } 2953 } else if (super_check_offset.as_constant() == sc_offset) { 2954 // Need a slow path; fast failure is impossible. 2955 if (L_slow_path == &L_fallthrough) { 2956 branch_optimized(Assembler::bcondEqual, *L_success); 2957 } else { 2958 branch_optimized(Assembler::bcondNotEqual, *L_slow_path); 2959 final_jmp(*L_success); 2960 } 2961 } else { 2962 // No slow path; it's a fast decision. 2963 if (L_failure == &L_fallthrough) { 2964 branch_optimized(Assembler::bcondEqual, *L_success); 2965 } else { 2966 branch_optimized(Assembler::bcondNotEqual, *L_failure); 2967 final_jmp(*L_success); 2968 } 2969 } 2970 2971 bind(L_fallthrough); 2972 #undef local_brc 2973 #undef final_jmp 2974 BLOCK_COMMENT("} check_klass_subtype_fast_path"); 2975 // fallthru (to slow path) 2976 } 2977 2978 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass, 2979 Register Rsuperklass, 2980 Register Rarray_ptr, // tmp 2981 Register Rlength, // tmp 2982 Label* L_success, 2983 Label* L_failure) { 2984 // Input registers must not overlap. 2985 // Also check for R1 which is explicitely used here. 2986 assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength); 2987 NearLabel L_fallthrough, L_loop; 2988 int label_nulls = 0; 2989 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2990 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2991 assert(label_nulls <= 1, "at most one NULL in the batch"); 2992 2993 const int ss_offset = in_bytes(Klass::secondary_supers_offset()); 2994 const int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 2995 2996 const int length_offset = Array<Klass*>::length_offset_in_bytes(); 2997 const int base_offset = Array<Klass*>::base_offset_in_bytes(); 2998 2999 // Hacked jmp, which may only be used just before L_fallthrough. 3000 #define final_jmp(label) \ 3001 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 3002 else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/ 3003 3004 NearLabel loop_iterate, loop_count, match; 3005 3006 BLOCK_COMMENT("check_klass_subtype_slow_path {"); 3007 z_lg(Rarray_ptr, ss_offset, Rsubklass); 3008 3009 load_and_test_int(Rlength, Address(Rarray_ptr, length_offset)); 3010 branch_optimized(Assembler::bcondZero, *L_failure); 3011 3012 // Oops in table are NO MORE compressed. 3013 z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match. 3014 z_bre(match); // Shortcut for array length = 1. 3015 3016 // No match yet, so we must walk the array's elements. 3017 z_lngfr(Rlength, Rlength); 3018 z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array 3019 z_llill(Z_R1, BytesPerWord); // Set increment/end index. 3020 add2reg(Rlength, 2 * BytesPerWord); // start index = -(n-2)*BytesPerWord 3021 z_slgr(Rarray_ptr, Rlength); // start addr: += (n-2)*BytesPerWord 3022 z_bru(loop_count); 3023 3024 BIND(loop_iterate); 3025 z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match. 3026 z_bre(match); 3027 BIND(loop_count); 3028 z_brxlg(Rlength, Z_R1, loop_iterate); 3029 3030 // Rsuperklass not found among secondary super classes -> failure. 3031 branch_optimized(Assembler::bcondAlways, *L_failure); 3032 3033 // Got a hit. Return success (zero result). Set cache. 3034 // Cache load doesn't happen here. For speed it is directly emitted by the compiler. 3035 3036 BIND(match); 3037 3038 z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache. 3039 3040 final_jmp(*L_success); 3041 3042 // Exit to the surrounding code. 3043 BIND(L_fallthrough); 3044 #undef local_brc 3045 #undef final_jmp 3046 BLOCK_COMMENT("} check_klass_subtype_slow_path"); 3047 } 3048 3049 // Emitter for combining fast and slow path. 3050 void MacroAssembler::check_klass_subtype(Register sub_klass, 3051 Register super_klass, 3052 Register temp1_reg, 3053 Register temp2_reg, 3054 Label& L_success) { 3055 NearLabel failure; 3056 BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name())); 3057 check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, 3058 &L_success, &failure, NULL); 3059 check_klass_subtype_slow_path(sub_klass, super_klass, 3060 temp1_reg, temp2_reg, &L_success, NULL); 3061 BIND(failure); 3062 BLOCK_COMMENT("} check_klass_subtype"); 3063 } 3064 3065 // Increment a counter at counter_address when the eq condition code is 3066 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code. 3067 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) { 3068 Label l; 3069 z_brne(l); 3070 load_const(tmp1_reg, counter_address); 3071 add2mem_32(Address(tmp1_reg), 1, tmp2_reg); 3072 z_cr(tmp1_reg, tmp1_reg); // Set cc to eq. 3073 bind(l); 3074 } 3075 3076 // Semantics are dependent on the slow_case label: 3077 // If the slow_case label is not NULL, failure to biased-lock the object 3078 // transfers control to the location of the slow_case label. If the 3079 // object could be biased-locked, control is transferred to the done label. 3080 // The condition code is unpredictable. 3081 // 3082 // If the slow_case label is NULL, failure to biased-lock the object results 3083 // in a transfer of control to the done label with a condition code of not_equal. 3084 // If the biased-lock could be successfully obtained, control is transfered to 3085 // the done label with a condition code of equal. 3086 // It is mandatory to react on the condition code At the done label. 3087 // 3088 void MacroAssembler::biased_locking_enter(Register obj_reg, 3089 Register mark_reg, 3090 Register temp_reg, 3091 Register temp2_reg, // May be Z_RO! 3092 Label &done, 3093 Label *slow_case) { 3094 assert(UseBiasedLocking, "why call this otherwise?"); 3095 assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg); 3096 3097 Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise. 3098 3099 BLOCK_COMMENT("biased_locking_enter {"); 3100 3101 // Biased locking 3102 // See whether the lock is currently biased toward our thread and 3103 // whether the epoch is still valid. 3104 // Note that the runtime guarantees sufficient alignment of JavaThread 3105 // pointers to allow age to be placed into low bits. 3106 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, 3107 "biased locking makes assumptions about bit layout"); 3108 z_lr(temp_reg, mark_reg); 3109 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place); 3110 z_chi(temp_reg, markOopDesc::biased_lock_pattern); 3111 z_brne(cas_label); // Try cas if object is not biased, i.e. cannot be biased locked. 3112 3113 load_prototype_header(temp_reg, obj_reg); 3114 load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place)); 3115 3116 z_ogr(temp_reg, Z_thread); 3117 z_xgr(temp_reg, mark_reg); 3118 z_ngr(temp_reg, temp2_reg); 3119 if (PrintBiasedLockingStatistics) { 3120 increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg); 3121 // Restore mark_reg. 3122 z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg); 3123 } 3124 branch_optimized(Assembler::bcondEqual, done); // Biased lock obtained, return success. 3125 3126 Label try_revoke_bias; 3127 Label try_rebias; 3128 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes()); 3129 3130 //---------------------------------------------------------------------------- 3131 // At this point we know that the header has the bias pattern and 3132 // that we are not the bias owner in the current epoch. We need to 3133 // figure out more details about the state of the header in order to 3134 // know what operations can be legally performed on the object's 3135 // header. 3136 3137 // If the low three bits in the xor result aren't clear, that means 3138 // the prototype header is no longer biased and we have to revoke 3139 // the bias on this object. 3140 z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place); 3141 z_brnaz(try_revoke_bias); 3142 3143 // Biasing is still enabled for this data type. See whether the 3144 // epoch of the current bias is still valid, meaning that the epoch 3145 // bits of the mark word are equal to the epoch bits of the 3146 // prototype header. (Note that the prototype header's epoch bits 3147 // only change at a safepoint.) If not, attempt to rebias the object 3148 // toward the current thread. Note that we must be absolutely sure 3149 // that the current epoch is invalid in order to do this because 3150 // otherwise the manipulations it performs on the mark word are 3151 // illegal. 3152 z_tmll(temp_reg, markOopDesc::epoch_mask_in_place); 3153 z_brnaz(try_rebias); 3154 3155 //---------------------------------------------------------------------------- 3156 // The epoch of the current bias is still valid but we know nothing 3157 // about the owner; it might be set or it might be clear. Try to 3158 // acquire the bias of the object using an atomic operation. If this 3159 // fails we will go in to the runtime to revoke the object's bias. 3160 // Note that we first construct the presumed unbiased header so we 3161 // don't accidentally blow away another thread's valid bias. 3162 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | 3163 markOopDesc::epoch_mask_in_place); 3164 z_lgr(temp_reg, Z_thread); 3165 z_llgfr(mark_reg, mark_reg); 3166 z_ogr(temp_reg, mark_reg); 3167 3168 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3169 3170 z_csg(mark_reg, temp_reg, 0, obj_reg); 3171 3172 // If the biasing toward our thread failed, this means that 3173 // another thread succeeded in biasing it toward itself and we 3174 // need to revoke that bias. The revocation will occur in the 3175 // interpreter runtime in the slow case. 3176 3177 if (PrintBiasedLockingStatistics) { 3178 increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), 3179 temp_reg, temp2_reg); 3180 } 3181 if (slow_case != NULL) { 3182 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way. 3183 } 3184 branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code. 3185 3186 //---------------------------------------------------------------------------- 3187 bind(try_rebias); 3188 // At this point we know the epoch has expired, meaning that the 3189 // current "bias owner", if any, is actually invalid. Under these 3190 // circumstances _only_, we are allowed to use the current header's 3191 // value as the comparison value when doing the cas to acquire the 3192 // bias in the current epoch. In other words, we allow transfer of 3193 // the bias from one thread to another directly in this situation. 3194 3195 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); 3196 load_prototype_header(temp_reg, obj_reg); 3197 z_llgfr(mark_reg, mark_reg); 3198 3199 z_ogr(temp_reg, Z_thread); 3200 3201 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3202 3203 z_csg(mark_reg, temp_reg, 0, obj_reg); 3204 3205 // If the biasing toward our thread failed, this means that 3206 // another thread succeeded in biasing it toward itself and we 3207 // need to revoke that bias. The revocation will occur in the 3208 // interpreter runtime in the slow case. 3209 3210 if (PrintBiasedLockingStatistics) { 3211 increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg); 3212 } 3213 if (slow_case != NULL) { 3214 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way. 3215 } 3216 z_bru(done); // Biased lock status given in condition code. 3217 3218 //---------------------------------------------------------------------------- 3219 bind(try_revoke_bias); 3220 // The prototype mark in the klass doesn't have the bias bit set any 3221 // more, indicating that objects of this data type are not supposed 3222 // to be biased any more. We are going to try to reset the mark of 3223 // this object to the prototype value and fall through to the 3224 // CAS-based locking scheme. Note that if our CAS fails, it means 3225 // that another thread raced us for the privilege of revoking the 3226 // bias of this particular object, so it's okay to continue in the 3227 // normal locking code. 3228 load_prototype_header(temp_reg, obj_reg); 3229 3230 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3231 3232 z_csg(mark_reg, temp_reg, 0, obj_reg); 3233 3234 // Fall through to the normal CAS-based lock, because no matter what 3235 // the result of the above CAS, some thread must have succeeded in 3236 // removing the bias bit from the object's header. 3237 if (PrintBiasedLockingStatistics) { 3238 // z_cgr(mark_reg, temp2_reg); 3239 increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg); 3240 } 3241 3242 bind(cas_label); 3243 BLOCK_COMMENT("} biased_locking_enter"); 3244 } 3245 3246 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) { 3247 // Check for biased locking unlock case, which is a no-op 3248 // Note: we do not have to check the thread ID for two reasons. 3249 // First, the interpreter checks for IllegalMonitorStateException at 3250 // a higher level. Second, if the bias was revoked while we held the 3251 // lock, the object could not be rebiased toward another thread, so 3252 // the bias bit would be clear. 3253 BLOCK_COMMENT("biased_locking_exit {"); 3254 3255 z_lg(temp_reg, 0, mark_addr); 3256 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place); 3257 3258 z_chi(temp_reg, markOopDesc::biased_lock_pattern); 3259 z_bre(done); 3260 BLOCK_COMMENT("} biased_locking_exit"); 3261 } 3262 3263 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3264 Register displacedHeader = temp1; 3265 Register currentHeader = temp1; 3266 Register temp = temp2; 3267 NearLabel done, object_has_monitor; 3268 3269 BLOCK_COMMENT("compiler_fast_lock_object {"); 3270 3271 // Load markOop from oop into mark. 3272 z_lg(displacedHeader, 0, oop); 3273 3274 if (try_bias) { 3275 biased_locking_enter(oop, displacedHeader, temp, Z_R0, done); 3276 } 3277 3278 // Handle existing monitor. 3279 if ((EmitSync & 0x01) == 0) { 3280 // The object has an existing monitor iff (mark & monitor_value) != 0. 3281 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3282 z_lr(temp, displacedHeader); 3283 z_nill(temp, markOopDesc::monitor_value); 3284 z_brne(object_has_monitor); 3285 } 3286 3287 // Set mark to markOop | markOopDesc::unlocked_value. 3288 z_oill(displacedHeader, markOopDesc::unlocked_value); 3289 3290 // Load Compare Value application register. 3291 3292 // Initialize the box (must happen before we update the object mark). 3293 z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box); 3294 3295 // Memory Fence (in cmpxchgd) 3296 // Compare object markOop with mark and if equal exchange scratch1 with object markOop. 3297 3298 // If the compare-and-swap succeeded, then we found an unlocked object and we 3299 // have now locked it. 3300 z_csg(displacedHeader, box, 0, oop); 3301 assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture. 3302 z_bre(done); 3303 3304 // We did not see an unlocked object so try the fast recursive case. 3305 3306 z_sgr(currentHeader, Z_SP); 3307 load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place)); 3308 3309 z_ngr(currentHeader, temp); 3310 // z_brne(done); 3311 // z_release(); 3312 z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box); 3313 3314 z_bru(done); 3315 3316 if ((EmitSync & 0x01) == 0) { 3317 Register zero = temp; 3318 Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value. 3319 bind(object_has_monitor); 3320 // The object's monitor m is unlocked iff m->owner == NULL, 3321 // otherwise m->owner may contain a thread or a stack address. 3322 // 3323 // Try to CAS m->owner from NULL to current thread. 3324 z_lghi(zero, 0); 3325 // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. 3326 z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged); 3327 // Store a non-null value into the box. 3328 z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box); 3329 #ifdef ASSERT 3330 z_brne(done); 3331 // We've acquired the monitor, check some invariants. 3332 // Invariant 1: _recursions should be 0. 3333 asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged, 3334 "monitor->_recursions should be 0", -1); 3335 z_ltgr(zero, zero); // Set CR=EQ. 3336 #endif 3337 } 3338 bind(done); 3339 3340 BLOCK_COMMENT("} compiler_fast_lock_object"); 3341 // If locking was successful, CR should indicate 'EQ'. 3342 // The compiler or the native wrapper generates a branch to the runtime call 3343 // _complete_monitor_locking_Java. 3344 } 3345 3346 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3347 Register displacedHeader = temp1; 3348 Register currentHeader = temp2; 3349 Register temp = temp1; 3350 Register monitor = temp2; 3351 3352 Label done, object_has_monitor; 3353 3354 BLOCK_COMMENT("compiler_fast_unlock_object {"); 3355 3356 if (try_bias) { 3357 biased_locking_exit(oop, currentHeader, done); 3358 } 3359 3360 // Find the lock address and load the displaced header from the stack. 3361 // if the displaced header is zero, we have a recursive unlock. 3362 load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes())); 3363 z_bre(done); 3364 3365 // Handle existing monitor. 3366 if ((EmitSync & 0x02) == 0) { 3367 // The object has an existing monitor iff (mark & monitor_value) != 0. 3368 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); 3369 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3370 z_nill(currentHeader, markOopDesc::monitor_value); 3371 z_brne(object_has_monitor); 3372 } 3373 3374 // Check if it is still a light weight lock, this is true if we see 3375 // the stack address of the basicLock in the markOop of the object 3376 // copy box to currentHeader such that csg does not kill it. 3377 z_lgr(currentHeader, box); 3378 z_csg(currentHeader, displacedHeader, 0, oop); 3379 z_bru(done); // Csg sets CR as desired. 3380 3381 // Handle existing monitor. 3382 if ((EmitSync & 0x02) == 0) { 3383 bind(object_has_monitor); 3384 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set. 3385 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); 3386 z_brne(done); 3387 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3388 z_brne(done); 3389 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); 3390 z_brne(done); 3391 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); 3392 z_brne(done); 3393 z_release(); 3394 z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader); 3395 } 3396 3397 bind(done); 3398 3399 BLOCK_COMMENT("} compiler_fast_unlock_object"); 3400 // flag == EQ indicates success 3401 // flag == NE indicates failure 3402 } 3403 3404 // Write to card table for modification at store_addr - register is destroyed afterwards. 3405 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) { 3406 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); 3407 assert(bs->kind() == BarrierSet::CardTableForRS || 3408 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); 3409 assert_different_registers(store_addr, tmp); 3410 z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift); 3411 load_absolute_address(tmp, (address)bs->byte_map_base); 3412 z_agr(store_addr, tmp); 3413 z_mvi(0, store_addr, 0); // Store byte 0. 3414 } 3415 3416 #if INCLUDE_ALL_GCS 3417 3418 //------------------------------------------------------ 3419 // General G1 pre-barrier generator. 3420 // Purpose: record the previous value if it is not null. 3421 // All non-tmps are preserved. 3422 //------------------------------------------------------ 3423 void MacroAssembler::g1_write_barrier_pre(Register Robj, 3424 RegisterOrConstant offset, 3425 Register Rpre_val, // Ideally, this is a non-volatile register. 3426 Register Rval, // Will be preserved. 3427 Register Rtmp1, // If Rpre_val is volatile, either Rtmp1 3428 Register Rtmp2, // or Rtmp2 has to be non-volatile.. 3429 bool pre_val_needed // Save Rpre_val across runtime call, caller uses it. 3430 ) { 3431 Label callRuntime, filtered; 3432 const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()); 3433 const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf()); 3434 const int index_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index()); 3435 assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!! 3436 3437 BLOCK_COMMENT("g1_write_barrier_pre {"); 3438 3439 // Is marking active? 3440 // Note: value is loaded for test purposes only. No further use here. 3441 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 3442 load_and_test_int(Rtmp1, Address(Z_thread, active_offset)); 3443 } else { 3444 guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); 3445 load_and_test_byte(Rtmp1, Address(Z_thread, active_offset)); 3446 } 3447 z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently. 3448 3449 // Do we need to load the previous value into Rpre_val? 3450 if (Robj != noreg) { 3451 // Load the previous value... 3452 Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0; 3453 if (UseCompressedOops) { 3454 z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj); 3455 } else { 3456 z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj); 3457 } 3458 } 3459 assert(Rpre_val != noreg, "must have a real register"); 3460 3461 // Is the previous value NULL? 3462 // Note: pre_val is loaded, decompressed and stored (directly or via runtime call). 3463 // Register contents is preserved across runtime call if caller requests to do so. 3464 z_ltgr(Rpre_val, Rpre_val); 3465 z_bre(filtered); // previous value is NULL, so we don't need to record it. 3466 3467 // Decode the oop now. We know it's not NULL. 3468 if (Robj != noreg && UseCompressedOops) { 3469 oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false); 3470 } 3471 3472 // OK, it's not filtered, so we'll need to call enqueue. 3473 3474 // We can store the original value in the thread's buffer 3475 // only if index > 0. Otherwise, we need runtime to handle. 3476 // (The index field is typed as size_t.) 3477 Register Rbuffer = Rtmp1, Rindex = Rtmp2; 3478 3479 z_lg(Rbuffer, buffer_offset, Z_thread); 3480 3481 load_and_test_long(Rindex, Address(Z_thread, index_offset)); 3482 z_bre(callRuntime); // If index == 0, goto runtime. 3483 3484 add2reg(Rindex, -wordSize); // Decrement index. 3485 z_stg(Rindex, index_offset, Z_thread); 3486 3487 // Record the previous value. 3488 z_stg(Rpre_val, 0, Rbuffer, Rindex); 3489 z_bru(filtered); // We are done. 3490 3491 Rbuffer = noreg; // end of life 3492 Rindex = noreg; // end of life 3493 3494 bind(callRuntime); 3495 3496 // Save Rpre_val (result) over runtime call. 3497 // Requires Rtmp1, Rtmp2, or Rpre_val to be non-volatile. 3498 Register Rpre_save = Rpre_val; 3499 if (pre_val_needed && Rpre_val->is_volatile()) { 3500 guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!"); 3501 Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2; 3502 } 3503 lgr_if_needed(Rpre_save, Rpre_val); 3504 3505 // Preserve inputs by spilling them into the top frame. 3506 if (Robj != noreg && Robj->is_volatile()) { 3507 z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3508 } 3509 if (offset.is_register() && offset.as_register()->is_volatile()) { 3510 Register Roff = offset.as_register(); 3511 z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3512 } 3513 if (Rval != noreg && Rval->is_volatile()) { 3514 z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3515 } 3516 3517 // Push frame to protect top frame with return pc and spilled register values. 3518 save_return_pc(); 3519 push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs. 3520 3521 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, Z_thread); 3522 3523 pop_frame(); 3524 restore_return_pc(); 3525 3526 // Restore spilled values. 3527 if (Robj != noreg && Robj->is_volatile()) { 3528 z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3529 } 3530 if (offset.is_register() && offset.as_register()->is_volatile()) { 3531 Register Roff = offset.as_register(); 3532 z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3533 } 3534 if (Rval != noreg && Rval->is_volatile()) { 3535 z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3536 } 3537 3538 // Restore Rpre_val (result) after runtime call. 3539 lgr_if_needed(Rpre_val, Rpre_save); 3540 3541 bind(filtered); 3542 BLOCK_COMMENT("} g1_write_barrier_pre"); 3543 } 3544 3545 // General G1 post-barrier generator. 3546 // Purpose: Store cross-region card. 3547 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, 3548 Register Rnew_val, 3549 Register Rtmp1, 3550 Register Rtmp2, 3551 Register Rtmp3) { 3552 Label callRuntime, filtered; 3553 3554 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3. 3555 3556 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); 3557 assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); 3558 3559 BLOCK_COMMENT("g1_write_barrier_post {"); 3560 3561 // Does store cross heap regions? 3562 // It does if the two addresses specify different grain addresses. 3563 if (G1RSBarrierRegionFilter) { 3564 if (VM_Version::has_DistinctOpnds()) { 3565 z_xgrk(Rtmp1, Rstore_addr, Rnew_val); 3566 } else { 3567 z_lgr(Rtmp1, Rstore_addr); 3568 z_xgr(Rtmp1, Rnew_val); 3569 } 3570 z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); 3571 z_bre(filtered); 3572 } 3573 3574 // Crosses regions, storing NULL? 3575 #ifdef ASSERT 3576 z_ltgr(Rnew_val, Rnew_val); 3577 asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete: 3578 z_bre(filtered); // Safety net: don't break if we have a NULL oop. 3579 #endif 3580 Rnew_val = noreg; // end of lifetime 3581 3582 // Storing region crossing non-NULL, is card already dirty? 3583 assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code"); 3584 assert_different_registers(Rtmp1, Rtmp2, Rtmp3); 3585 // Make sure not to use Z_R0 for any of these registers. 3586 Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3; 3587 Register Rbase = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3; 3588 3589 // calculate address of card 3590 load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base. 3591 z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table. 3592 add2reg_with_index(Rcard_addr, 0, Rcard_addr, Rbase); // Explicit calculation needed for cli. 3593 Rbase = noreg; // end of lifetime 3594 3595 // Filter young. 3596 assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code"); 3597 z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val()); 3598 z_bre(filtered); 3599 3600 // Check the card value. If dirty, we're done. 3601 // This also avoids false sharing of the (already dirty) card. 3602 z_sync(); // Required to support concurrent cleaning. 3603 assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code"); 3604 z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar. 3605 z_bre(filtered); 3606 3607 // Storing a region crossing, non-NULL oop, card is clean. 3608 // Dirty card and log. 3609 z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); 3610 3611 Register Rcard_addr_x = Rcard_addr; 3612 Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1; 3613 Register Rqueue_buf = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1; 3614 const int qidx_off = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index()); 3615 const int qbuf_off = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf()); 3616 if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) { 3617 Rcard_addr_x = Z_R0_scratch; // Register shortage. We have to use Z_R0. 3618 } 3619 lgr_if_needed(Rcard_addr_x, Rcard_addr); 3620 3621 load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off)); 3622 z_bre(callRuntime); // Index == 0 then jump to runtime. 3623 3624 z_lg(Rqueue_buf, qbuf_off, Z_thread); 3625 3626 add2reg(Rqueue_index, -wordSize); // Decrement index. 3627 z_stg(Rqueue_index, qidx_off, Z_thread); 3628 3629 z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card. 3630 z_bru(filtered); 3631 3632 bind(callRuntime); 3633 3634 // TODO: do we need a frame? Introduced to be on the safe side. 3635 bool needs_frame = true; 3636 3637 // VM call need frame to access(write) O register. 3638 if (needs_frame) { 3639 save_return_pc(); 3640 push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs. 3641 } 3642 3643 // Save the live input values. 3644 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr_x, Z_thread); 3645 3646 if (needs_frame) { 3647 pop_frame(); 3648 restore_return_pc(); 3649 } 3650 3651 bind(filtered); 3652 3653 BLOCK_COMMENT("} g1_write_barrier_post"); 3654 } 3655 #endif // INCLUDE_ALL_GCS 3656 3657 // Last_Java_sp must comply to the rules in frame_s390.hpp. 3658 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) { 3659 BLOCK_COMMENT("set_last_Java_frame {"); 3660 3661 // Always set last_Java_pc and flags first because once last_Java_sp 3662 // is visible has_last_Java_frame is true and users will look at the 3663 // rest of the fields. (Note: flags should always be zero before we 3664 // get here so doesn't need to be set.) 3665 3666 // Verify that last_Java_pc was zeroed on return to Java. 3667 if (allow_relocation) { 3668 asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), 3669 Z_thread, 3670 "last_Java_pc not zeroed before leaving Java", 3671 0x200); 3672 } else { 3673 asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()), 3674 Z_thread, 3675 "last_Java_pc not zeroed before leaving Java", 3676 0x200); 3677 } 3678 3679 // When returning from calling out from Java mode the frame anchor's 3680 // last_Java_pc will always be set to NULL. It is set here so that 3681 // if we are doing a call to native (not VM) that we capture the 3682 // known pc and don't have to rely on the native call having a 3683 // standard frame linkage where we can find the pc. 3684 if (last_Java_pc!=noreg) { 3685 z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset())); 3686 } 3687 3688 // This membar release is not required on z/Architecture, since the sequence of stores 3689 // in maintained. Nevertheless, we leave it in to document the required ordering. 3690 // The implementation of z_release() should be empty. 3691 // z_release(); 3692 3693 z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset())); 3694 BLOCK_COMMENT("} set_last_Java_frame"); 3695 } 3696 3697 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) { 3698 BLOCK_COMMENT("reset_last_Java_frame {"); 3699 3700 if (allow_relocation) { 3701 asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()), 3702 Z_thread, 3703 "SP was not set, still zero", 3704 0x202); 3705 } else { 3706 asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()), 3707 Z_thread, 3708 "SP was not set, still zero", 3709 0x202); 3710 } 3711 3712 // _last_Java_sp = 0 3713 // Clearing storage must be atomic here, so don't use clear_mem()! 3714 store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0); 3715 3716 // _last_Java_pc = 0 3717 store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0); 3718 3719 BLOCK_COMMENT("} reset_last_Java_frame"); 3720 return; 3721 } 3722 3723 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) { 3724 assert_different_registers(sp, tmp1); 3725 3726 // We cannot trust that code generated by the C++ compiler saves R14 3727 // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at 3728 // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()). 3729 // Therefore we load the PC into tmp1 and let set_last_Java_frame() save 3730 // it into the frame anchor. 3731 get_PC(tmp1); 3732 set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation); 3733 } 3734 3735 void MacroAssembler::set_thread_state(JavaThreadState new_state) { 3736 z_release(); 3737 3738 assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction"); 3739 assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int"); 3740 store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false); 3741 } 3742 3743 void MacroAssembler::get_vm_result(Register oop_result) { 3744 verify_thread(); 3745 3746 z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); 3747 clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*)); 3748 3749 verify_oop(oop_result); 3750 } 3751 3752 void MacroAssembler::get_vm_result_2(Register result) { 3753 verify_thread(); 3754 3755 z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset())); 3756 clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*)); 3757 } 3758 3759 // We require that C code which does not return a value in vm_result will 3760 // leave it undisturbed. 3761 void MacroAssembler::set_vm_result(Register oop_result) { 3762 z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset())); 3763 } 3764 3765 // Explicit null checks (used for method handle code). 3766 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) { 3767 if (!ImplicitNullChecks) { 3768 NearLabel ok; 3769 3770 compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok); 3771 3772 // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address). 3773 address exception_entry = Interpreter::throw_NullPointerException_entry(); 3774 load_absolute_address(reg, exception_entry); 3775 z_br(reg); 3776 3777 bind(ok); 3778 } else { 3779 if (needs_explicit_null_check((intptr_t)offset)) { 3780 // Provoke OS NULL exception if reg = NULL by 3781 // accessing M[reg] w/o changing any registers. 3782 z_lg(tmp, 0, reg); 3783 } 3784 // else 3785 // Nothing to do, (later) access of M[reg + offset] 3786 // will provoke OS NULL exception if reg = NULL. 3787 } 3788 } 3789 3790 //------------------------------------- 3791 // Compressed Klass Pointers 3792 //------------------------------------- 3793 3794 // Klass oop manipulations if compressed. 3795 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 3796 Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible. 3797 address base = Universe::narrow_klass_base(); 3798 int shift = Universe::narrow_klass_shift(); 3799 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3800 3801 BLOCK_COMMENT("cKlass encoder {"); 3802 3803 #ifdef ASSERT 3804 Label ok; 3805 z_tmll(current, KlassAlignmentInBytes-1); // Check alignment. 3806 z_brc(Assembler::bcondAllZero, ok); 3807 // The plain disassembler does not recognize illtrap. It instead displays 3808 // a 32-bit value. Issueing two illtraps assures the disassembler finds 3809 // the proper beginning of the next instruction. 3810 z_illtrap(0xee); 3811 z_illtrap(0xee); 3812 bind(ok); 3813 #endif 3814 3815 if (base != NULL) { 3816 unsigned int base_h = ((unsigned long)base)>>32; 3817 unsigned int base_l = (unsigned int)((unsigned long)base); 3818 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3819 lgr_if_needed(dst, current); 3820 z_aih(dst, -((int)base_h)); // Base has no set bits in lower half. 3821 } else if ((base_h == 0) && (base_l != 0)) { 3822 lgr_if_needed(dst, current); 3823 z_agfi(dst, -(int)base_l); 3824 } else { 3825 load_const(Z_R0, base); 3826 lgr_if_needed(dst, current); 3827 z_sgr(dst, Z_R0); 3828 } 3829 current = dst; 3830 } 3831 if (shift != 0) { 3832 assert (LogKlassAlignmentInBytes == shift, "decode alg wrong"); 3833 z_srlg(dst, current, shift); 3834 current = dst; 3835 } 3836 lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0). 3837 3838 BLOCK_COMMENT("} cKlass encoder"); 3839 } 3840 3841 // This function calculates the size of the code generated by 3842 // decode_klass_not_null(register dst, Register src) 3843 // when (Universe::heap() != NULL). Hence, if the instructions 3844 // it generates change, then this method needs to be updated. 3845 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3846 address base = Universe::narrow_klass_base(); 3847 int shift_size = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */ 3848 int addbase_size = 0; 3849 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3850 3851 if (base != NULL) { 3852 unsigned int base_h = ((unsigned long)base)>>32; 3853 unsigned int base_l = (unsigned int)((unsigned long)base); 3854 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3855 addbase_size += 6; /* aih */ 3856 } else if ((base_h == 0) && (base_l != 0)) { 3857 addbase_size += 6; /* algfi */ 3858 } else { 3859 addbase_size += load_const_size(); 3860 addbase_size += 4; /* algr */ 3861 } 3862 } 3863 #ifdef ASSERT 3864 addbase_size += 10; 3865 addbase_size += 2; // Extra sigill. 3866 #endif 3867 return addbase_size + shift_size; 3868 } 3869 3870 // !!! If the instructions that get generated here change 3871 // then function instr_size_for_decode_klass_not_null() 3872 // needs to get updated. 3873 // This variant of decode_klass_not_null() must generate predictable code! 3874 // The code must only depend on globally known parameters. 3875 void MacroAssembler::decode_klass_not_null(Register dst) { 3876 address base = Universe::narrow_klass_base(); 3877 int shift = Universe::narrow_klass_shift(); 3878 int beg_off = offset(); 3879 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3880 3881 BLOCK_COMMENT("cKlass decoder (const size) {"); 3882 3883 if (shift != 0) { // Shift required? 3884 z_sllg(dst, dst, shift); 3885 } 3886 if (base != NULL) { 3887 unsigned int base_h = ((unsigned long)base)>>32; 3888 unsigned int base_l = (unsigned int)((unsigned long)base); 3889 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3890 z_aih(dst, base_h); // Base has no set bits in lower half. 3891 } else if ((base_h == 0) && (base_l != 0)) { 3892 z_algfi(dst, base_l); // Base has no set bits in upper half. 3893 } else { 3894 load_const(Z_R0, base); // Base has set bits everywhere. 3895 z_algr(dst, Z_R0); 3896 } 3897 } 3898 3899 #ifdef ASSERT 3900 Label ok; 3901 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. 3902 z_brc(Assembler::bcondAllZero, ok); 3903 // The plain disassembler does not recognize illtrap. It instead displays 3904 // a 32-bit value. Issueing two illtraps assures the disassembler finds 3905 // the proper beginning of the next instruction. 3906 z_illtrap(0xd1); 3907 z_illtrap(0xd1); 3908 bind(ok); 3909 #endif 3910 assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch."); 3911 3912 BLOCK_COMMENT("} cKlass decoder (const size)"); 3913 } 3914 3915 // This variant of decode_klass_not_null() is for cases where 3916 // 1) the size of the generated instructions may vary 3917 // 2) the result is (potentially) stored in a register different from the source. 3918 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 3919 address base = Universe::narrow_klass_base(); 3920 int shift = Universe::narrow_klass_shift(); 3921 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3922 3923 BLOCK_COMMENT("cKlass decoder {"); 3924 3925 if (src == noreg) src = dst; 3926 3927 if (shift != 0) { // Shift or at least move required? 3928 z_sllg(dst, src, shift); 3929 } else { 3930 lgr_if_needed(dst, src); 3931 } 3932 3933 if (base != NULL) { 3934 unsigned int base_h = ((unsigned long)base)>>32; 3935 unsigned int base_l = (unsigned int)((unsigned long)base); 3936 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3937 z_aih(dst, base_h); // Base has not set bits in lower half. 3938 } else if ((base_h == 0) && (base_l != 0)) { 3939 z_algfi(dst, base_l); // Base has no set bits in upper half. 3940 } else { 3941 load_const_optimized(Z_R0, base); // Base has set bits everywhere. 3942 z_algr(dst, Z_R0); 3943 } 3944 } 3945 3946 #ifdef ASSERT 3947 Label ok; 3948 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. 3949 z_brc(Assembler::bcondAllZero, ok); 3950 // The plain disassembler does not recognize illtrap. It instead displays 3951 // a 32-bit value. Issueing two illtraps assures the disassembler finds 3952 // the proper beginning of the next instruction. 3953 z_illtrap(0xd2); 3954 z_illtrap(0xd2); 3955 bind(ok); 3956 #endif 3957 BLOCK_COMMENT("} cKlass decoder"); 3958 } 3959 3960 void MacroAssembler::load_klass(Register klass, Address mem) { 3961 if (UseCompressedClassPointers) { 3962 z_llgf(klass, mem); 3963 // Attention: no null check here! 3964 decode_klass_not_null(klass); 3965 } else { 3966 z_lg(klass, mem); 3967 } 3968 } 3969 3970 void MacroAssembler::load_klass(Register klass, Register src_oop) { 3971 if (UseCompressedClassPointers) { 3972 z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop); 3973 // Attention: no null check here! 3974 decode_klass_not_null(klass); 3975 } else { 3976 z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); 3977 } 3978 } 3979 3980 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) { 3981 assert_different_registers(Rheader, Rsrc_oop); 3982 load_klass(Rheader, Rsrc_oop); 3983 z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset())); 3984 } 3985 3986 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { 3987 if (UseCompressedClassPointers) { 3988 assert_different_registers(dst_oop, klass, Z_R0); 3989 if (ck == noreg) ck = klass; 3990 encode_klass_not_null(ck, klass); 3991 z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 3992 } else { 3993 z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 3994 } 3995 } 3996 3997 void MacroAssembler::store_klass_gap(Register s, Register d) { 3998 if (UseCompressedClassPointers) { 3999 assert(s != d, "not enough registers"); 4000 z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); 4001 } 4002 } 4003 4004 // Compare klass ptr in memory against klass ptr in register. 4005 // 4006 // Rop1 - klass in register, always uncompressed. 4007 // disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag. 4008 // Rbase - Base address of cKlass in memory. 4009 // maybeNULL - True if Rop1 possibly is a NULL. 4010 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) { 4011 4012 BLOCK_COMMENT("compare klass ptr {"); 4013 4014 if (UseCompressedClassPointers) { 4015 const int shift = Universe::narrow_klass_shift(); 4016 address base = Universe::narrow_klass_base(); 4017 4018 assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift"); 4019 assert_different_registers(Rop1, Z_R0); 4020 assert_different_registers(Rop1, Rbase, Z_R1); 4021 4022 // First encode register oop and then compare with cOop in memory. 4023 // This sequence saves an unnecessary cOop load and decode. 4024 if (base == NULL) { 4025 if (shift == 0) { 4026 z_cl(Rop1, disp, Rbase); // Unscaled 4027 } else { 4028 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4029 z_cl(Z_R0, disp, Rbase); 4030 } 4031 } else { // HeapBased 4032 #ifdef ASSERT 4033 bool used_R0 = true; 4034 bool used_R1 = true; 4035 #endif 4036 Register current = Rop1; 4037 Label done; 4038 4039 if (maybeNULL) { // NULL ptr must be preserved! 4040 z_ltgr(Z_R0, current); 4041 z_bre(done); 4042 current = Z_R0; 4043 } 4044 4045 unsigned int base_h = ((unsigned long)base)>>32; 4046 unsigned int base_l = (unsigned int)((unsigned long)base); 4047 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 4048 lgr_if_needed(Z_R0, current); 4049 z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. 4050 } else if ((base_h == 0) && (base_l != 0)) { 4051 lgr_if_needed(Z_R0, current); 4052 z_agfi(Z_R0, -(int)base_l); 4053 } else { 4054 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4055 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. 4056 } 4057 4058 if (shift != 0) { 4059 z_srlg(Z_R0, Z_R0, shift); 4060 } 4061 bind(done); 4062 z_cl(Z_R0, disp, Rbase); 4063 #ifdef ASSERT 4064 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4065 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4066 #endif 4067 } 4068 } else { 4069 z_clg(Rop1, disp, Z_R0, Rbase); 4070 } 4071 BLOCK_COMMENT("} compare klass ptr"); 4072 } 4073 4074 //--------------------------- 4075 // Compressed oops 4076 //--------------------------- 4077 4078 void MacroAssembler::encode_heap_oop(Register oop) { 4079 oop_encoder(oop, oop, true /*maybe null*/); 4080 } 4081 4082 void MacroAssembler::encode_heap_oop_not_null(Register oop) { 4083 oop_encoder(oop, oop, false /*not null*/); 4084 } 4085 4086 // Called with something derived from the oop base. e.g. oop_base>>3. 4087 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) { 4088 unsigned int oop_base_ll = ((unsigned int)(oop_base >> 0)) & 0xffff; 4089 unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff; 4090 unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff; 4091 unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff; 4092 unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1) 4093 + (oop_base_lh == 0 ? 0:1) 4094 + (oop_base_hl == 0 ? 0:1) 4095 + (oop_base_hh == 0 ? 0:1); 4096 4097 assert(oop_base != 0, "This is for HeapBased cOops only"); 4098 4099 if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2. 4100 uint64_t pow2_offset = 0x10000 - oop_base_ll; 4101 if (pow2_offset < 0x8000) { // This might not be necessary. 4102 uint64_t oop_base2 = oop_base + pow2_offset; 4103 4104 oop_base_ll = ((unsigned int)(oop_base2 >> 0)) & 0xffff; 4105 oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff; 4106 oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff; 4107 oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff; 4108 n_notzero_parts = (oop_base_ll == 0 ? 0:1) + 4109 (oop_base_lh == 0 ? 0:1) + 4110 (oop_base_hl == 0 ? 0:1) + 4111 (oop_base_hh == 0 ? 0:1); 4112 if (n_notzero_parts == 1) { 4113 assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register"); 4114 return -pow2_offset; 4115 } 4116 } 4117 } 4118 return 0; 4119 } 4120 4121 // If base address is offset from a straight power of two by just a few pages, 4122 // return this offset to the caller for a possible later composite add. 4123 // TODO/FIX: will only work correctly for 4k pages. 4124 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) { 4125 int pow2_offset = get_oop_base_pow2_offset(oop_base); 4126 4127 load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible. 4128 4129 return pow2_offset; 4130 } 4131 4132 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) { 4133 int offset = get_oop_base(Rbase, oop_base); 4134 z_lcgr(Rbase, Rbase); 4135 return -offset; 4136 } 4137 4138 // Compare compressed oop in memory against oop in register. 4139 // Rop1 - Oop in register. 4140 // disp - Offset of cOop in memory. 4141 // Rbase - Base address of cOop in memory. 4142 // maybeNULL - True if Rop1 possibly is a NULL. 4143 // maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction. 4144 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) { 4145 Register Rbase = mem.baseOrR0(); 4146 Register Rindex = mem.indexOrR0(); 4147 int64_t disp = mem.disp(); 4148 4149 const int shift = Universe::narrow_oop_shift(); 4150 address base = Universe::narrow_oop_base(); 4151 4152 assert(UseCompressedOops, "must be on to call this method"); 4153 assert(Universe::heap() != NULL, "java heap must be initialized to call this method"); 4154 assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4155 assert_different_registers(Rop1, Z_R0); 4156 assert_different_registers(Rop1, Rbase, Z_R1); 4157 assert_different_registers(Rop1, Rindex, Z_R1); 4158 4159 BLOCK_COMMENT("compare heap oop {"); 4160 4161 // First encode register oop and then compare with cOop in memory. 4162 // This sequence saves an unnecessary cOop load and decode. 4163 if (base == NULL) { 4164 if (shift == 0) { 4165 z_cl(Rop1, disp, Rindex, Rbase); // Unscaled 4166 } else { 4167 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4168 z_cl(Z_R0, disp, Rindex, Rbase); 4169 } 4170 } else { // HeapBased 4171 #ifdef ASSERT 4172 bool used_R0 = true; 4173 bool used_R1 = true; 4174 #endif 4175 Label done; 4176 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4177 4178 if (maybeNULL) { // NULL ptr must be preserved! 4179 z_ltgr(Z_R0, Rop1); 4180 z_bre(done); 4181 } 4182 4183 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); 4184 z_srlg(Z_R0, Z_R0, shift); 4185 4186 bind(done); 4187 z_cl(Z_R0, disp, Rindex, Rbase); 4188 #ifdef ASSERT 4189 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4190 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4191 #endif 4192 } 4193 BLOCK_COMMENT("} compare heap oop"); 4194 } 4195 4196 // Load heap oop and decompress, if necessary. 4197 void MacroAssembler::load_heap_oop(Register dest, const Address &a) { 4198 if (UseCompressedOops) { 4199 z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0()); 4200 oop_decoder(dest, dest, true); 4201 } else { 4202 z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0()); 4203 } 4204 } 4205 4206 // Load heap oop and decompress, if necessary. 4207 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) { 4208 if (UseCompressedOops) { 4209 z_llgf(dest, disp, base); 4210 oop_decoder(dest, dest, true); 4211 } else { 4212 z_lg(dest, disp, base); 4213 } 4214 } 4215 4216 // Load heap oop and decompress, if necessary. 4217 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) { 4218 if (UseCompressedOops) { 4219 z_llgf(dest, disp, base); 4220 oop_decoder(dest, dest, false); 4221 } else { 4222 z_lg(dest, disp, base); 4223 } 4224 } 4225 4226 // Compress, if necessary, and store oop to heap. 4227 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) { 4228 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0; 4229 if (UseCompressedOops) { 4230 assert_different_registers(Roop, offset.register_or_noreg(), base); 4231 encode_heap_oop(Roop); 4232 z_st(Roop, offset.constant_or_zero(), Ridx, base); 4233 } else { 4234 z_stg(Roop, offset.constant_or_zero(), Ridx, base); 4235 } 4236 } 4237 4238 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL. 4239 void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) { 4240 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0; 4241 if (UseCompressedOops) { 4242 assert_different_registers(Roop, offset.register_or_noreg(), base); 4243 encode_heap_oop_not_null(Roop); 4244 z_st(Roop, offset.constant_or_zero(), Ridx, base); 4245 } else { 4246 z_stg(Roop, offset.constant_or_zero(), Ridx, base); 4247 } 4248 } 4249 4250 // Store NULL oop to heap. 4251 void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) { 4252 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0; 4253 if (UseCompressedOops) { 4254 z_st(zero, offset.constant_or_zero(), Ridx, base); 4255 } else { 4256 z_stg(zero, offset.constant_or_zero(), Ridx, base); 4257 } 4258 } 4259 4260 //------------------------------------------------- 4261 // Encode compressed oop. Generally usable encoder. 4262 //------------------------------------------------- 4263 // Rsrc - contains regular oop on entry. It remains unchanged. 4264 // Rdst - contains compressed oop on exit. 4265 // Rdst and Rsrc may indicate same register, in which case Rsrc does not remain unchanged. 4266 // 4267 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality. 4268 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance. 4269 // 4270 // only32bitValid is set, if later code only uses the lower 32 bits. In this 4271 // case we must not fix the upper 32 bits. 4272 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, 4273 Register Rbase, int pow2_offset, bool only32bitValid) { 4274 4275 const address oop_base = Universe::narrow_oop_base(); 4276 const int oop_shift = Universe::narrow_oop_shift(); 4277 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4278 4279 assert(UseCompressedOops, "must be on to call this method"); 4280 assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder"); 4281 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4282 4283 if (disjoint || (oop_base == NULL)) { 4284 BLOCK_COMMENT("cOop encoder zeroBase {"); 4285 if (oop_shift == 0) { 4286 if (oop_base != NULL && !only32bitValid) { 4287 z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again. 4288 } else { 4289 lgr_if_needed(Rdst, Rsrc); 4290 } 4291 } else { 4292 z_srlg(Rdst, Rsrc, oop_shift); 4293 if (oop_base != NULL && !only32bitValid) { 4294 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4295 } 4296 } 4297 BLOCK_COMMENT("} cOop encoder zeroBase"); 4298 return; 4299 } 4300 4301 bool used_R0 = false; 4302 bool used_R1 = false; 4303 4304 BLOCK_COMMENT("cOop encoder general {"); 4305 assert_different_registers(Rdst, Z_R1); 4306 assert_different_registers(Rsrc, Rbase); 4307 if (maybeNULL) { 4308 Label done; 4309 // We reorder shifting and subtracting, so that we can compare 4310 // and shift in parallel: 4311 // 4312 // cycle 0: potential LoadN, base = <const> 4313 // cycle 1: base = !base dst = src >> 3, cmp cr = (src != 0) 4314 // cycle 2: if (cr) br, dst = dst + base + offset 4315 4316 // Get oop_base components. 4317 if (pow2_offset == -1) { 4318 if (Rdst == Rbase) { 4319 if (Rdst == Z_R1 || Rsrc == Z_R1) { 4320 Rbase = Z_R0; 4321 used_R0 = true; 4322 } else { 4323 Rdst = Z_R1; 4324 used_R1 = true; 4325 } 4326 } 4327 if (Rbase == Z_R1) { 4328 used_R1 = true; 4329 } 4330 pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift); 4331 } 4332 assert_different_registers(Rdst, Rbase); 4333 4334 // Check for NULL oop (must be left alone) and shift. 4335 if (oop_shift != 0) { // Shift out alignment bits 4336 if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set. 4337 z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4338 } else { 4339 z_srlg(Rdst, Rsrc, oop_shift); 4340 z_ltgr(Rsrc, Rsrc); // This is the recommended way of testing for zero. 4341 // This probably is faster, as it does not write a register. No! 4342 // z_cghi(Rsrc, 0); 4343 } 4344 } else { 4345 z_ltgr(Rdst, Rsrc); // Move NULL to result register. 4346 } 4347 z_bre(done); 4348 4349 // Subtract oop_base components. 4350 if ((Rdst == Z_R0) || (Rbase == Z_R0)) { 4351 z_algr(Rdst, Rbase); 4352 if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); } 4353 } else { 4354 add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst); 4355 } 4356 if (!only32bitValid) { 4357 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4358 } 4359 bind(done); 4360 4361 } else { // not null 4362 // Get oop_base components. 4363 if (pow2_offset == -1) { 4364 pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base); 4365 } 4366 4367 // Subtract oop_base components and shift. 4368 if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) { 4369 // Don't use lay instruction. 4370 if (Rdst == Rsrc) { 4371 z_algr(Rdst, Rbase); 4372 } else { 4373 lgr_if_needed(Rdst, Rbase); 4374 z_algr(Rdst, Rsrc); 4375 } 4376 if (pow2_offset != 0) add2reg(Rdst, pow2_offset); 4377 } else { 4378 add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc); 4379 } 4380 if (oop_shift != 0) { // Shift out alignment bits. 4381 z_srlg(Rdst, Rdst, oop_shift); 4382 } 4383 if (!only32bitValid) { 4384 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4385 } 4386 } 4387 #ifdef ASSERT 4388 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); } 4389 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); } 4390 #endif 4391 BLOCK_COMMENT("} cOop encoder general"); 4392 } 4393 4394 //------------------------------------------------- 4395 // decode compressed oop. Generally usable decoder. 4396 //------------------------------------------------- 4397 // Rsrc - contains compressed oop on entry. 4398 // Rdst - contains regular oop on exit. 4399 // Rdst and Rsrc may indicate same register. 4400 // Rdst must not be the same register as Rbase, if Rbase was preloaded (before call). 4401 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch. 4402 // Rbase - register to use for the base 4403 // pow2_offset - offset of base to nice value. If -1, base must be loaded. 4404 // For performance, it is good to 4405 // - avoid Z_R0 for any of the argument registers. 4406 // - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance. 4407 // - avoid Z_R1 for Rdst if Rdst == Rbase. 4408 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) { 4409 4410 const address oop_base = Universe::narrow_oop_base(); 4411 const int oop_shift = Universe::narrow_oop_shift(); 4412 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4413 4414 assert(UseCompressedOops, "must be on to call this method"); 4415 assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder"); 4416 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), 4417 "cOop encoder detected bad shift"); 4418 4419 // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary. 4420 4421 if (oop_base != NULL) { 4422 unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff; 4423 unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff; 4424 unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff; 4425 if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) { 4426 BLOCK_COMMENT("cOop decoder disjointBase {"); 4427 // We do not need to load the base. Instead, we can install the upper bits 4428 // with an OR instead of an ADD. 4429 Label done; 4430 4431 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4432 if (maybeNULL) { // NULL ptr must be preserved! 4433 z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4434 z_bre(done); 4435 } else { 4436 z_sllg(Rdst, Rsrc, oop_shift); // Logical shift leaves condition code alone. 4437 } 4438 if ((oop_base_hl != 0) && (oop_base_hh != 0)) { 4439 z_oihf(Rdst, oop_base_hf); 4440 } else if (oop_base_hl != 0) { 4441 z_oihl(Rdst, oop_base_hl); 4442 } else { 4443 assert(oop_base_hh != 0, "not heapbased mode"); 4444 z_oihh(Rdst, oop_base_hh); 4445 } 4446 bind(done); 4447 BLOCK_COMMENT("} cOop decoder disjointBase"); 4448 } else { 4449 BLOCK_COMMENT("cOop decoder general {"); 4450 // There are three decode steps: 4451 // scale oop offset (shift left) 4452 // get base (in reg) and pow2_offset (constant) 4453 // add base, pow2_offset, and oop offset 4454 // The following register overlap situations may exist: 4455 // Rdst == Rsrc, Rbase any other 4456 // not a problem. Scaling in-place leaves Rbase undisturbed. 4457 // Loading Rbase does not impact the scaled offset. 4458 // Rdst == Rbase, Rsrc any other 4459 // scaling would destroy a possibly preloaded Rbase. Loading Rbase 4460 // would destroy the scaled offset. 4461 // Remedy: use Rdst_tmp if Rbase has been preloaded. 4462 // use Rbase_tmp if base has to be loaded. 4463 // Rsrc == Rbase, Rdst any other 4464 // Only possible without preloaded Rbase. 4465 // Loading Rbase does not destroy compressed oop because it was scaled into Rdst before. 4466 // Rsrc == Rbase, Rdst == Rbase 4467 // Only possible without preloaded Rbase. 4468 // Loading Rbase would destroy compressed oop. Scaling in-place is ok. 4469 // Remedy: use Rbase_tmp. 4470 // 4471 Label done; 4472 Register Rdst_tmp = Rdst; 4473 Register Rbase_tmp = Rbase; 4474 bool used_R0 = false; 4475 bool used_R1 = false; 4476 bool base_preloaded = pow2_offset >= 0; 4477 guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller"); 4478 assert(oop_shift != 0, "room for optimization"); 4479 4480 // Check if we need to use scratch registers. 4481 if (Rdst == Rbase) { 4482 assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg"); 4483 if (Rdst != Rsrc) { 4484 if (base_preloaded) { Rdst_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; } 4485 else { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; } 4486 } else { 4487 Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; 4488 } 4489 } 4490 if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase); 4491 4492 // Scale oop and check for NULL. 4493 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4494 if (maybeNULL) { // NULL ptr must be preserved! 4495 z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4496 z_bre(done); 4497 } else { 4498 z_sllg(Rdst_tmp, Rsrc, oop_shift); // Logical shift leaves condition code alone. 4499 } 4500 4501 // Get oop_base components. 4502 if (!base_preloaded) { 4503 pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base); 4504 } 4505 4506 // Add up all components. 4507 if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) { 4508 z_algr(Rdst_tmp, Rbase_tmp); 4509 if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); } 4510 } else { 4511 add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp); 4512 } 4513 4514 bind(done); 4515 lgr_if_needed(Rdst, Rdst_tmp); 4516 #ifdef ASSERT 4517 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); } 4518 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); } 4519 #endif 4520 BLOCK_COMMENT("} cOop decoder general"); 4521 } 4522 } else { 4523 BLOCK_COMMENT("cOop decoder zeroBase {"); 4524 if (oop_shift == 0) { 4525 lgr_if_needed(Rdst, Rsrc); 4526 } else { 4527 z_sllg(Rdst, Rsrc, oop_shift); 4528 } 4529 BLOCK_COMMENT("} cOop decoder zeroBase"); 4530 } 4531 } 4532 4533 void MacroAssembler::load_mirror(Register mirror, Register method) { 4534 mem2reg_opt(mirror, Address(method, Method::const_offset())); 4535 mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset())); 4536 mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes())); 4537 mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset())); 4538 } 4539 4540 //--------------------------------------------------------------- 4541 //--- Operations on arrays. 4542 //--------------------------------------------------------------- 4543 4544 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4545 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4546 // work registers anyway. 4547 // Actually, only r0, r1, and r5 are killed. 4548 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) { 4549 // Src_addr is evenReg. 4550 // Src_len is odd_Reg. 4551 4552 int block_start = offset(); 4553 Register tmp_reg = src_len; // Holds target instr addr for EX. 4554 Register dst_len = Z_R1; // Holds dst len for MVCLE. 4555 Register dst_addr = Z_R0; // Holds dst addr for MVCLE. 4556 4557 Label doXC, doMVCLE, done; 4558 4559 BLOCK_COMMENT("Clear_Array {"); 4560 4561 // Check for zero len and convert to long. 4562 z_ltgfr(src_len, cnt_arg); // Remember casted value for doSTG case. 4563 z_bre(done); // Nothing to do if len == 0. 4564 4565 // Prefetch data to be cleared. 4566 if (VM_Version::has_Prefetch()) { 4567 z_pfd(0x02, 0, Z_R0, base_pointer_arg); 4568 z_pfd(0x02, 256, Z_R0, base_pointer_arg); 4569 } 4570 4571 z_sllg(dst_len, src_len, 3); // #bytes to clear. 4572 z_cghi(src_len, 32); // Check for len <= 256 bytes (<=32 DW). 4573 z_brnh(doXC); // If so, use executed XC to clear. 4574 4575 // MVCLE: initialize long arrays (general case). 4576 bind(doMVCLE); 4577 z_lgr(dst_addr, base_pointer_arg); 4578 clear_reg(src_len, true, false); // Src len of MVCLE is zero. 4579 4580 MacroAssembler::move_long_ext(dst_addr, src_addr, 0); 4581 z_bru(done); 4582 4583 // XC: initialize short arrays. 4584 Label XC_template; // Instr template, never exec directly! 4585 bind(XC_template); 4586 z_xc(0,0,base_pointer_arg,0,base_pointer_arg); 4587 4588 bind(doXC); 4589 add2reg(dst_len, -1); // Get #bytes-1 for EXECUTE. 4590 if (VM_Version::has_ExecuteExtensions()) { 4591 z_exrl(dst_len, XC_template); // Execute XC with var. len. 4592 } else { 4593 z_larl(tmp_reg, XC_template); 4594 z_ex(dst_len,0,Z_R0,tmp_reg); // Execute XC with var. len. 4595 } 4596 // z_bru(done); // fallthru 4597 4598 bind(done); 4599 4600 BLOCK_COMMENT("} Clear_Array"); 4601 4602 int block_end = offset(); 4603 return block_end - block_start; 4604 } 4605 4606 // Compiler ensures base is doubleword aligned and cnt is count of doublewords. 4607 // Emitter does not KILL any arguments nor work registers. 4608 // Emitter generates up to 16 XC instructions, depending on the array length. 4609 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) { 4610 int block_start = offset(); 4611 int off; 4612 int lineSize_Bytes = AllocatePrefetchStepSize; 4613 int lineSize_DW = AllocatePrefetchStepSize>>LogBytesPerWord; 4614 bool doPrefetch = VM_Version::has_Prefetch(); 4615 int XC_maxlen = 256; 4616 int numXCInstr = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0; 4617 4618 BLOCK_COMMENT("Clear_Array_Const {"); 4619 assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only"); 4620 4621 // Do less prefetching for very short arrays. 4622 if (numXCInstr > 0) { 4623 // Prefetch only some cache lines, then begin clearing. 4624 if (doPrefetch) { 4625 if (cnt*BytesPerWord <= lineSize_Bytes/4) { // If less than 1/4 of a cache line to clear, 4626 z_pfd(0x02, 0, Z_R0, base); // prefetch just the first cache line. 4627 } else { 4628 assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines"); 4629 for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) { 4630 z_pfd(0x02, off*lineSize_Bytes, Z_R0, base); 4631 } 4632 } 4633 } 4634 4635 for (off=0; off<(numXCInstr-1); off++) { 4636 z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base); 4637 4638 // Prefetch some cache lines in advance. 4639 if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) { 4640 z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base); 4641 } 4642 } 4643 if (off*XC_maxlen < cnt*BytesPerWord) { 4644 z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base); 4645 } 4646 } 4647 BLOCK_COMMENT("} Clear_Array_Const"); 4648 4649 int block_end = offset(); 4650 return block_end - block_start; 4651 } 4652 4653 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4654 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4655 // work registers anyway. 4656 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed. 4657 // 4658 // For very large arrays, exploit MVCLE H/W support. 4659 // MVCLE instruction automatically exploits H/W-optimized page mover. 4660 // - Bytes up to next page boundary are cleared with a series of XC to self. 4661 // - All full pages are cleared with the page mover H/W assist. 4662 // - Remaining bytes are again cleared by a series of XC to self. 4663 // 4664 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) { 4665 // Src_addr is evenReg. 4666 // Src_len is odd_Reg. 4667 4668 int block_start = offset(); 4669 Register dst_len = Z_R1; // Holds dst len for MVCLE. 4670 Register dst_addr = Z_R0; // Holds dst addr for MVCLE. 4671 4672 BLOCK_COMMENT("Clear_Array_Const_Big {"); 4673 4674 // Get len to clear. 4675 load_const_optimized(dst_len, (long)cnt*8L); // in Bytes = #DW*8 4676 4677 // Prepare other args to MVCLE. 4678 z_lgr(dst_addr, base_pointer_arg); 4679 // Indicate unused result. 4680 (void) clear_reg(src_len, true, false); // Src len of MVCLE is zero. 4681 4682 // Clear. 4683 MacroAssembler::move_long_ext(dst_addr, src_addr, 0); 4684 BLOCK_COMMENT("} Clear_Array_Const_Big"); 4685 4686 int block_end = offset(); 4687 return block_end - block_start; 4688 } 4689 4690 // Allocator. 4691 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg, 4692 Register cnt_reg, 4693 Register tmp1_reg, Register tmp2_reg) { 4694 // Tmp1 is oddReg. 4695 // Tmp2 is evenReg. 4696 4697 int block_start = offset(); 4698 Label doMVC, doMVCLE, done, MVC_template; 4699 4700 BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {"); 4701 4702 // Check for zero len and convert to long. 4703 z_ltgfr(cnt_reg, cnt_reg); // Remember casted value for doSTG case. 4704 z_bre(done); // Nothing to do if len == 0. 4705 4706 z_sllg(Z_R1, cnt_reg, 3); // Dst len in bytes. calc early to have the result ready. 4707 4708 z_cghi(cnt_reg, 32); // Check for len <= 256 bytes (<=32 DW). 4709 z_brnh(doMVC); // If so, use executed MVC to clear. 4710 4711 bind(doMVCLE); // A lot of data (more than 256 bytes). 4712 // Prep dest reg pair. 4713 z_lgr(Z_R0, dst_reg); // dst addr 4714 // Dst len already in Z_R1. 4715 // Prep src reg pair. 4716 z_lgr(tmp2_reg, src_reg); // src addr 4717 z_lgr(tmp1_reg, Z_R1); // Src len same as dst len. 4718 4719 // Do the copy. 4720 move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache. 4721 z_bru(done); // All done. 4722 4723 bind(MVC_template); // Just some data (not more than 256 bytes). 4724 z_mvc(0, 0, dst_reg, 0, src_reg); 4725 4726 bind(doMVC); 4727 4728 if (VM_Version::has_ExecuteExtensions()) { 4729 add2reg(Z_R1, -1); 4730 } else { 4731 add2reg(tmp1_reg, -1, Z_R1); 4732 z_larl(Z_R1, MVC_template); 4733 } 4734 4735 if (VM_Version::has_Prefetch()) { 4736 z_pfd(1, 0,Z_R0,src_reg); 4737 z_pfd(2, 0,Z_R0,dst_reg); 4738 // z_pfd(1,256,Z_R0,src_reg); // Assume very short copy. 4739 // z_pfd(2,256,Z_R0,dst_reg); 4740 } 4741 4742 if (VM_Version::has_ExecuteExtensions()) { 4743 z_exrl(Z_R1, MVC_template); 4744 } else { 4745 z_ex(tmp1_reg, 0, Z_R0, Z_R1); 4746 } 4747 4748 bind(done); 4749 4750 BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint"); 4751 4752 int block_end = offset(); 4753 return block_end - block_start; 4754 } 4755 4756 //------------------------------------------------------ 4757 // Special String Intrinsics. Implementation 4758 //------------------------------------------------------ 4759 4760 // Intrinsics for CompactStrings 4761 4762 // Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result 4763 // The result is the number of characters copied before the first incompatible character was found. 4764 // If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise. 4765 // 4766 // Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure: 4767 // - Different number of characters may have been written to dead array (if tmp2 not provided). 4768 // - Returns a number <cnt instead of 0. (Result gets compared with cnt.) 4769 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register odd_reg, 4770 Register even_reg, Register tmp, Register tmp2) { 4771 int block_start = offset(); 4772 Label Lloop1, Lloop2, Lslow, Ldone; 4773 const Register addr2 = dst, ind1 = result, mask = tmp; 4774 const bool precise = (tmp2 != noreg); 4775 4776 BLOCK_COMMENT("string_compress {"); 4777 4778 z_sll(odd_reg, 1); // Number of bytes to read. (Must be a positive simm32.) 4779 clear_reg(ind1); // Index to read. 4780 z_llilf(mask, 0xFF00FF00); 4781 z_ahi(odd_reg, -16); // Last possible index for fast loop. 4782 z_brl(Lslow); 4783 4784 // ind1: index, even_reg: index increment, odd_reg: index limit 4785 z_iihf(mask, 0xFF00FF00); 4786 z_lhi(even_reg, 16); 4787 4788 bind(Lloop1); // 8 Characters per iteration. 4789 z_lg(Z_R0, Address(src, ind1)); 4790 z_lg(Z_R1, Address(src, ind1, 8)); 4791 if (precise) { 4792 if (VM_Version::has_DistinctOpnds()) { 4793 z_ogrk(tmp2, Z_R0, Z_R1); 4794 } else { 4795 z_lgr(tmp2, Z_R0); 4796 z_ogr(tmp2, Z_R1); 4797 } 4798 z_ngr(tmp2, mask); 4799 z_brne(Lslow); // Failed fast case, retry slowly. 4800 } 4801 z_stcmh(Z_R0, 5, 0, addr2); 4802 z_stcm(Z_R0, 5, 2, addr2); 4803 if (!precise) { z_ogr(Z_R0, Z_R1); } 4804 z_stcmh(Z_R1, 5, 4, addr2); 4805 z_stcm(Z_R1, 5, 6, addr2); 4806 if (!precise) { 4807 z_ngr(Z_R0, mask); 4808 z_brne(Ldone); // Failed (more than needed was written). 4809 } 4810 z_aghi(addr2, 8); 4811 z_brxle(ind1, even_reg, Lloop1); 4812 4813 bind(Lslow); 4814 // Compute index limit and skip if negative. 4815 z_ahi(odd_reg, 16-2); // Last possible index for slow loop. 4816 z_lhi(even_reg, 2); 4817 z_cr(ind1, odd_reg); 4818 z_brh(Ldone); 4819 4820 bind(Lloop2); // 1 Character per iteration. 4821 z_llh(Z_R0, Address(src, ind1)); 4822 z_tmll(Z_R0, 0xFF00); 4823 z_brnaz(Ldone); // Failed slow case: Return number of written characters. 4824 z_stc(Z_R0, Address(addr2)); 4825 z_aghi(addr2, 1); 4826 z_brxle(ind1, even_reg, Lloop2); 4827 4828 bind(Ldone); // result = ind1 = 2*cnt 4829 z_srl(ind1, 1); 4830 4831 BLOCK_COMMENT("} string_compress"); 4832 4833 return offset() - block_start; 4834 } 4835 4836 // Inflate byte[] to char[]. 4837 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) { 4838 int block_start = offset(); 4839 4840 BLOCK_COMMENT("string_inflate {"); 4841 4842 Register stop_char = Z_R0; 4843 Register table = Z_R1; 4844 Register src_addr = tmp; 4845 4846 assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt); 4847 assert(dst->encoding()%2 == 0, "must be even reg"); 4848 assert(cnt->encoding()%2 == 1, "must be odd reg"); 4849 assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair"); 4850 4851 StubRoutines::zarch::generate_load_trot_table_addr(this, table); // kills Z_R0 (if ASSERT) 4852 clear_reg(stop_char); // Stop character. Not used here, but initialized to have a defined value. 4853 lgr_if_needed(src_addr, src); 4854 z_llgfr(cnt, cnt); // # src characters, must be a positive simm32. 4855 4856 translate_ot(dst, src_addr, /* mask = */ 0x0001); 4857 4858 BLOCK_COMMENT("} string_inflate"); 4859 4860 return offset() - block_start; 4861 } 4862 4863 // Inflate byte[] to char[]. odd_reg contains cnt. Kills src. 4864 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg, 4865 Register even_reg, Register tmp) { 4866 int block_start = offset(); 4867 4868 BLOCK_COMMENT("string_inflate {"); 4869 4870 Label Lloop1, Lloop2, Lslow, Ldone; 4871 const Register addr1 = src, ind2 = tmp; 4872 4873 z_sll(odd_reg, 1); // Number of bytes to write. (Must be a positive simm32.) 4874 clear_reg(ind2); // Index to write. 4875 z_ahi(odd_reg, -16); // Last possible index for fast loop. 4876 z_brl(Lslow); 4877 4878 // ind2: index, even_reg: index increment, odd_reg: index limit 4879 clear_reg(Z_R0); 4880 clear_reg(Z_R1); 4881 z_lhi(even_reg, 16); 4882 4883 bind(Lloop1); // 8 Characters per iteration. 4884 z_icmh(Z_R0, 5, 0, addr1); 4885 z_icmh(Z_R1, 5, 4, addr1); 4886 z_icm(Z_R0, 5, 2, addr1); 4887 z_icm(Z_R1, 5, 6, addr1); 4888 z_aghi(addr1, 8); 4889 z_stg(Z_R0, Address(dst, ind2)); 4890 z_stg(Z_R1, Address(dst, ind2, 8)); 4891 z_brxle(ind2, even_reg, Lloop1); 4892 4893 bind(Lslow); 4894 // Compute index limit and skip if negative. 4895 z_ahi(odd_reg, 16-2); // Last possible index for slow loop. 4896 z_lhi(even_reg, 2); 4897 z_cr(ind2, odd_reg); 4898 z_brh(Ldone); 4899 4900 bind(Lloop2); // 1 Character per iteration. 4901 z_llc(Z_R0, Address(addr1)); 4902 z_sth(Z_R0, Address(dst, ind2)); 4903 z_aghi(addr1, 1); 4904 z_brxle(ind2, even_reg, Lloop2); 4905 4906 bind(Ldone); 4907 4908 BLOCK_COMMENT("} string_inflate"); 4909 4910 return offset() - block_start; 4911 } 4912 4913 // Kills src. 4914 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt, 4915 Register odd_reg, Register even_reg, Register tmp) { 4916 int block_start = offset(); 4917 Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone; 4918 const Register addr = src, mask = tmp; 4919 4920 BLOCK_COMMENT("has_negatives {"); 4921 4922 z_llgfr(Z_R1, cnt); // Number of bytes to read. (Must be a positive simm32.) 4923 z_llilf(mask, 0x80808080); 4924 z_lhi(result, 1); // Assume true. 4925 // Last possible addr for fast loop. 4926 z_lay(odd_reg, -16, Z_R1, src); 4927 z_chi(cnt, 16); 4928 z_brl(Lslow); 4929 4930 // ind1: index, even_reg: index increment, odd_reg: index limit 4931 z_iihf(mask, 0x80808080); 4932 z_lghi(even_reg, 16); 4933 4934 bind(Lloop1); // 16 bytes per iteration. 4935 z_lg(Z_R0, Address(addr)); 4936 z_lg(Z_R1, Address(addr, 8)); 4937 z_ogr(Z_R0, Z_R1); 4938 z_ngr(Z_R0, mask); 4939 z_brne(Ldone); // If found return 1. 4940 z_brxlg(addr, even_reg, Lloop1); 4941 4942 bind(Lslow); 4943 z_aghi(odd_reg, 16-1); // Last possible addr for slow loop. 4944 z_lghi(even_reg, 1); 4945 z_cgr(addr, odd_reg); 4946 z_brh(Lnotfound); 4947 4948 bind(Lloop2); // 1 byte per iteration. 4949 z_cli(Address(addr), 0x80); 4950 z_brnl(Ldone); // If found return 1. 4951 z_brxlg(addr, even_reg, Lloop2); 4952 4953 bind(Lnotfound); 4954 z_lhi(result, 0); 4955 4956 bind(Ldone); 4957 4958 BLOCK_COMMENT("} has_negatives"); 4959 4960 return offset() - block_start; 4961 } 4962 4963 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result 4964 unsigned int MacroAssembler::string_compare(Register str1, Register str2, 4965 Register cnt1, Register cnt2, 4966 Register odd_reg, Register even_reg, Register result, int ae) { 4967 int block_start = offset(); 4968 4969 assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result); 4970 assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result); 4971 4972 // If strings are equal up to min length, return the length difference. 4973 const Register diff = result, // Pre-set result with length difference. 4974 min = cnt1, // min number of bytes 4975 tmp = cnt2; 4976 4977 // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a) 4978 // we interchange str1 and str2 in the UL case and negate the result. 4979 // Like this, str1 is always latin1 encoded, except for the UU case. 4980 // In addition, we need 0 (or sign which is 0) extend when using 64 bit register. 4981 const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL); 4982 4983 BLOCK_COMMENT("string_compare {"); 4984 4985 if (used_as_LU) { 4986 z_srl(cnt2, 1); 4987 } 4988 4989 // See if the lengths are different, and calculate min in cnt1. 4990 // Save diff in case we need it for a tie-breaker. 4991 4992 // diff = cnt1 - cnt2 4993 if (VM_Version::has_DistinctOpnds()) { 4994 z_srk(diff, cnt1, cnt2); 4995 } else { 4996 z_lr(diff, cnt1); 4997 z_sr(diff, cnt2); 4998 } 4999 if (str1 != str2) { 5000 if (VM_Version::has_LoadStoreConditional()) { 5001 z_locr(min, cnt2, Assembler::bcondHigh); 5002 } else { 5003 Label Lskip; 5004 z_brl(Lskip); // min ok if cnt1 < cnt2 5005 z_lr(min, cnt2); // min = cnt2 5006 bind(Lskip); 5007 } 5008 } 5009 5010 if (ae == StrIntrinsicNode::UU) { 5011 z_sra(diff, 1); 5012 } 5013 if (str1 != str2) { 5014 Label Ldone; 5015 if (used_as_LU) { 5016 // Loop which searches the first difference character by character. 5017 Label Lloop; 5018 const Register ind1 = Z_R1, 5019 ind2 = min; 5020 int stride1 = 1, stride2 = 2; // See comment above. 5021 5022 // ind1: index, even_reg: index increment, odd_reg: index limit 5023 z_llilf(ind1, (unsigned int)(-stride1)); 5024 z_lhi(even_reg, stride1); 5025 add2reg(odd_reg, -stride1, min); 5026 clear_reg(ind2); // kills min 5027 5028 bind(Lloop); 5029 z_brxh(ind1, even_reg, Ldone); 5030 z_llc(tmp, Address(str1, ind1)); 5031 z_llh(Z_R0, Address(str2, ind2)); 5032 z_ahi(ind2, stride2); 5033 z_sr(tmp, Z_R0); 5034 z_bre(Lloop); 5035 5036 z_lr(result, tmp); 5037 5038 } else { 5039 // Use clcle in fast loop (only for same encoding). 5040 z_lgr(Z_R0, str1); 5041 z_lgr(even_reg, str2); 5042 z_llgfr(Z_R1, min); 5043 z_llgfr(odd_reg, min); 5044 5045 if (ae == StrIntrinsicNode::LL) { 5046 compare_long_ext(Z_R0, even_reg, 0); 5047 } else { 5048 compare_long_uni(Z_R0, even_reg, 0); 5049 } 5050 z_bre(Ldone); 5051 z_lgr(Z_R1, Z_R0); 5052 if (ae == StrIntrinsicNode::LL) { 5053 z_llc(Z_R0, Address(even_reg)); 5054 z_llc(result, Address(Z_R1)); 5055 } else { 5056 z_llh(Z_R0, Address(even_reg)); 5057 z_llh(result, Address(Z_R1)); 5058 } 5059 z_sr(result, Z_R0); 5060 } 5061 5062 // Otherwise, return the difference between the first mismatched chars. 5063 bind(Ldone); 5064 } 5065 5066 if (ae == StrIntrinsicNode::UL) { 5067 z_lcr(result, result); // Negate result (see note above). 5068 } 5069 5070 BLOCK_COMMENT("} string_compare"); 5071 5072 return offset() - block_start; 5073 } 5074 5075 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit, 5076 Register odd_reg, Register even_reg, Register result, bool is_byte) { 5077 int block_start = offset(); 5078 5079 BLOCK_COMMENT("array_equals {"); 5080 5081 assert_different_registers(ary1, limit, odd_reg, even_reg); 5082 assert_different_registers(ary2, limit, odd_reg, even_reg); 5083 5084 Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template; 5085 int base_offset = 0; 5086 5087 if (ary1 != ary2) { 5088 if (is_array_equ) { 5089 base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 5090 5091 // Return true if the same array. 5092 compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true); 5093 5094 // Return false if one of them is NULL. 5095 compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5096 compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5097 5098 // Load the lengths of arrays. 5099 z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes())); 5100 5101 // Return false if the two arrays are not equal length. 5102 z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes())); 5103 z_brne(Ldone_false); 5104 5105 // string len in bytes (right operand) 5106 if (!is_byte) { 5107 z_chi(odd_reg, 128); 5108 z_sll(odd_reg, 1); // preserves flags 5109 z_brh(Lclcle); 5110 } else { 5111 compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5112 } 5113 } else { 5114 z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value. 5115 compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5116 } 5117 5118 5119 // Use clc instruction for up to 256 bytes. 5120 { 5121 Register str1_reg = ary1, 5122 str2_reg = ary2; 5123 if (is_array_equ) { 5124 str1_reg = Z_R1; 5125 str2_reg = even_reg; 5126 add2reg(str1_reg, base_offset, ary1); // string addr (left operand) 5127 add2reg(str2_reg, base_offset, ary2); // string addr (right operand) 5128 } 5129 z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0. 5130 z_brl(Ldone_true); 5131 // Note: We could jump to the template if equal. 5132 5133 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5134 z_exrl(odd_reg, CLC_template); 5135 z_bre(Ldone_true); 5136 // fall through 5137 5138 bind(Ldone_false); 5139 clear_reg(result); 5140 z_bru(Ldone); 5141 5142 bind(CLC_template); 5143 z_clc(0, 0, str1_reg, 0, str2_reg); 5144 } 5145 5146 // Use clcle instruction. 5147 { 5148 bind(Lclcle); 5149 add2reg(even_reg, base_offset, ary2); // string addr (right operand) 5150 add2reg(Z_R0, base_offset, ary1); // string addr (left operand) 5151 5152 z_lgr(Z_R1, odd_reg); // string len in bytes (left operand) 5153 if (is_byte) { 5154 compare_long_ext(Z_R0, even_reg, 0); 5155 } else { 5156 compare_long_uni(Z_R0, even_reg, 0); 5157 } 5158 z_lghi(result, 0); // Preserve flags. 5159 z_brne(Ldone); 5160 } 5161 } 5162 // fall through 5163 5164 bind(Ldone_true); 5165 z_lghi(result, 1); // All characters are equal. 5166 bind(Ldone); 5167 5168 BLOCK_COMMENT("} array_equals"); 5169 5170 return offset() - block_start; 5171 } 5172 5173 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result 5174 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt, 5175 Register needle, Register needlecnt, int needlecntval, 5176 Register odd_reg, Register even_reg, int ae) { 5177 int block_start = offset(); 5178 5179 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite! 5180 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 5181 const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2; 5182 const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1; 5183 Label L_needle1, L_Found, L_NotFound; 5184 5185 BLOCK_COMMENT("string_indexof {"); 5186 5187 if (needle == haystack) { 5188 z_lhi(result, 0); 5189 } else { 5190 5191 // Load first character of needle (R0 used by search_string instructions). 5192 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } 5193 5194 // Compute last haystack addr to use if no match gets found. 5195 if (needlecnt != noreg) { // variable needlecnt 5196 z_ahi(needlecnt, -1); // Remaining characters after first one. 5197 z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare. 5198 if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes. 5199 } else { // constant needlecnt 5200 assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate"); 5201 // Compute index succeeding last element to compare. 5202 if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); } 5203 } 5204 5205 z_llgfr(haycnt, haycnt); // Clear high half. 5206 z_lgr(result, haystack); // Final result will be computed from needle start pointer. 5207 if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes. 5208 z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)). 5209 5210 if (h_csize != n_csize) { 5211 assert(ae == StrIntrinsicNode::UL, "Invalid encoding"); 5212 5213 if (needlecnt != noreg || needlecntval != 1) { 5214 if (needlecnt != noreg) { 5215 compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1); 5216 } 5217 5218 // Main Loop: UL version (now we have at least 2 characters). 5219 Label L_OuterLoop, L_InnerLoop, L_Skip; 5220 bind(L_OuterLoop); // Search for 1st 2 characters. 5221 z_lgr(Z_R1, haycnt); 5222 MacroAssembler::search_string_uni(Z_R1, result); 5223 z_brc(Assembler::bcondNotFound, L_NotFound); 5224 z_lgr(result, Z_R1); 5225 5226 z_lghi(Z_R1, n_csize); 5227 z_lghi(even_reg, h_csize); 5228 bind(L_InnerLoop); 5229 z_llgc(odd_reg, Address(needle, Z_R1)); 5230 z_ch(odd_reg, Address(result, even_reg)); 5231 z_brne(L_Skip); 5232 if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); } 5233 z_brnl(L_Found); 5234 z_aghi(Z_R1, n_csize); 5235 z_aghi(even_reg, h_csize); 5236 z_bru(L_InnerLoop); 5237 5238 bind(L_Skip); 5239 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5240 z_bru(L_OuterLoop); 5241 } 5242 5243 } else { 5244 const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1); 5245 Label L_clcle; 5246 5247 if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) { 5248 if (needlecnt != noreg) { 5249 compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle); 5250 z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC) 5251 z_brl(L_needle1); 5252 } 5253 5254 // Main Loop: clc version (now we have at least 2 characters). 5255 Label L_OuterLoop, CLC_template; 5256 bind(L_OuterLoop); // Search for 1st 2 characters. 5257 z_lgr(Z_R1, haycnt); 5258 if (h_csize == 1) { 5259 MacroAssembler::search_string(Z_R1, result); 5260 } else { 5261 MacroAssembler::search_string_uni(Z_R1, result); 5262 } 5263 z_brc(Assembler::bcondNotFound, L_NotFound); 5264 z_lgr(result, Z_R1); 5265 5266 if (needlecnt != noreg) { 5267 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5268 z_exrl(needlecnt, CLC_template); 5269 } else { 5270 z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle); 5271 } 5272 z_bre(L_Found); 5273 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5274 z_bru(L_OuterLoop); 5275 5276 if (needlecnt != noreg) { 5277 bind(CLC_template); 5278 z_clc(h_csize, 0, Z_R1, n_csize, needle); 5279 } 5280 } 5281 5282 if (needlecnt != noreg || needle_bytes > 256) { 5283 bind(L_clcle); 5284 5285 // Main Loop: clcle version (now we have at least 256 bytes). 5286 Label L_OuterLoop, CLC_template; 5287 bind(L_OuterLoop); // Search for 1st 2 characters. 5288 z_lgr(Z_R1, haycnt); 5289 if (h_csize == 1) { 5290 MacroAssembler::search_string(Z_R1, result); 5291 } else { 5292 MacroAssembler::search_string_uni(Z_R1, result); 5293 } 5294 z_brc(Assembler::bcondNotFound, L_NotFound); 5295 5296 add2reg(Z_R0, n_csize, needle); 5297 add2reg(even_reg, h_csize, Z_R1); 5298 z_lgr(result, Z_R1); 5299 if (needlecnt != noreg) { 5300 z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand) 5301 z_llgfr(odd_reg, needlecnt); 5302 } else { 5303 load_const_optimized(Z_R1, needle_bytes); 5304 if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); } 5305 } 5306 if (h_csize == 1) { 5307 compare_long_ext(Z_R0, even_reg, 0); 5308 } else { 5309 compare_long_uni(Z_R0, even_reg, 0); 5310 } 5311 z_bre(L_Found); 5312 5313 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload. 5314 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5315 z_bru(L_OuterLoop); 5316 } 5317 } 5318 5319 if (needlecnt != noreg || needlecntval == 1) { 5320 bind(L_needle1); 5321 5322 // Single needle character version. 5323 if (h_csize == 1) { 5324 MacroAssembler::search_string(haycnt, result); 5325 } else { 5326 MacroAssembler::search_string_uni(haycnt, result); 5327 } 5328 z_lgr(result, haycnt); 5329 z_brc(Assembler::bcondFound, L_Found); 5330 } 5331 5332 bind(L_NotFound); 5333 add2reg(result, -1, haystack); // Return -1. 5334 5335 bind(L_Found); // Return index (or -1 in fallthrough case). 5336 z_sgr(result, haystack); 5337 if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); } 5338 } 5339 BLOCK_COMMENT("} string_indexof"); 5340 5341 return offset() - block_start; 5342 } 5343 5344 // early clobber: result 5345 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt, 5346 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) { 5347 int block_start = offset(); 5348 5349 BLOCK_COMMENT("string_indexof_char {"); 5350 5351 if (needle == haystack) { 5352 z_lhi(result, 0); 5353 } else { 5354 5355 Label Ldone; 5356 5357 z_llgfr(odd_reg, haycnt); // Preset loop ctr/searchrange end. 5358 if (needle == noreg) { 5359 load_const_optimized(Z_R0, (unsigned long)needleChar); 5360 } else { 5361 if (is_byte) { 5362 z_llgcr(Z_R0, needle); // First (and only) needle char. 5363 } else { 5364 z_llghr(Z_R0, needle); // First (and only) needle char. 5365 } 5366 } 5367 5368 if (!is_byte) { 5369 z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU. 5370 } 5371 5372 z_lgr(even_reg, haystack); // haystack addr 5373 z_agr(odd_reg, haystack); // First char after range end. 5374 z_lghi(result, -1); 5375 5376 if (is_byte) { 5377 MacroAssembler::search_string(odd_reg, even_reg); 5378 } else { 5379 MacroAssembler::search_string_uni(odd_reg, even_reg); 5380 } 5381 z_brc(Assembler::bcondNotFound, Ldone); 5382 if (is_byte) { 5383 if (VM_Version::has_DistinctOpnds()) { 5384 z_sgrk(result, odd_reg, haystack); 5385 } else { 5386 z_sgr(odd_reg, haystack); 5387 z_lgr(result, odd_reg); 5388 } 5389 } else { 5390 z_slgr(odd_reg, haystack); 5391 z_srlg(result, odd_reg, exact_log2(sizeof(jchar))); 5392 } 5393 5394 bind(Ldone); 5395 } 5396 BLOCK_COMMENT("} string_indexof_char"); 5397 5398 return offset() - block_start; 5399 } 5400 5401 5402 //------------------------------------------------- 5403 // Constants (scalar and oop) in constant pool 5404 //------------------------------------------------- 5405 5406 // Add a non-relocated constant to the CP. 5407 int MacroAssembler::store_const_in_toc(AddressLiteral& val) { 5408 long value = val.value(); 5409 address tocPos = long_constant(value); 5410 5411 if (tocPos != NULL) { 5412 int tocOffset = (int)(tocPos - code()->consts()->start()); 5413 return tocOffset; 5414 } 5415 // Address_constant returned NULL, so no constant entry has been created. 5416 // In that case, we return a "fatal" offset, just in case that subsequently 5417 // generated access code is executed. 5418 return -1; 5419 } 5420 5421 // Returns the TOC offset where the address is stored. 5422 // Add a relocated constant to the CP. 5423 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) { 5424 // Use RelocationHolder::none for the constant pool entry. 5425 // Otherwise we will end up with a failing NativeCall::verify(x), 5426 // where x is the address of the constant pool entry. 5427 address tocPos = address_constant((address)oop.value(), RelocationHolder::none); 5428 5429 if (tocPos != NULL) { 5430 int tocOffset = (int)(tocPos - code()->consts()->start()); 5431 RelocationHolder rsp = oop.rspec(); 5432 Relocation *rel = rsp.reloc(); 5433 5434 // Store toc_offset in relocation, used by call_far_patchable. 5435 if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) { 5436 ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset); 5437 } 5438 // Relocate at the load's pc. 5439 relocate(rsp); 5440 5441 return tocOffset; 5442 } 5443 // Address_constant returned NULL, so no constant entry has been created 5444 // in that case, we return a "fatal" offset, just in case that subsequently 5445 // generated access code is executed. 5446 return -1; 5447 } 5448 5449 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { 5450 int tocOffset = store_const_in_toc(a); 5451 if (tocOffset == -1) return false; 5452 address tocPos = tocOffset + code()->consts()->start(); 5453 assert((address)code()->consts()->start() != NULL, "Please add CP address"); 5454 5455 load_long_pcrelative(dst, tocPos); 5456 return true; 5457 } 5458 5459 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { 5460 int tocOffset = store_oop_in_toc(a); 5461 if (tocOffset == -1) return false; 5462 address tocPos = tocOffset + code()->consts()->start(); 5463 assert((address)code()->consts()->start() != NULL, "Please add CP address"); 5464 5465 load_addr_pcrelative(dst, tocPos); 5466 return true; 5467 } 5468 5469 // If the instruction sequence at the given pc is a load_const_from_toc 5470 // sequence, return the value currently stored at the referenced position 5471 // in the TOC. 5472 intptr_t MacroAssembler::get_const_from_toc(address pc) { 5473 5474 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 5475 5476 long offset = get_load_const_from_toc_offset(pc); 5477 address dataLoc = NULL; 5478 if (is_load_const_from_toc_pcrelative(pc)) { 5479 dataLoc = pc + offset; 5480 } else { 5481 CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie. 5482 assert(cb && cb->is_nmethod(), "sanity"); 5483 nmethod* nm = (nmethod*)cb; 5484 dataLoc = nm->ctable_begin() + offset; 5485 } 5486 return *(intptr_t *)dataLoc; 5487 } 5488 5489 // If the instruction sequence at the given pc is a load_const_from_toc 5490 // sequence, copy the passed-in new_data value into the referenced 5491 // position in the TOC. 5492 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) { 5493 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 5494 5495 long offset = MacroAssembler::get_load_const_from_toc_offset(pc); 5496 address dataLoc = NULL; 5497 if (is_load_const_from_toc_pcrelative(pc)) { 5498 dataLoc = pc+offset; 5499 } else { 5500 nmethod* nm = CodeCache::find_nmethod(pc); 5501 assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob"); 5502 dataLoc = nm->ctable_begin() + offset; 5503 } 5504 if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary. 5505 *(unsigned long *)dataLoc = new_data; 5506 } 5507 } 5508 5509 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc 5510 // site. Verify by calling is_load_const_from_toc() before!! 5511 // Offset is +/- 2**32 -> use long. 5512 long MacroAssembler::get_load_const_from_toc_offset(address a) { 5513 assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load"); 5514 // expected code sequence: 5515 // z_lgrl(t, simm32); len = 6 5516 unsigned long inst; 5517 unsigned int len = get_instruction(a, &inst); 5518 return get_pcrel_offset(inst); 5519 } 5520 5521 //********************************************************************************** 5522 // inspection of generated instruction sequences for a particular pattern 5523 //********************************************************************************** 5524 5525 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) { 5526 #ifdef ASSERT 5527 unsigned long inst; 5528 unsigned int len = get_instruction(a+2, &inst); 5529 if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) { 5530 const int range = 128; 5531 Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl"); 5532 VM_Version::z_SIGSEGV(); 5533 } 5534 #endif 5535 // expected code sequence: 5536 // z_lgrl(t, relAddr32); len = 6 5537 //TODO: verify accessed data is in CP, if possible. 5538 return is_load_pcrelative_long(a); // TODO: might be too general. Currently, only lgrl is used. 5539 } 5540 5541 bool MacroAssembler::is_load_const_from_toc_call(address a) { 5542 return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size()); 5543 } 5544 5545 bool MacroAssembler::is_load_const_call(address a) { 5546 return is_load_const(a) && is_call_byregister(a + load_const_size()); 5547 } 5548 5549 //------------------------------------------------- 5550 // Emitters for some really CICS instructions 5551 //------------------------------------------------- 5552 5553 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) { 5554 assert(dst->encoding()%2==0, "must be an even/odd register pair"); 5555 assert(src->encoding()%2==0, "must be an even/odd register pair"); 5556 assert(pad<256, "must be a padding BYTE"); 5557 5558 Label retry; 5559 bind(retry); 5560 Assembler::z_mvcle(dst, src, pad); 5561 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5562 } 5563 5564 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) { 5565 assert(left->encoding() % 2 == 0, "must be an even/odd register pair"); 5566 assert(right->encoding() % 2 == 0, "must be an even/odd register pair"); 5567 assert(pad<256, "must be a padding BYTE"); 5568 5569 Label retry; 5570 bind(retry); 5571 Assembler::z_clcle(left, right, pad, Z_R0); 5572 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5573 } 5574 5575 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) { 5576 assert(left->encoding() % 2 == 0, "must be an even/odd register pair"); 5577 assert(right->encoding() % 2 == 0, "must be an even/odd register pair"); 5578 assert(pad<=0xfff, "must be a padding HALFWORD"); 5579 assert(VM_Version::has_ETF2(), "instruction must be available"); 5580 5581 Label retry; 5582 bind(retry); 5583 Assembler::z_clclu(left, right, pad, Z_R0); 5584 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5585 } 5586 5587 void MacroAssembler::search_string(Register end, Register start) { 5588 assert(end->encoding() != 0, "end address must not be in R0"); 5589 assert(start->encoding() != 0, "start address must not be in R0"); 5590 5591 Label retry; 5592 bind(retry); 5593 Assembler::z_srst(end, start); 5594 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5595 } 5596 5597 void MacroAssembler::search_string_uni(Register end, Register start) { 5598 assert(end->encoding() != 0, "end address must not be in R0"); 5599 assert(start->encoding() != 0, "start address must not be in R0"); 5600 assert(VM_Version::has_ETF3(), "instruction must be available"); 5601 5602 Label retry; 5603 bind(retry); 5604 Assembler::z_srstu(end, start); 5605 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5606 } 5607 5608 void MacroAssembler::kmac(Register srcBuff) { 5609 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5610 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5611 5612 Label retry; 5613 bind(retry); 5614 Assembler::z_kmac(Z_R0, srcBuff); 5615 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5616 } 5617 5618 void MacroAssembler::kimd(Register srcBuff) { 5619 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5620 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5621 5622 Label retry; 5623 bind(retry); 5624 Assembler::z_kimd(Z_R0, srcBuff); 5625 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5626 } 5627 5628 void MacroAssembler::klmd(Register srcBuff) { 5629 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5630 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5631 5632 Label retry; 5633 bind(retry); 5634 Assembler::z_klmd(Z_R0, srcBuff); 5635 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5636 } 5637 5638 void MacroAssembler::km(Register dstBuff, Register srcBuff) { 5639 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 5640 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 5641 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5642 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 5643 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5644 5645 Label retry; 5646 bind(retry); 5647 Assembler::z_km(dstBuff, srcBuff); 5648 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5649 } 5650 5651 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) { 5652 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 5653 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 5654 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5655 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 5656 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5657 5658 Label retry; 5659 bind(retry); 5660 Assembler::z_kmc(dstBuff, srcBuff); 5661 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5662 } 5663 5664 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) { 5665 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5666 5667 Label retry; 5668 bind(retry); 5669 Assembler::z_cksm(crcBuff, srcBuff); 5670 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5671 } 5672 5673 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) { 5674 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5675 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5676 5677 Label retry; 5678 bind(retry); 5679 Assembler::z_troo(r1, r2, m3); 5680 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5681 } 5682 5683 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) { 5684 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5685 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5686 5687 Label retry; 5688 bind(retry); 5689 Assembler::z_trot(r1, r2, m3); 5690 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5691 } 5692 5693 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) { 5694 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5695 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5696 5697 Label retry; 5698 bind(retry); 5699 Assembler::z_trto(r1, r2, m3); 5700 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5701 } 5702 5703 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) { 5704 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5705 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5706 5707 Label retry; 5708 bind(retry); 5709 Assembler::z_trtt(r1, r2, m3); 5710 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5711 } 5712 5713 void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) { 5714 if (scratch == noreg) scratch = Z_R1; 5715 address Astate = SafepointSynchronize::address_of_state(); 5716 BLOCK_COMMENT("safepoint check:"); 5717 5718 if (may_relocate) { 5719 ptrdiff_t total_distance = Astate - this->pc(); 5720 if (RelAddr::is_in_range_of_RelAddr32(total_distance)) { 5721 RelocationHolder rspec = external_word_Relocation::spec(Astate); 5722 (this)->relocate(rspec, relocInfo::pcrel_addr_format); 5723 load_absolute_address(scratch, Astate); 5724 } else { 5725 load_const_optimized(scratch, Astate); 5726 } 5727 } else { 5728 load_absolute_address(scratch, Astate); 5729 } 5730 z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized); 5731 z_brne(slow_path); 5732 } 5733 5734 5735 void MacroAssembler::generate_type_profiling(const Register Rdata, 5736 const Register Rreceiver_klass, 5737 const Register Rwanted_receiver_klass, 5738 const Register Rmatching_row, 5739 bool is_virtual_call) { 5740 const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) - 5741 in_bytes(ReceiverTypeData::receiver_offset(0)); 5742 const int num_rows = ReceiverTypeData::row_limit(); 5743 NearLabel found_free_row; 5744 NearLabel do_increment; 5745 NearLabel found_no_slot; 5746 5747 BLOCK_COMMENT("type profiling {"); 5748 5749 // search for: 5750 // a) The type given in Rwanted_receiver_klass. 5751 // b) The *first* empty row. 5752 5753 // First search for a) only, just running over b) with no regard. 5754 // This is possible because 5755 // wanted_receiver_class == receiver_class && wanted_receiver_class == 0 5756 // is never true (receiver_class can't be zero). 5757 for (int row_num = 0; row_num < num_rows; row_num++) { 5758 // Row_offset should be a well-behaved positive number. The generated code relies 5759 // on that wrt constant code size. Add2reg can handle all row_offset values, but 5760 // will have to vary generated code size. 5761 int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num)); 5762 assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code"); 5763 5764 // Is Rwanted_receiver_klass in this row? 5765 if (VM_Version::has_CompareBranch()) { 5766 z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata); 5767 // Rmatching_row = Rdata + row_offset; 5768 add2reg(Rmatching_row, row_offset, Rdata); 5769 // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot; 5770 compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment); 5771 } else { 5772 add2reg(Rmatching_row, row_offset, Rdata); 5773 z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata); 5774 z_bre(do_increment); 5775 } 5776 } 5777 5778 // Now that we did not find a match, let's search for b). 5779 5780 // We could save the first calculation of Rmatching_row if we woud search for a) in reverse order. 5781 // We would then end up here with Rmatching_row containing the value for row_num == 0. 5782 // We would not see much benefit, if any at all, because the CPU can schedule 5783 // two instructions together with a branch anyway. 5784 for (int row_num = 0; row_num < num_rows; row_num++) { 5785 int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num)); 5786 5787 // Has this row a zero receiver_klass, i.e. is it empty? 5788 if (VM_Version::has_CompareBranch()) { 5789 z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata); 5790 // Rmatching_row = Rdata + row_offset 5791 add2reg(Rmatching_row, row_offset, Rdata); 5792 // if (*row_recv == (intptr_t) 0) goto found_free_row 5793 compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row); 5794 } else { 5795 add2reg(Rmatching_row, row_offset, Rdata); 5796 load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset)); 5797 z_bre(found_free_row); // zero -> Found a free row. 5798 } 5799 } 5800 5801 // No match, no empty row found. 5802 // Increment total counter to indicate polymorphic case. 5803 if (is_virtual_call) { 5804 add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row); 5805 } 5806 z_bru(found_no_slot); 5807 5808 // Here we found an empty row, but we have not found Rwanted_receiver_klass. 5809 // Rmatching_row holds the address to the first empty row. 5810 bind(found_free_row); 5811 // Store receiver_klass into empty slot. 5812 z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row); 5813 5814 // Increment the counter of Rmatching_row. 5815 bind(do_increment); 5816 ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0); 5817 add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata); 5818 5819 bind(found_no_slot); 5820 5821 BLOCK_COMMENT("} type profiling"); 5822 } 5823 5824 //--------------------------------------- 5825 // Helpers for Intrinsic Emitters 5826 //--------------------------------------- 5827 5828 /** 5829 * uint32_t crc; 5830 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 5831 */ 5832 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) { 5833 assert_different_registers(crc, table, tmp); 5834 assert_different_registers(val, table); 5835 if (crc == val) { // Must rotate first to use the unmodified value. 5836 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 5837 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 5838 } else { 5839 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 5840 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 5841 } 5842 z_x(crc, Address(table, tmp, 0)); 5843 } 5844 5845 /** 5846 * uint32_t crc; 5847 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 5848 */ 5849 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 5850 fold_byte_crc32(crc, crc, table, tmp); 5851 } 5852 5853 /** 5854 * Emits code to update CRC-32 with a byte value according to constants in table. 5855 * 5856 * @param [in,out]crc Register containing the crc. 5857 * @param [in]val Register containing the byte to fold into the CRC. 5858 * @param [in]table Register containing the table of crc constants. 5859 * 5860 * uint32_t crc; 5861 * val = crc_table[(val ^ crc) & 0xFF]; 5862 * crc = val ^ (crc >> 8); 5863 */ 5864 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 5865 z_xr(val, crc); 5866 fold_byte_crc32(crc, val, table, val); 5867 } 5868 5869 5870 /** 5871 * @param crc register containing existing CRC (32-bit) 5872 * @param buf register pointing to input byte buffer (byte*) 5873 * @param len register containing number of bytes 5874 * @param table register pointing to CRC table 5875 */ 5876 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, 5877 Register data, bool invertCRC) { 5878 assert_different_registers(crc, buf, len, table, data); 5879 5880 Label L_mainLoop, L_done; 5881 const int mainLoop_stepping = 1; 5882 5883 // Process all bytes in a single-byte loop. 5884 z_ltr(len, len); 5885 z_brnh(L_done); 5886 5887 if (invertCRC) { 5888 not_(crc, noreg, false); // ~c 5889 } 5890 5891 bind(L_mainLoop); 5892 z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 5893 add2reg(buf, mainLoop_stepping); // Advance buffer position. 5894 update_byte_crc32(crc, data, table); 5895 z_brct(len, L_mainLoop); // Iterate. 5896 5897 if (invertCRC) { 5898 not_(crc, noreg, false); // ~c 5899 } 5900 5901 bind(L_done); 5902 } 5903 5904 /** 5905 * Emits code to update CRC-32 with a 4-byte value according to constants in table. 5906 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c. 5907 * 5908 */ 5909 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, 5910 Register t0, Register t1, Register t2, Register t3) { 5911 // This is what we implement (the DOBIG4 part): 5912 // 5913 // #define DOBIG4 c ^= *++buf4; \ 5914 // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ 5915 // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] 5916 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 5917 const int ix0 = 4*(4*CRC32_COLUMN_SIZE); 5918 const int ix1 = 5*(4*CRC32_COLUMN_SIZE); 5919 const int ix2 = 6*(4*CRC32_COLUMN_SIZE); 5920 const int ix3 = 7*(4*CRC32_COLUMN_SIZE); 5921 5922 // XOR crc with next four bytes of buffer. 5923 lgr_if_needed(t0, crc); 5924 z_x(t0, Address(buf, bufDisp)); 5925 if (bufInc != 0) { 5926 add2reg(buf, bufInc); 5927 } 5928 5929 // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices. 5930 rotate_then_insert(t3, t0, 56-2, 63-2, 2, true); // ((c >> 0) & 0xff) << 2 5931 rotate_then_insert(t2, t0, 56-2, 63-2, 2-8, true); // ((c >> 8) & 0xff) << 2 5932 rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2 5933 rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2 5934 5935 // Load pre-calculated table values. 5936 // Use columns 4..7 for big-endian. 5937 z_ly(t3, Address(table, t3, (intptr_t)ix0)); 5938 z_ly(t2, Address(table, t2, (intptr_t)ix1)); 5939 z_ly(t1, Address(table, t1, (intptr_t)ix2)); 5940 z_ly(t0, Address(table, t0, (intptr_t)ix3)); 5941 5942 // Calculate new crc from table values. 5943 z_xr(t2, t3); 5944 z_xr(t0, t1); 5945 z_xr(t0, t2); // Now crc contains the final checksum value. 5946 lgr_if_needed(crc, t0); 5947 } 5948 5949 /** 5950 * @param crc register containing existing CRC (32-bit) 5951 * @param buf register pointing to input byte buffer (byte*) 5952 * @param len register containing number of bytes 5953 * @param table register pointing to CRC table 5954 * 5955 * uses Z_R10..Z_R13 as work register. Must be saved/restored by caller! 5956 */ 5957 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table, 5958 Register t0, Register t1, Register t2, Register t3) { 5959 assert_different_registers(crc, buf, len, table); 5960 5961 Label L_mainLoop, L_tail; 5962 Register data = t0; 5963 Register ctr = Z_R0; 5964 const int mainLoop_stepping = 8; 5965 const int tailLoop_stepping = 1; 5966 const int log_stepping = exact_log2(mainLoop_stepping); 5967 5968 // Don't test for len <= 0 here. This pathological case should not occur anyway. 5969 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. 5970 // The situation itself is detected and handled correctly by the conditional branches 5971 // following aghi(len, -stepping) and aghi(len, +stepping). 5972 5973 not_(crc, noreg, false); // 1s complement of crc 5974 5975 #if 0 5976 { 5977 // Pre-mainLoop alignment did not show any positive effect on performance. 5978 // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment. 5979 5980 z_cghi(len, mainLoop_stepping); // Alignment is useless for short data streams. 5981 z_brnh(L_tail); 5982 5983 // Align buf to word (4-byte) boundary. 5984 z_lcr(ctr, buf); 5985 rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc 5986 z_sgfr(len, ctr); // Remaining len after alignment. 5987 5988 update_byteLoop_crc32(crc, buf, ctr, table, data, false); 5989 } 5990 #endif 5991 5992 // Check for short (<mainLoop_stepping bytes) buffer. 5993 z_srag(ctr, len, log_stepping); 5994 z_brnh(L_tail); 5995 5996 z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data. 5997 rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop 5998 5999 BIND(L_mainLoop); 6000 update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3); 6001 update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3); 6002 z_brct(ctr, L_mainLoop); // Iterate. 6003 6004 z_lrvr(crc, crc); // Revert byte order back to original. 6005 6006 // Process last few (<8) bytes of buffer. 6007 BIND(L_tail); 6008 update_byteLoop_crc32(crc, buf, len, table, data, false); 6009 6010 not_(crc, noreg, false); // 1s complement of crc 6011 } 6012 6013 /** 6014 * @param crc register containing existing CRC (32-bit) 6015 * @param buf register pointing to input byte buffer (byte*) 6016 * @param len register containing number of bytes 6017 * @param table register pointing to CRC table 6018 * 6019 * uses Z_R10..Z_R13 as work register. Must be saved/restored by caller! 6020 */ 6021 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table, 6022 Register t0, Register t1, Register t2, Register t3) { 6023 assert_different_registers(crc, buf, len, table); 6024 6025 Label L_mainLoop, L_tail; 6026 Register data = t0; 6027 Register ctr = Z_R0; 6028 const int mainLoop_stepping = 4; 6029 const int log_stepping = exact_log2(mainLoop_stepping); 6030 6031 // Don't test for len <= 0 here. This pathological case should not occur anyway. 6032 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. 6033 // The situation itself is detected and handled correctly by the conditional branches 6034 // following aghi(len, -stepping) and aghi(len, +stepping). 6035 6036 not_(crc, noreg, false); // 1s complement of crc 6037 6038 // Check for short (<4 bytes) buffer. 6039 z_srag(ctr, len, log_stepping); 6040 z_brnh(L_tail); 6041 6042 z_lrvr(crc, crc); // Revert byte order because we are dealing with big-endian data. 6043 rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop 6044 6045 BIND(L_mainLoop); 6046 update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3); 6047 z_brct(ctr, L_mainLoop); // Iterate. 6048 z_lrvr(crc, crc); // Revert byte order back to original. 6049 6050 // Process last few (<8) bytes of buffer. 6051 BIND(L_tail); 6052 update_byteLoop_crc32(crc, buf, len, table, data, false); 6053 6054 not_(crc, noreg, false); // 1s complement of crc 6055 } 6056 6057 /** 6058 * @param crc register containing existing CRC (32-bit) 6059 * @param buf register pointing to input byte buffer (byte*) 6060 * @param len register containing number of bytes 6061 * @param table register pointing to CRC table 6062 */ 6063 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table, 6064 Register t0, Register t1, Register t2, Register t3) { 6065 assert_different_registers(crc, buf, len, table); 6066 Register data = t0; 6067 6068 update_byteLoop_crc32(crc, buf, len, table, data, true); 6069 } 6070 6071 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) { 6072 assert_different_registers(crc, buf, len, table, tmp); 6073 6074 not_(crc, noreg, false); // ~c 6075 6076 z_llgc(tmp, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 6077 update_byte_crc32(crc, tmp, table); 6078 6079 not_(crc, noreg, false); // ~c 6080 } 6081 6082 // 6083 // Code for BigInteger::multiplyToLen() intrinsic. 6084 // 6085 6086 // dest_lo += src1 + src2 6087 // dest_hi += carry1 + carry2 6088 // Z_R7 is destroyed ! 6089 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, 6090 Register src1, Register src2) { 6091 clear_reg(Z_R7); 6092 z_algr(dest_lo, src1); 6093 z_alcgr(dest_hi, Z_R7); 6094 z_algr(dest_lo, src2); 6095 z_alcgr(dest_hi, Z_R7); 6096 } 6097 6098 // Multiply 64 bit by 64 bit first loop. 6099 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, 6100 Register x_xstart, 6101 Register y, Register y_idx, 6102 Register z, 6103 Register carry, 6104 Register product, 6105 Register idx, Register kdx) { 6106 // jlong carry, x[], y[], z[]; 6107 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) { 6108 // huge_128 product = y[idx] * x[xstart] + carry; 6109 // z[kdx] = (jlong)product; 6110 // carry = (jlong)(product >>> 64); 6111 // } 6112 // z[xstart] = carry; 6113 6114 Label L_first_loop, L_first_loop_exit; 6115 Label L_one_x, L_one_y, L_multiply; 6116 6117 z_aghi(xstart, -1); 6118 z_brl(L_one_x); // Special case: length of x is 1. 6119 6120 // Load next two integers of x. 6121 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6122 mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0)); 6123 6124 6125 bind(L_first_loop); 6126 6127 z_aghi(idx, -1); 6128 z_brl(L_first_loop_exit); 6129 z_aghi(idx, -1); 6130 z_brl(L_one_y); 6131 6132 // Load next two integers of y. 6133 z_sllg(Z_R1_scratch, idx, LogBytesPerInt); 6134 mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0)); 6135 6136 6137 bind(L_multiply); 6138 6139 Register multiplicand = product->successor(); 6140 Register product_low = multiplicand; 6141 6142 lgr_if_needed(multiplicand, x_xstart); 6143 z_mlgr(product, y_idx); // multiplicand * y_idx -> product::multiplicand 6144 clear_reg(Z_R7); 6145 z_algr(product_low, carry); // Add carry to result. 6146 z_alcgr(product, Z_R7); // Add carry of the last addition. 6147 add2reg(kdx, -2); 6148 6149 // Store result. 6150 z_sllg(Z_R7, kdx, LogBytesPerInt); 6151 reg2mem_opt(product_low, Address(z, Z_R7, 0)); 6152 lgr_if_needed(carry, product); 6153 z_bru(L_first_loop); 6154 6155 6156 bind(L_one_y); // Load one 32 bit portion of y as (0,value). 6157 6158 clear_reg(y_idx); 6159 mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false); 6160 z_bru(L_multiply); 6161 6162 6163 bind(L_one_x); // Load one 32 bit portion of x as (0,value). 6164 6165 clear_reg(x_xstart); 6166 mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false); 6167 z_bru(L_first_loop); 6168 6169 bind(L_first_loop_exit); 6170 } 6171 6172 // Multiply 64 bit by 64 bit and add 128 bit. 6173 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, 6174 Register z, 6175 Register yz_idx, Register idx, 6176 Register carry, Register product, 6177 int offset) { 6178 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 6179 // z[kdx] = (jlong)product; 6180 6181 Register multiplicand = product->successor(); 6182 Register product_low = multiplicand; 6183 6184 z_sllg(Z_R7, idx, LogBytesPerInt); 6185 mem2reg_opt(yz_idx, Address(y, Z_R7, offset)); 6186 6187 lgr_if_needed(multiplicand, x_xstart); 6188 z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand 6189 mem2reg_opt(yz_idx, Address(z, Z_R7, offset)); 6190 6191 add2_with_carry(product, product_low, carry, yz_idx); 6192 6193 z_sllg(Z_R7, idx, LogBytesPerInt); 6194 reg2mem_opt(product_low, Address(z, Z_R7, offset)); 6195 6196 } 6197 6198 // Multiply 128 bit by 128 bit. Unrolled inner loop. 6199 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, 6200 Register y, Register z, 6201 Register yz_idx, Register idx, 6202 Register jdx, 6203 Register carry, Register product, 6204 Register carry2) { 6205 // jlong carry, x[], y[], z[]; 6206 // int kdx = ystart+1; 6207 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 6208 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 6209 // z[kdx+idx+1] = (jlong)product; 6210 // jlong carry2 = (jlong)(product >>> 64); 6211 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 6212 // z[kdx+idx] = (jlong)product; 6213 // carry = (jlong)(product >>> 64); 6214 // } 6215 // idx += 2; 6216 // if (idx > 0) { 6217 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 6218 // z[kdx+idx] = (jlong)product; 6219 // carry = (jlong)(product >>> 64); 6220 // } 6221 6222 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 6223 6224 // scale the index 6225 lgr_if_needed(jdx, idx); 6226 and_imm(jdx, 0xfffffffffffffffcL); 6227 rshift(jdx, 2); 6228 6229 6230 bind(L_third_loop); 6231 6232 z_aghi(jdx, -1); 6233 z_brl(L_third_loop_exit); 6234 add2reg(idx, -4); 6235 6236 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 6237 lgr_if_needed(carry2, product); 6238 6239 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 6240 lgr_if_needed(carry, product); 6241 z_bru(L_third_loop); 6242 6243 6244 bind(L_third_loop_exit); // Handle any left-over operand parts. 6245 6246 and_imm(idx, 0x3); 6247 z_brz(L_post_third_loop_done); 6248 6249 Label L_check_1; 6250 6251 z_aghi(idx, -2); 6252 z_brl(L_check_1); 6253 6254 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 6255 lgr_if_needed(carry, product); 6256 6257 6258 bind(L_check_1); 6259 6260 add2reg(idx, 0x2); 6261 and_imm(idx, 0x1); 6262 z_aghi(idx, -1); 6263 z_brl(L_post_third_loop_done); 6264 6265 Register multiplicand = product->successor(); 6266 Register product_low = multiplicand; 6267 6268 z_sllg(Z_R7, idx, LogBytesPerInt); 6269 clear_reg(yz_idx); 6270 mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false); 6271 lgr_if_needed(multiplicand, x_xstart); 6272 z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand 6273 clear_reg(yz_idx); 6274 mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false); 6275 6276 add2_with_carry(product, product_low, yz_idx, carry); 6277 6278 z_sllg(Z_R7, idx, LogBytesPerInt); 6279 reg2mem_opt(product_low, Address(z, Z_R7, 0), false); 6280 rshift(product_low, 32); 6281 6282 lshift(product, 32); 6283 z_ogr(product_low, product); 6284 lgr_if_needed(carry, product_low); 6285 6286 bind(L_post_third_loop_done); 6287 } 6288 6289 void MacroAssembler::multiply_to_len(Register x, Register xlen, 6290 Register y, Register ylen, 6291 Register z, 6292 Register tmp1, Register tmp2, 6293 Register tmp3, Register tmp4, 6294 Register tmp5) { 6295 ShortBranchVerifier sbv(this); 6296 6297 assert_different_registers(x, xlen, y, ylen, z, 6298 tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7); 6299 assert_different_registers(x, xlen, y, ylen, z, 6300 tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8); 6301 6302 z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP); 6303 6304 // In openJdk, we store the argument as 32-bit value to slot. 6305 Address zlen(Z_SP, _z_abi(remaining_cargs)); // Int in long on big endian. 6306 6307 const Register idx = tmp1; 6308 const Register kdx = tmp2; 6309 const Register xstart = tmp3; 6310 6311 const Register y_idx = tmp4; 6312 const Register carry = tmp5; 6313 const Register product = Z_R0_scratch; 6314 const Register x_xstart = Z_R8; 6315 6316 // First Loop. 6317 // 6318 // final static long LONG_MASK = 0xffffffffL; 6319 // int xstart = xlen - 1; 6320 // int ystart = ylen - 1; 6321 // long carry = 0; 6322 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 6323 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 6324 // z[kdx] = (int)product; 6325 // carry = product >>> 32; 6326 // } 6327 // z[xstart] = (int)carry; 6328 // 6329 6330 lgr_if_needed(idx, ylen); // idx = ylen 6331 z_llgf(kdx, zlen); // C2 does not respect int to long conversion for stub calls, thus load zero-extended. 6332 clear_reg(carry); // carry = 0 6333 6334 Label L_done; 6335 6336 lgr_if_needed(xstart, xlen); 6337 z_aghi(xstart, -1); 6338 z_brl(L_done); 6339 6340 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 6341 6342 NearLabel L_second_loop; 6343 compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop); 6344 6345 NearLabel L_carry; 6346 z_aghi(kdx, -1); 6347 z_brz(L_carry); 6348 6349 // Store lower 32 bits of carry. 6350 z_sllg(Z_R1_scratch, kdx, LogBytesPerInt); 6351 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6352 rshift(carry, 32); 6353 z_aghi(kdx, -1); 6354 6355 6356 bind(L_carry); 6357 6358 // Store upper 32 bits of carry. 6359 z_sllg(Z_R1_scratch, kdx, LogBytesPerInt); 6360 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6361 6362 // Second and third (nested) loops. 6363 // 6364 // for (int i = xstart-1; i >= 0; i--) { // Second loop 6365 // carry = 0; 6366 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 6367 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 6368 // (z[k] & LONG_MASK) + carry; 6369 // z[k] = (int)product; 6370 // carry = product >>> 32; 6371 // } 6372 // z[i] = (int)carry; 6373 // } 6374 // 6375 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 6376 6377 const Register jdx = tmp1; 6378 6379 bind(L_second_loop); 6380 6381 clear_reg(carry); // carry = 0; 6382 lgr_if_needed(jdx, ylen); // j = ystart+1 6383 6384 z_aghi(xstart, -1); // i = xstart-1; 6385 z_brl(L_done); 6386 6387 // Use free slots in the current stackframe instead of push/pop. 6388 Address zsave(Z_SP, _z_abi(carg_1)); 6389 reg2mem_opt(z, zsave); 6390 6391 6392 Label L_last_x; 6393 6394 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6395 load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j 6396 z_aghi(xstart, -1); // i = xstart-1; 6397 z_brl(L_last_x); 6398 6399 z_sllg(Z_R1_scratch, xstart, LogBytesPerInt); 6400 mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0)); 6401 6402 6403 Label L_third_loop_prologue; 6404 6405 bind(L_third_loop_prologue); 6406 6407 Address xsave(Z_SP, _z_abi(carg_2)); 6408 Address xlensave(Z_SP, _z_abi(carg_3)); 6409 Address ylensave(Z_SP, _z_abi(carg_4)); 6410 6411 reg2mem_opt(x, xsave); 6412 reg2mem_opt(xstart, xlensave); 6413 reg2mem_opt(ylen, ylensave); 6414 6415 6416 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 6417 6418 mem2reg_opt(z, zsave); 6419 mem2reg_opt(x, xsave); 6420 mem2reg_opt(xlen, xlensave); // This is the decrement of the loop counter! 6421 mem2reg_opt(ylen, ylensave); 6422 6423 add2reg(tmp3, 1, xlen); 6424 z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt); 6425 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6426 z_aghi(tmp3, -1); 6427 z_brl(L_done); 6428 6429 rshift(carry, 32); 6430 z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt); 6431 reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false); 6432 z_bru(L_second_loop); 6433 6434 // Next infrequent code is moved outside loops. 6435 bind(L_last_x); 6436 6437 clear_reg(x_xstart); 6438 mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false); 6439 z_bru(L_third_loop_prologue); 6440 6441 bind(L_done); 6442 6443 z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP); 6444 } 6445 6446 #ifndef PRODUCT 6447 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false). 6448 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) { 6449 Label ok; 6450 if (check_equal) { 6451 z_bre(ok); 6452 } else { 6453 z_brne(ok); 6454 } 6455 stop(msg, id); 6456 bind(ok); 6457 } 6458 6459 // Assert if CC indicates "low". 6460 void MacroAssembler::asm_assert_low(const char *msg, int id) { 6461 Label ok; 6462 z_brnl(ok); 6463 stop(msg, id); 6464 bind(ok); 6465 } 6466 6467 // Assert if CC indicates "high". 6468 void MacroAssembler::asm_assert_high(const char *msg, int id) { 6469 Label ok; 6470 z_brnh(ok); 6471 stop(msg, id); 6472 bind(ok); 6473 } 6474 6475 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false) 6476 // generate non-relocatable code. 6477 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) { 6478 Label ok; 6479 if (check_equal) { z_bre(ok); } 6480 else { z_brne(ok); } 6481 stop_static(msg, id); 6482 bind(ok); 6483 } 6484 6485 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset, 6486 Register mem_base, const char* msg, int id) { 6487 switch (size) { 6488 case 4: 6489 load_and_test_int(Z_R0, Address(mem_base, mem_offset)); 6490 break; 6491 case 8: 6492 load_and_test_long(Z_R0, Address(mem_base, mem_offset)); 6493 break; 6494 default: 6495 ShouldNotReachHere(); 6496 } 6497 if (allow_relocation) { asm_assert(check_equal, msg, id); } 6498 else { asm_assert_static(check_equal, msg, id); } 6499 } 6500 6501 // Check the condition 6502 // expected_size == FP - SP 6503 // after transformation: 6504 // expected_size - FP + SP == 0 6505 // Destroys Register expected_size if no tmp register is passed. 6506 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) { 6507 if (tmp == noreg) { 6508 tmp = expected_size; 6509 } else { 6510 if (tmp != expected_size) { 6511 z_lgr(tmp, expected_size); 6512 } 6513 z_algr(tmp, Z_SP); 6514 z_slg(tmp, 0, Z_R0, Z_SP); 6515 asm_assert_eq(msg, id); 6516 } 6517 } 6518 #endif // !PRODUCT 6519 6520 void MacroAssembler::verify_thread() { 6521 if (VerifyThread) { 6522 unimplemented("", 117); 6523 } 6524 } 6525 6526 // Plausibility check for oops. 6527 void MacroAssembler::verify_oop(Register oop, const char* msg) { 6528 if (!VerifyOops) return; 6529 6530 BLOCK_COMMENT("verify_oop {"); 6531 Register tmp = Z_R0; 6532 unsigned int nbytes_save = 6 *8; 6533 address entry = StubRoutines::verify_oop_subroutine_entry_address(); 6534 save_return_pc(); 6535 push_frame_abi160(nbytes_save); 6536 z_stmg(Z_R0, Z_R5, 160, Z_SP); 6537 6538 z_lgr(Z_ARG2, oop); 6539 load_const(Z_ARG1, (address) msg); 6540 load_const(Z_R1, entry); 6541 z_lg(Z_R1, 0, Z_R1); 6542 call_c(Z_R1); 6543 6544 z_lmg(Z_R0, Z_R5, 160, Z_SP); 6545 pop_frame(); 6546 6547 restore_return_pc(); 6548 BLOCK_COMMENT("} verify_oop "); 6549 } 6550 6551 const char* MacroAssembler::stop_types[] = { 6552 "stop", 6553 "untested", 6554 "unimplemented", 6555 "shouldnotreachhere" 6556 }; 6557 6558 static void stop_on_request(const char* tp, const char* msg) { 6559 tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg); 6560 guarantee(false, "Z assembly code requires stop: %s", msg); 6561 } 6562 6563 void MacroAssembler::stop(int type, const char* msg, int id) { 6564 BLOCK_COMMENT(err_msg("stop: %s {", msg)); 6565 6566 // Setup arguments. 6567 load_const(Z_ARG1, (void*) stop_types[type%stop_end]); 6568 load_const(Z_ARG2, (void*) msg); 6569 get_PC(Z_R14); // Following code pushes a frame without entering a new function. Use current pc as return address. 6570 save_return_pc(); // Saves return pc Z_R14. 6571 push_frame_abi160(0); 6572 call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); 6573 // The plain disassembler does not recognize illtrap. It instead displays 6574 // a 32-bit value. Issueing two illtraps assures the disassembler finds 6575 // the proper beginning of the next instruction. 6576 z_illtrap(); // Illegal instruction. 6577 z_illtrap(); // Illegal instruction. 6578 6579 BLOCK_COMMENT(" } stop"); 6580 } 6581 6582 // Special version of stop() for code size reduction. 6583 // Reuses the previously generated call sequence, if any. 6584 // Generates the call sequence on its own, if necessary. 6585 // Note: This code will work only in non-relocatable code! 6586 // The relative address of the data elements (arg1, arg2) must not change. 6587 // The reentry point must not move relative to it's users. This prerequisite 6588 // should be given for "hand-written" code, if all chain calls are in the same code blob. 6589 // Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe. 6590 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) { 6591 BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg)); 6592 6593 // Setup arguments. 6594 if (allow_relocation) { 6595 // Relocatable version (for comparison purposes). Remove after some time. 6596 load_const(Z_ARG1, (void*) stop_types[type%stop_end]); 6597 load_const(Z_ARG2, (void*) msg); 6598 } else { 6599 load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]); 6600 load_absolute_address(Z_ARG2, (address)msg); 6601 } 6602 if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) { 6603 BLOCK_COMMENT("branch to reentry point:"); 6604 z_brc(bcondAlways, reentry); 6605 } else { 6606 BLOCK_COMMENT("reentry point:"); 6607 reentry = pc(); // Re-entry point for subsequent stop calls. 6608 save_return_pc(); // Saves return pc Z_R14. 6609 push_frame_abi160(0); 6610 if (allow_relocation) { 6611 reentry = NULL; // Prevent reentry if code relocation is allowed. 6612 call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); 6613 } else { 6614 call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2); 6615 } 6616 z_illtrap(); // Illegal instruction as emergency stop, should the above call return. 6617 } 6618 BLOCK_COMMENT(" } stop_chain"); 6619 6620 return reentry; 6621 } 6622 6623 // Special version of stop() for code size reduction. 6624 // Assumes constant relative addresses for data and runtime call. 6625 void MacroAssembler::stop_static(int type, const char* msg, int id) { 6626 stop_chain(NULL, type, msg, id, false); 6627 } 6628 6629 void MacroAssembler::stop_subroutine() { 6630 unimplemented("stop_subroutine", 710); 6631 } 6632 6633 // Prints msg to stdout from within generated code.. 6634 void MacroAssembler::warn(const char* msg) { 6635 RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14); 6636 load_absolute_address(Z_R1, (address) warning); 6637 load_absolute_address(Z_ARG1, (address) msg); 6638 (void) call(Z_R1); 6639 RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers); 6640 } 6641 6642 #ifndef PRODUCT 6643 6644 // Write pattern 0x0101010101010101 in region [low-before, high+after]. 6645 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) { 6646 if (!ZapEmptyStackFields) return; 6647 BLOCK_COMMENT("zap memory region {"); 6648 load_const_optimized(val, 0x0101010101010101); 6649 int size = before + after; 6650 if (low == high && size < 5 && size > 0) { 6651 int offset = -before*BytesPerWord; 6652 for (int i = 0; i < size; ++i) { 6653 z_stg(val, Address(low, offset)); 6654 offset +=(1*BytesPerWord); 6655 } 6656 } else { 6657 add2reg(addr, -before*BytesPerWord, low); 6658 if (after) { 6659 #ifdef ASSERT 6660 jlong check = after * BytesPerWord; 6661 assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !"); 6662 #endif 6663 add2reg(high, after * BytesPerWord); 6664 } 6665 NearLabel loop; 6666 bind(loop); 6667 z_stg(val, Address(addr)); 6668 add2reg(addr, 8); 6669 compare64_and_branch(addr, high, bcondNotHigh, loop); 6670 if (after) { 6671 add2reg(high, -after * BytesPerWord); 6672 } 6673 } 6674 BLOCK_COMMENT("} zap memory region"); 6675 } 6676 #endif // !PRODUCT 6677 6678 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) { 6679 _masm = masm; 6680 _masm->load_absolute_address(_rscratch, (address)flag_addr); 6681 _masm->load_and_test_int(_rscratch, Address(_rscratch)); 6682 if (value) { 6683 _masm->z_brne(_label); // Skip if true, i.e. != 0. 6684 } else { 6685 _masm->z_bre(_label); // Skip if false, i.e. == 0. 6686 } 6687 } 6688 6689 SkipIfEqual::~SkipIfEqual() { 6690 _masm->bind(_label); 6691 }