/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#include <ucontext.h>

#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Move 32-bit register if destination and source are different.
void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lr(rd, rs); }
}

// Move register if destination and source are different.
void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lgr(rd, rs); }
}

// Zero-extend 32-bit register into 64-bit register if destination and source are different.
void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_llgfr(rd, rs); }
}

// Move float register if destination and source are different.
void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) { z_ldr(rd, rs); }
}

// Move integer register if destination and source are different.
// It is assumed that shorter-than-int types are already
// appropriately sign-extended.
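// Example (illustration only): move_reg_if_needed(Z_R3, T_LONG, Z_R2, T_INT)
// emits z_lgfr(Z_R3, Z_R2), i.e. a 32-to-64-bit sign extension.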
void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
                                        BasicType src_type) {
  assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");

  if (dst_type == src_type) {
    lgr_if_needed(dst, src); // Just move all 64 bits.
    return;
  }

  switch (dst_type) {
    // Do not support these types for now.
    //  case T_BOOLEAN:
    case T_BYTE:  // signed byte
      switch (src_type) {
        case T_INT:
          z_lgbr(dst, src);
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_CHAR:
    case T_SHORT:
      switch (src_type) {
        case T_INT:
          if (dst_type == T_CHAR) {
            z_llghr(dst, src);
          } else {
            z_lghr(dst, src);
          }
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_INT:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lr_if_needed(dst, src);
          // llgfr_if_needed(dst, src); // zero-extend (in case we need to find a bug).
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
    case T_LONG:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
          z_lgfr(dst, src); // sign extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    case T_OBJECT:
    case T_ARRAY:
    case T_VOID:
    case T_ADDRESS:
      switch (src_type) {
        // These types don't make sense to be converted to pointers:
        //  case T_BOOLEAN:
        //  case T_BYTE:
        //  case T_CHAR:
        //  case T_SHORT:

        case T_INT:
          z_llgfr(dst, src); // zero extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    default:
      assert(false, "non-integer dst type");
      return;
  }
}

// Move float register if destination and source are different.
void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
                                         FloatRegister src, BasicType src_type) {
  assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
  assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
  if (dst_type == src_type) {
    ldr_if_needed(dst, src); // Just move all 64 bits.
  } else {
    switch (dst_type) {
      case T_FLOAT:
        assert(src_type == T_DOUBLE, "invalid float type combination");
        z_ledbr(dst, src);
        return;
      case T_DOUBLE:
        assert(src_type == T_FLOAT, "invalid float type combination");
        z_ldebr(dst, src);
        return;
      default:
        assert(false, "non-float dst type");
        return;
    }
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::freg2mem_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch); // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
  } else {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
  }
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::mem2freg_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch); // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
  } else {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register
// (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::reg2mem_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                                 Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          if ((scratch == reg) || (scratch == base) || (reg == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            z_lgr(scratch, base);
            add2reg(base, disp);
            (this->*classic)(reg, 0, index, base);
            z_lgr(base, scratch); // Restore base.
          }
        }
      }
    }
  }
}

int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
  int store_offset = offset();
  if (is_double) {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
  } else {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
  }
  return store_offset;
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) will be used as work register where possible.
void MacroAssembler::mem2reg_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if ((reg == index) && (reg == base)) {
        z_sllg(reg, reg, 1);
        add2reg(reg, disp);
        (this->*classic)(reg, 0, noreg, reg);
      } else if ((reg == index) && (reg != Z_R0)) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, reg, base);
      } else if (reg == base) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, index, reg);
      } else if (reg != Z_R0) {
        add2reg(reg, disp, base);
        (this->*classic)(reg, 0, index, reg);
      } else { // reg == Z_R0 && reg != base here
        add2reg(base, disp);
        (this->*classic)(reg, 0, index, base);
        add2reg(base, -disp);
      }
    }
  }
}

void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
  if (is_double) {
    z_lg(reg, a);
  } else {
    mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
  }
}

void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
  mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
}

void MacroAssembler::and_imm(Register r, long mask,
                             Register tmp /* = Z_R0 */,
                             bool wide    /* = false */) {
  assert(wide || Immediate::is_simm32(mask), "mask value too large");

  if (!wide) {
    z_nilf(r, mask);
    return;
  }

  assert(r != tmp, "need a different temporary register!");
  load_const_optimized(tmp, mask);
  z_ngr(r, tmp);
}

// Calculate the 1's complement.
// Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as optimization hint for 32-bit results.
void MacroAssembler::not_(Register r1, Register r2, bool wide) {

  if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
    z_xilf(r1, -1);
    if (wide) {
      z_xihf(r1, -1);
    }
  } else { // Distinct src and dst registers.
    if (VM_Version::has_DistinctOpnds()) {
      load_const_optimized(r1, -1);
      z_xgrk(r1, r2, r1);
    } else {
      if (wide) {
        z_lgr(r1, r2);
        z_xilf(r1, -1);
        z_xihf(r1, -1);
      } else {
        z_lr(r1, r2);
        z_xilf(r1, -1);
      }
    }
  }
}

unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
  assert(lBitPos >= 0,       "zero is leftmost bit position");
  assert(rBitPos <= 63,      "63 is rightmost bit position");
  assert(lBitPos <= rBitPos, "inverted selection interval");
  return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
}

// Helper function for the "Rotate_then_<logicalOP>" emitters.
// Rotate src, then mask register contents such that only bits in range survive.
// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
// For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
// The caller must ensure that the selected range only contains bits with defined value.
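// Worked example (illustration only): create_mask(48, 63) == 0x000000000000ffffUL,
// so a rotate_then_mask with oneBits == false keeps only the low-order halfword of
// the rotated value, while oneBits == true forces all bits outside 48..63 to 1.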
void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                                      int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
  assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
  bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
  bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
  // Pre-determine which parts of dst will be zero after shift/rotate.
  bool llZero = sll4rll && (nRotate >= 16);
  bool lhZero = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
  bool lfZero = llZero && lhZero;
  bool hlZero = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
  bool hhZero = (srl4rll && (nRotate <= -16));
  bool hfZero = hlZero && hhZero;

  // Rotate then mask src operand.
  // if oneBits == true,  all bits outside selected range are 1s.
  // if oneBits == false, all bits outside selected range are 0s.
  if (src32bit) { // There might be garbage in the upper 32 bits which will get masked away.
    if (dst32bit) {
      z_rll(dst, src, nRotate); // Copy and rotate, upper half of reg remains undisturbed.
    } else {
      if      (sll4rll) { z_sllg(dst, src,  nRotate); }
      else if (srl4rll) { z_srlg(dst, src, -nRotate); }
      else              { z_rllg(dst, src,  nRotate); }
    }
  } else {
    if      (sll4rll) { z_sllg(dst, src,  nRotate); }
    else if (srl4rll) { z_srlg(dst, src, -nRotate); }
    else              { z_rllg(dst, src,  nRotate); }
  }

  unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
  unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
  unsigned int   range_mask_l  = (unsigned int)range_mask;
  unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
  unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
  unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
  unsigned short range_mask_ll = (unsigned short)range_mask;
  // Works for z9 and newer H/W.
  if (oneBits) {
    if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
    if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
  } else {
    // All bits outside range become 0s.
    if (((~range_mask_l) != 0) && !lfZero) {
      z_nilf(dst, range_mask_l);
    }
    if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
      z_nihf(dst, range_mask_h);
    }
  }
}

// Rotate src, then insert selected range from rotated src into dst.
// Clear dst before, if requested.
void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
                                        int nRotate, bool clear_dst) {
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
}

// Rotate src, then and selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
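  // (Illustrative note: the RxSBG-family instructions carry a 6-bit rotate amount,
  // which is why nRotate is masked to 0..63 below; negative rotate amounts map to
  // their mod-64 equivalents.)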
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
}

// Rotate src, then or selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos,
                                    int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
}

// Rotate src, then xor selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
}

void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
  if (inc.is_register()) {
    z_agr(r1, inc.as_register());
  } else { // constant
    intptr_t imm = inc.as_constant();
    add2reg(r1, imm);
  }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
// The optimization tries to avoid the mghi instruction, since it uses the FPU for
// calculation and is thus rather slow.
//
// There is no handling for special cases, e.g. cval==0 or cval==1.
//
// Returns len of generated code block.
unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
  int block_start = offset();

  bool sign_flip = cval < 0;
  cval = sign_flip ? -cval : cval;

  BLOCK_COMMENT("Reg64*Con16 {");

  int bit1 = cval & -cval;                 // Isolate the rightmost set bit of |cval|.
  if (bit1 == cval) {                      // |cval| is a power of two: a single shift suffices.
    z_sllg(rval, rval, exact_log2(bit1));
    if (sign_flip) { z_lcgr(rval, rval); }
  } else {
    int bit2 = (cval-bit1) & -(cval-bit1); // Next-higher set bit.
    if ((bit1+bit2) == cval) {             // |cval| is a sum of two powers of two: shift and add.
      z_sllg(work, rval, exact_log2(bit1));
      z_sllg(rval, rval, exact_log2(bit2));
      z_agr(rval, work);
      if (sign_flip) { z_lcgr(rval, rval); }
    } else {
      if (sign_flip) { z_mghi(rval, -cval); }
      else           { z_mghi(rval,  cval); }
    }
  }
  BLOCK_COMMENT("} Reg64*Con16");

  int block_end = offset();
  return block_end - block_start;
}

// Generic operation r1 := r2 + imm.
//
// Should produce the best code for each supported CPU version.
// r2 == noreg yields r1 := r1 + imm
// imm == 0 emits either no instruction or r1 := r2 !
// NOTES: 1) Don't use this function where fixed sized
//           instruction sequences are required!!!
//        2) Don't use this function if condition code
//           setting is required!
//        3) Despite being declared as int64_t, the parameter imm
//           must be a simm_32 value (= signed 32-bit integer).
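// Example (illustration only): with PreferLAoverADD disabled, add2reg(Z_R3, 7, Z_R2)
// emits z_aghik(Z_R3, Z_R2, 7) on machines with distinct operands, and an
// lgr/aghi sequence otherwise.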
void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
  assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");

  if (r2 == noreg) { r2 = r1; }

  // Handle special case imm == 0.
  if (imm == 0) {
    lgr_if_needed(r1, r2);
    // Nothing else to do.
    return;
  }

  if (!PreferLAoverADD || (r2 == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();

    // Can we encode imm in 16 bits signed?
    if (Immediate::is_simm16(imm)) {
      if (r1 == r2) {
        z_aghi(r1, imm);
        return;
      }
      if (distinctOpnds) {
        z_aghik(r1, r2, imm);
        return;
      }
      z_lgr(r1, r2);
      z_aghi(r1, imm);
      return;
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(imm)) {
      z_la(r1, imm, r2);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(imm)) {
      // Always use LAY instruction, so we don't need the tmp register.
      z_lay(r1, imm, r2);
      return;
    }
  }

  // Can handle it (all possible values) with long immediates.
  lgr_if_needed(r1, r2);
  z_agfi(r1, imm);
}

// Generic operation r := b + x + d
//
// Addition of several operands with address generation semantics - sort of:
//  - no restriction on the registers. Any register will do for any operand.
//  - x == noreg: operand will be disregarded.
//  - b == noreg: will use (contents of) result reg as operand (r := r + d).
//  - x == Z_R0:  just disregard
//  - b == Z_R0:  use as operand. This is not address generation semantics!!!
//
// The same restrictions as on add2reg() are valid!!!
void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
  assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");

  if (x == noreg) { x = Z_R0; }
  if (b == noreg) { b = r; }

  // Handle special case x == R0.
  if (x == Z_R0) {
    // Can simply add the immediate value to the base register.
    add2reg(r, d, b);
    return;
  }

  if (!PreferLAoverADD || (b == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();
    // Handle special case d == 0.
    if (d == 0) {
      if (b == x)        { z_sllg(r, b, 1); return; }
      if (r == x)        { z_agr(r, b);     return; }
      if (r == b)        { z_agr(r, x);     return; }
      if (distinctOpnds) { z_agrk(r, x, b); return; }
      z_lgr(r, b);
      z_agr(r, x);
    } else {
      if      (x == b)        { z_sllg(r, x, 1); }
      else if (r == x)        { z_agr(r, b); }
      else if (r == b)        { z_agr(r, x); }
      else if (distinctOpnds) { z_agrk(r, x, b); }
      else {
        z_lgr(r, b);
        z_agr(r, x);
      }
      add2reg(r, d);
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(d)) {
      z_la(r, d, x, b);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(d)) {
      z_lay(r, d, x, b);
      return;
    }
    z_la(r, 0, x, b);
    add2reg(r, d);
  }
}

// Generic emitter (32bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
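// Example (illustration only): add2mem_32(Address(Z_R2, 8), 1, Z_R1) increments the
// 32-bit word at Z_R2+8 by one; with mem-with-immediate ALU ops available this is a
// single ASI, otherwise a load/add/store sequence through Z_R1.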
void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_asi(a, imm);
  } else {
    z_lgf(tmp, a);
    add2reg(tmp, imm);
    z_st(tmp, a);
  }
}

void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_agsi(a, imm);
  } else {
    z_lg(tmp, a);
    add2reg(tmp, imm);
    z_stg(tmp, a);
  }
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case 8: z_lg(dst, src); break;
    case 4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
    case 2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
    case 1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
    default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
    case 8: z_stg(src, dst); break;
    case 4: z_st(src, dst); break;
    case 2: z_sth(src, dst); break;
    case 1: z_stc(src, dst); break;
    default: ShouldNotReachHere();
  }
}

// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
// a high-order summand in register tmp.
//
// return value: <  0: No split required, si20 actually has property uimm12.
//               >= 0: Split performed. Use return value as uimm12 displacement and
//                     tmp as index register.
int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
  assert(Immediate::is_simm20(si20_offset), "sanity");
  int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
  int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
  assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
         !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
  assert((lg_off+ll_off) == si20_offset, "offset splitup error");

  Register work = accumulate ? Z_R0 : tmp;

  if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
    z_lghi(work, ll_off>>12);   // Implicit sign extension.
    z_slag(work, work, 12);
  } else {                      // Len of code = 0..10.
    if (ll_off == 0) { return -1; }
    // ll_off has 8 significant bits (at most) plus sign.
    if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
      z_llilh(work, ll_off >> 16);
      if (ll_off < 0) {                  // Sign-extension required.
        z_lgfr(work, work);
      }
    } else {
      if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
        z_llill(work, ll_off);
      } else {                           // Non-zero bits in both halfbytes.
        z_lghi(work, ll_off>>12);        // Implicit sign extension.
        z_slag(work, work, 12);
      }
    }
  }
  if (accumulate) { z_algr(tmp, work); } // len of code += 4
  return lg_off;
}

void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ley(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_le(t, si20, work);
    } else {
      if (accumulate) {
        z_le(t, disp12, work);
      } else {
        z_le(t, disp12, work, a);
      }
    }
  }
}

void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ldy(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a); // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_ld(t, si20, work);
    } else {
      if (accumulate) {
        z_ld(t, disp12, work);
      } else {
        z_ld(t, disp12, work, a);
      }
    }
  }
}

// PCrelative TOC access.
// Returns distance (in bytes) from current position to start of consts section.
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
  CodeSection* cs = code()->consts();
  return (long)((cs != NULL) ? cs->start()-pc() : 0);
}

// The implementation on x86/sparc assumes that constant and instruction section are
// adjacent, but this doesn't hold here. Two special situations may occur, which we
// must be able to handle:
//   1. const section may be located apart from the inst section.
//   2. const section may be empty.
// In both cases, we use the const section's start address to compute the "TOC".
// This seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// PC-relative offset could be +/-2**32 -> use long for disp.
// Furthermore: makes no sense to have special code for
// adjacent const and inst sections.
void MacroAssembler::load_toc(Register Rtoc) {
  // Simply use distance from start of const section (should be patched in the end).
  long disp = toc_distance();

  RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
  relocate(rspec);
  z_larl(Rtoc, RelAddr::pcrel_off32(disp)); // Offset is in halfwords.
}

// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr).
void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = dataLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
  assert(total_distance != 0, "sanity");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}


// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr).
// Loaded addr has to be relocated when added to constant pool.
void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = addrLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// Generic operation: load a value from memory and test.
// CondCode indicates the sign (<0, ==0, >0) of the loaded value.
void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
  z_lb(dst, a);
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
  int64_t disp = a.disp20();
  if (Displacement::is_shortDisp(disp)) {
    z_lh(dst, a);
  } else if (Displacement::is_longDisp(disp)) {
    z_lhy(dst, a);
  } else {
    guarantee(false, "displacement out of range");
  }
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
  z_lt(dst, a);
}

void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
  z_ltgf(dst, a);
}

void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
  z_ltg(dst, a);
}

// Test a bit in memory.
void MacroAssembler::testbit(const Address &a, unsigned int bit) {
  assert(a.index() == noreg, "no index reg allowed in testbit");
  if (bit <= 7) {
    z_tm(a.disp() + 3, a.base(), 1 << bit);
  } else if (bit <= 15) {
    z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
  } else if (bit <= 23) {
    z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
  } else if (bit <= 31) {
    z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
  } else {
    ShouldNotReachHere();
  }
}

// Test a bit in a register. Result is reflected in CC.
void MacroAssembler::testbit(Register r, unsigned int bitPos) {
  if (bitPos < 16) {
    z_tmll(r, 1U<<bitPos);
  } else if (bitPos < 32) {
    z_tmlh(r, 1U<<(bitPos-16));
  } else if (bitPos < 48) {
    z_tmhl(r, 1U<<(bitPos-32));
  } else if (bitPos < 64) {
    z_tmhh(r, 1U<<(bitPos-48));
  } else {
    ShouldNotReachHere();
  }
}

// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
// set_cc:    Use instruction that sets the condition code, if true.
int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
  unsigned int start_off = offset();
  if (whole_reg) {
    set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
  } else { // Only 32bit register.
    set_cc ? z_xr(r, r) : z_lhi(r, 0);
  }
  return offset() - start_off;
}

#ifdef ASSERT
int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
  switch (pattern_len) {
    case 1:
      pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
      // fall through
    case 2:
      pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
      // fall through
    case 4:
      pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
      // fall through
    case 8:
      return load_const_optimized_rtn_len(r, pattern, true);
      break;
    default:
      guarantee(false, "preset_reg: bad len");
  }
  return 0;
}
#endif

// addr: Address descriptor of memory to clear. Index register will not be used!
// size: Number of bytes to clear.
//    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
//    !!! Use store_const() instead                  !!!
void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
  guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");

  if (size == 1) {
    z_mvi(addr, 0);
    return;
  }

  switch (size) {
    case 2: z_mvhhi(addr, 0);
      return;
    case 4: z_mvhi(addr, 0);
      return;
    case 8: z_mvghi(addr, 0);
      return;
    default: ; // Fallthru to xc.
  }

  z_xc(addr, size, addr);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) z_nop();
}

// Special version for non-relocateable code if required alignment
// is larger than CodeEntryAlignment.
void MacroAssembler::align_address(int modulus) {
  while ((uintptr_t)pc() % modulus != 0) z_nop();
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register temp_reg,
                                         int64_t extra_slot_offset) {
  // On Z, we can have index and disp in an Address. So don't call argument_offset,
  // which issues an unnecessary add instruction.
  int stackElementSize = Interpreter::stackElementSize;
  int64_t offset = extra_slot_offset * stackElementSize;
  const Register argbase = Z_esp;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return Address(argbase, offset);
  }
  // else
  assert(temp_reg != noreg, "must specify");
  assert(temp_reg != Z_ARG1, "base and index are conflicting");
  z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
  return Address(argbase, temp_reg, offset);
}


//===================================================================
//===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
//===================================================================
//===          P A T C H A B L E   C O N S T A N T S             ===
//===================================================================


//---------------------------------------------------
//  Load (patchable) constant into register
//---------------------------------------------------


// Load absolute address (and try to optimize).
//   Note: This method is usable only for position-fixed code,
//         referring to a position-fixed target location.
//         If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
  assert(addr != NULL, "should not happen");
  BLOCK_COMMENT("load_absolute_address:");
  if (addr == NULL) {
    z_larl(d, pc()); // Dummy emit for size calc.
    return;
  }

  if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
    z_larl(d, addr);
    return;
  }

  load_const_optimized(d, (long)addr);
}

// Load a 64bit constant.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const(Register t, long x) {
  Assembler::z_iihf(t, (int)(x >> 32));
  Assembler::z_iilf(t, (int)(x & 0xffffffff));
}

// Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
  if (sign_extend) { Assembler::z_lgfi(t, x); }
  else             { Assembler::z_llilf(t, x); }
}

// Load narrow oop constant, no decompression.
void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
  assert(UseCompressedOops, "must be on to call this method");
  load_const_32to64(t, a, false /*sign_extend*/);
}

// Load narrow klass constant, compression required.
void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(k);
  load_const_32to64(t, encoded_k, false /*sign_extend*/);
}

//------------------------------------------------------
//  Compare (patchable) constant with register.
//------------------------------------------------------

// Compare narrow oop in reg with narrow oop constant, no decompression.
void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
  assert(UseCompressedOops, "must be on to call this method");

  Assembler::z_clfi(oop1, oop2);
}

// Compare narrow klass in reg with narrow klass constant, compression required.
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(klass2);

  Assembler::z_clfi(klass1, encoded_k);
}

//----------------------------------------------------------
//  Check which kind of load_constant we have here.
//----------------------------------------------------------

// Detection of CPU version dependent load_const sequence.
// The detection is valid only for code sequences generated by load_const,
// not load_const_optimized.
bool MacroAssembler::is_load_const(address a) {
  unsigned long inst1, inst2;
  unsigned int  len1,  len2;

  len1 = get_instruction(a, &inst1);
  len2 = get_instruction(a + len1, &inst2);

  return is_z_iihf(inst1) && is_z_iilf(inst2);
}

// Detection of CPU version dependent load_const_32to64 sequence.
// Mostly used for narrow oops and narrow Klass pointers.
// The detection is valid only for code sequences generated by load_const_32to64.
bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1;
  unsigned int  len1;

  len1 = get_instruction(pos, &inst1);
  return is_z_llilf(inst1);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate32(address pos) {
  return is_equal(pos, CLFI_ZOPC, RIL_MASK);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
  return is_compare_immediate32(pos);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
  return is_compare_immediate32(pos);
}

//-----------------------------------
//  patch the load_constant
//-----------------------------------

// CPU-version dependent patching of load_const.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const(a), "not a load of a constant");
  set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
  set_imm32((address)(a + 6), (int)(x & 0xffffffff));
}

// Patching the value of CPU version dependent load_const_32to64 sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
  assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");

  set_imm32(pos, np);
  return 6;
}

// Patching the value of CPU version dependent compare_immediate_narrow sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
  assert(is_compare_immediate32(pos), "not a compressed ptr compare");

  set_imm32(pos, np);
  return 6;
}

// Patching the immediate value of CPU version dependent load_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_load_const_32to64(pos, no);
}

// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_load_const_32to64(pos, nk);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_compare_immediate_32(pos, no);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_compare_immediate_32(pos, nk);
}

//------------------------------------------------------------------------
//  Extract the constant from a load_constant instruction stream.
//------------------------------------------------------------------------

// Get constant from a load_const sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const(a), "not a load of a constant");
  unsigned long x;
  x  = (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
  x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
  return (long) x;
}

//--------------------------------------
//  Store a constant in memory.
//--------------------------------------

// General emitter to move a constant to memory.
// The store is atomic.
//  o Address must be given in RS format (no index register)
//  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
//  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot must be at least as wide as constant, will assert otherwise.
//  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
int MacroAssembler::store_const(const Address &dest, long imm,
                                unsigned int lm, unsigned int lc,
                                Register scratch) {
  int64_t  disp = dest.disp();
  Register base = dest.base();
  assert(!dest.has_index(), "not supported");
  assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
  assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
  assert(lm>=lc, "memory slot too small");
  assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
  assert(Displacement::is_validDisp(disp), "displacement out of range");

  bool is_shortDisp = Displacement::is_shortDisp(disp);
  int store_offset = -1;

  // For target len == 1 it's easy.
  if (lm == 1) {
    store_offset = offset();
    if (is_shortDisp) {
      z_mvi(disp, base, imm);
      return store_offset;
    } else {
      z_mviy(disp, base, imm);
      return store_offset;
    }
  }

  // All the "good stuff" takes an unsigned displacement.
  if (is_shortDisp) {
    // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.

    store_offset = offset();
    switch (lm) {
      case 2: // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
        z_mvhhi(disp, base, imm);
        return store_offset;
      case 4:
        if (Immediate::is_simm16(imm)) {
          z_mvhi(disp, base, imm);
          return store_offset;
        }
        break;
      case 8:
        if (Immediate::is_simm16(imm)) {
          z_mvghi(disp, base, imm);
          return store_offset;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Can't optimize, so load value and store it.
  guarantee(scratch != noreg, "need a scratch register here!");
  if (imm != 0) {
    load_const_optimized(scratch, imm); // Preserves CC anyway.
  } else {
    // Leave CC alone!!
    (void) clear_reg(scratch, true, false); // Indicate unused result.
  }

  store_offset = offset();
  if (is_shortDisp) {
    switch (lm) {
      case 2:
        z_sth(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_st(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
    switch (lm) {
      case 2:
        z_sthy(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_sty(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return -1; // should not reach here
}

//===================================================================
//===       N O T   P A T C H A B L E   C O N S T A N T S        ===
//===================================================================

// Load constant x into register t with a fast instruction sequence
// depending on the bits in x. Preserves CC under all circumstances.
int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
  if (x == 0) {
    int len;
    if (emit) {
      len = clear_reg(t, true, false);
    } else {
      len = 4;
    }
    return len;
  }

  if (Immediate::is_simm16(x)) {
    if (emit) { z_lghi(t, x); }
    return 4;
  }

  // 64 bit value: | part1 | part2 | part3 | part4 |
  // At least one part is not zero!
  int part1 = ((x >> 32) & 0xffff0000) >> 16;
  int part2 = (x >> 32) & 0x0000ffff;
  int part3 = (x & 0xffff0000) >> 16;
  int part4 = (x & 0x0000ffff);

  // Lower word only (unsigned).
  if ((part1 == 0) && (part2 == 0)) {
    if (part3 == 0) {
      if (emit) z_llill(t, part4);
      return 4;
    }
    if (part4 == 0) {
      if (emit) z_llilh(t, part3);
      return 4;
    }
    if (emit) z_llilf(t, (int)(x & 0xffffffff));
    return 6;
  }

  // Upper word only.
  if ((part3 == 0) && (part4 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      return 4;
    }
    if (part2 == 0) {
      if (emit) z_llihh(t, part1);
      return 4;
    }
    if (emit) z_llihf(t, (int)(x >> 32));
    return 6;
  }

  // Lower word only (signed).
  if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
    if (emit) z_lgfi(t, (int)(x & 0xffffffff));
    return 6;
  }

  int len = 0;

  if ((part1 == 0) || (part2 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      len += 4;
    } else {
      if (emit) z_llihh(t, part1);
      len += 4;
    }
  } else {
    if (emit) z_llihf(t, (int)(x >> 32));
    len += 6;
  }

  if ((part3 == 0) || (part4 == 0)) {
    if (part3 == 0) {
      if (emit) z_iill(t, part4);
      len += 4;
    } else {
      if (emit) z_iilh(t, part3);
      len += 4;
    }
  } else {
    if (emit) z_iilf(t, (int)(x & 0xffffffff));
    len += 6;
  }
  return len;
}

//=====================================================================
//===   H I G H E R   L E V E L   B R A N C H   E M I T T E R S     ===
//=====================================================================

// Note: In the worst case, one of the scratch registers is destroyed!!!
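// Example (illustration only): compare32_and_branch(Z_R2, RegisterOrConstant((intptr_t)0),
// bcondEqual, done) compares the signed 32-bit value in Z_R2 against zero and
// branches to the label 'done' if it is zero.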
void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
}

void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
}

// Generate an optimal branch to the branch target.
// Optimal means that a relative branch (brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Used registers:
//   Z_R1 - work reg. Holds branch target address.
//          Used in fallback case only.
//
// This version of branch_optimized is good for cases where the target address is known
// and constant, i.e. is never changed (no relocation, no patching).
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
  address branch_origin = pc();

  if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    z_brc(cond, branch_addr);
  } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
    z_brcl(cond, branch_addr);
  } else {
    load_const_optimized(Z_R1, branch_addr); // CC must not get killed by load_const_optimized.
    z_bcr(cond, Z_R1);
  }
}

// This version of branch_optimized is good for cases where the target address
// is potentially not yet known at the time the code is emitted.
//
// One very common case is a branch to an unbound label which is handled here.
1608 // The caller might know (or hope) that the branch distance is short enough 1609 // to be encoded in a 16bit relative address. In this case he will pass a 1610 // NearLabel branch_target. 1611 // Care must be taken with unbound labels. Each call to target(label) creates 1612 // an entry in the patch queue for that label to patch all references of the label 1613 // once it gets bound. Those recorded patch locations must be patchable. Otherwise, 1614 // an assertion fires at patch time. 1615 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) { 1616 if (branch_target.is_bound()) { 1617 address branch_addr = target(branch_target); 1618 branch_optimized(cond, branch_addr); 1619 } else if (branch_target.is_near()) { 1620 z_brc(cond, branch_target); // Caller assures that the target will be in range for z_brc. 1621 } else { 1622 z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time. 1623 } 1624 } 1625 1626 // Generate an optimal compare and branch to the branch target. 1627 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the 1628 // branch distance is short enough. Loading the target address into a 1629 // register and branching via reg is used as fallback only. 1630 // 1631 // Input: 1632 // r1 - left compare operand 1633 // r2 - right compare operand 1634 void MacroAssembler::compare_and_branch_optimized(Register r1, 1635 Register r2, 1636 Assembler::branch_condition cond, 1637 address branch_addr, 1638 bool len64, 1639 bool has_sign) { 1640 unsigned int casenum = (len64?2:0)+(has_sign?0:1); 1641 1642 address branch_origin = pc(); 1643 if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) { 1644 switch (casenum) { 1645 case 0: z_crj( r1, r2, cond, branch_addr); break; 1646 case 1: z_clrj (r1, r2, cond, branch_addr); break; 1647 case 2: z_cgrj(r1, r2, cond, branch_addr); break; 1648 case 3: z_clgrj(r1, r2, cond, branch_addr); break; 1649 default: ShouldNotReachHere(); break; 1650 } 1651 } else { 1652 switch (casenum) { 1653 case 0: z_cr( r1, r2); break; 1654 case 1: z_clr(r1, r2); break; 1655 case 2: z_cgr(r1, r2); break; 1656 case 3: z_clgr(r1, r2); break; 1657 default: ShouldNotReachHere(); break; 1658 } 1659 branch_optimized(cond, branch_addr); 1660 } 1661 } 1662 1663 // Generate an optimal compare and branch to the branch target. 1664 // Optimal means that a relative branch (clgij, brc or brcl) is used if the 1665 // branch distance is short enough. Loading the target address into a 1666 // register and branching via reg is used as fallback only. 
//
// Input:
//   r1 - left compare operand (in register)
//   x2 - right compare operand (immediate)
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  jlong    x2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  address      branch_origin = pc();
  bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
  bool         is_RelAddr16  = branch_target.is_near() ||
                               (branch_target.is_bound() &&
                                RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
  unsigned int casenum       = (len64?2:0)+(has_sign?0:1);

  if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
    switch (casenum) {
      case 0: z_cij( r1, x2, cond, branch_target); break;
      case 1: z_clij(r1, x2, cond, branch_target); break;
      case 2: z_cgij(r1, x2, cond, branch_target); break;
      case 3: z_clgij(r1, x2, cond, branch_target); break;
      default: ShouldNotReachHere(); break;
    }
    return;
  }

  if (x2 == 0) {
    switch (casenum) {
      case 0: z_ltr(r1, r1); break;
      case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      case 2: z_ltgr(r1, r1); break;
      case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      default: ShouldNotReachHere(); break;
    }
  } else {
    if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
      switch (casenum) {
        case 0: z_chi(r1, x2); break;
        case 1: z_chi(r1, x2); break; // positive immediate < 2**15
        case 2: z_cghi(r1, x2); break;
        case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
        default: ShouldNotReachHere(); break;
      }
    } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
      switch (casenum) {
        case 0: z_cfi( r1, x2); break;
        case 1: z_clfi(r1, x2); break;
        case 2: z_cgfi(r1, x2); break;
        case 3: z_clgfi(r1, x2); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      // No instruction with immediate operand possible, so load into register.
      Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
      load_const_optimized(scratch, x2);
      switch (casenum) {
        case 0: z_cr( r1, scratch); break;
        case 1: z_clr(r1, scratch); break;
        case 2: z_cgr(r1, scratch); break;
        case 3: z_clgr(r1, scratch); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
  branch_optimized(cond, branch_target);
}

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ?
0 : 1);

  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
  } else {
    if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
      switch (casenum) {
        case 0: z_crj(  r1, r2, cond, branch_target); break;
        case 1: z_clrj( r1, r2, cond, branch_target); break;
        case 2: z_cgrj( r1, r2, cond, branch_target); break;
        case 3: z_clgrj(r1, r2, cond, branch_target); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      switch (casenum) {
        case 0: z_cr( r1, r2); break;
        case 1: z_clr(r1, r2); break;
        case 2: z_cgr(r1, r2); break;
        case 3: z_clgr(r1, r2); break;
        default: ShouldNotReachHere(); break;
      }
      branch_optimized(cond, branch_target);
    }
  }
}

//===========================================================================
//===  END   H I G H E R   L E V E L   B R A N C H   E M I T T E R S     ===
//===========================================================================

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

// NOTE: destroys r
void MacroAssembler::c2bool(Register r, Register t) {
  z_lcr(t, r);   // t = -r
  z_or(r, t);    // r = -r OR r
  z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  BLOCK_COMMENT("delayed_value {");
  // Load indirectly to solve generation ordering problem.
  load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
  z_lg(tmp, 0, tmp);                                        // tmp = *tmp;

#ifdef ASSERT
  NearLabel L;
  compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
  z_illtrap();
  bind(L);
#endif

  if (offset != 0) {
    z_agfi(tmp, offset); // tmp = tmp + offset;
  }

  BLOCK_COMMENT("} delayed_value");
  return RegisterOrConstant(tmp);
}

// Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
// and return the resulting instruction.
1844 // Dest_pos and inst_pos are 32 bit only. These parms can only designate 1845 // relative positions. 1846 // Use correct argument types. Do not pre-calculate distance. 1847 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) { 1848 int c = 0; 1849 unsigned long patched_inst = 0; 1850 if (is_call_pcrelative_short(inst) || 1851 is_branch_pcrelative_short(inst) || 1852 is_branchoncount_pcrelative_short(inst) || 1853 is_branchonindex32_pcrelative_short(inst)) { 1854 c = 1; 1855 int m = fmask(15, 0); // simm16(-1, 16, 32); 1856 int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32); 1857 patched_inst = (inst & ~m) | v; 1858 } else if (is_compareandbranch_pcrelative_short(inst)) { 1859 c = 2; 1860 long m = fmask(31, 16); // simm16(-1, 16, 48); 1861 long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48); 1862 patched_inst = (inst & ~m) | v; 1863 } else if (is_branchonindex64_pcrelative_short(inst)) { 1864 c = 3; 1865 long m = fmask(31, 16); // simm16(-1, 16, 48); 1866 long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48); 1867 patched_inst = (inst & ~m) | v; 1868 } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) { 1869 c = 4; 1870 long m = fmask(31, 0); // simm32(-1, 16, 48); 1871 long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48); 1872 patched_inst = (inst & ~m) | v; 1873 } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions. 1874 c = 5; 1875 long m = fmask(31, 0); // simm32(-1, 16, 48); 1876 long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48); 1877 patched_inst = (inst & ~m) | v; 1878 } else { 1879 print_dbg_msg(tty, inst, "not a relative branch", 0); 1880 dump_code_range(tty, inst_pos, 32, "not a pcrelative branch"); 1881 ShouldNotReachHere(); 1882 } 1883 1884 long new_off = get_pcrel_offset(patched_inst); 1885 if (new_off != (dest_pos-inst_pos)) { 1886 tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off); 1887 print_dbg_msg(tty, inst, "<- original instruction: branch patching error", 0); 1888 print_dbg_msg(tty, patched_inst, "<- patched instruction: branch patching error", 0); 1889 #ifdef LUCY_DBG 1890 VM_Version::z_SIGSEGV(); 1891 #endif 1892 ShouldNotReachHere(); 1893 } 1894 return patched_inst; 1895 } 1896 1897 // Only called when binding labels (share/vm/asm/assembler.cpp) 1898 // Pass arguments as intended. Do not pre-calculate distance. 1899 void MacroAssembler::pd_patch_instruction(address branch, address target) { 1900 unsigned long stub_inst; 1901 int inst_len = get_instruction(branch, &stub_inst); 1902 1903 set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len); 1904 } 1905 1906 1907 // Extract relative address (aka offset). 1908 // inv_simm16 works for 4-byte instructions only. 1909 // compare and branch instructions are 6-byte and have a 16bit offset "in the middle". 
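// All pc-relative offsets on z/Architecture are encoded in halfwords (2-byte
// units); the byte distance is therefore twice the value of the immediate
// field. Example: a BRASL with an immediate field of 0x100 transfers control
// to pc + 0x200 bytes.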
long MacroAssembler::get_pcrel_offset(unsigned long inst) {

  if (MacroAssembler::is_pcrelative_short(inst)) {
    if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
      return RelAddr::inv_pcrel_off16(inv_simm16(inst));
    } else {
      return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
    }
  }

  if (MacroAssembler::is_pcrelative_long(inst)) {
    return RelAddr::inv_pcrel_off32(inv_simm32(inst));
  }

  print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
#ifdef LUCY_DBG
  VM_Version::z_SIGSEGV();
#else
  ShouldNotReachHere();
#endif
  return -1;
}

long MacroAssembler::get_pcrel_offset(address pc) {
  unsigned long inst;
  unsigned int  len = get_instruction(pc, &inst);

#ifdef ASSERT
  long offset;
  if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
    offset = get_pcrel_offset(inst);
  } else {
    offset = -1;
  }

  if (offset == -1) {
    dump_code_range(tty, pc, 32, "not a pcrelative instruction");
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#else
    ShouldNotReachHere();
#endif
  }
  return offset;
#else
  return get_pcrel_offset(inst);
#endif // ASSERT
}

// Get target address from pc-relative instructions.
address MacroAssembler::get_target_addr_pcrel(address pc) {
  assert(is_pcrelative_long(pc), "not a pcrelative instruction");
  return pc + get_pcrel_offset(pc);
}

// Patch pc relative load address.
void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
  unsigned long inst;
  // Offset is +/- 2**32 -> use long.
  ptrdiff_t distance = con - pc;

  get_instruction(pc, &inst);

  if (is_pcrelative_short(inst)) {
    *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc); // Instructions are at least 2-byte aligned, no test required.

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
      dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
    }
    return;
  }

  if (is_pcrelative_long(inst)) {
    *(int *)(pc+2) = RelAddr::pcrel_off32(con, pc);

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
      dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
    }
    return;
  }

  guarantee(false, "not a pcrelative instruction to patch!");
}

// "Current PC" here means the address just behind the basr instruction.
address MacroAssembler::get_PC(Register result) {
  z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
  return pc();
}

// Get current PC + offset.
// Offset given in bytes, must be even!
// "Current PC" here means the address of the larl instruction plus the given offset.
address MacroAssembler::get_PC(Register result, int64_t offset) {
  address here = pc();
  z_larl(result, offset/2); // Save target instruction address in result.
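  // LARL, like all pc-relative instructions, encodes its immediate in
  // halfwords; hence the division by 2 and the even-offset requirement above.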
2012 return here + offset; 2013 } 2014 2015 // Resize_frame with SP(new) = SP(old) - [offset]. 2016 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp) 2017 { 2018 assert_different_registers(offset, fp, Z_SP); 2019 if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } 2020 2021 z_sgr(Z_SP, offset); 2022 z_stg(fp, _z_abi(callers_sp), Z_SP); 2023 } 2024 2025 // Resize_frame with SP(new) = [addr]. 2026 void MacroAssembler::resize_frame_absolute(Register addr, Register fp, bool load_fp) { 2027 assert_different_registers(addr, fp, Z_SP); 2028 if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); } 2029 2030 if (addr != Z_R0) { 2031 // Minimize stalls by not using Z_SP immediately after update. 2032 z_stg(fp, _z_abi(callers_sp), addr); 2033 z_lgr(Z_SP, addr); 2034 } else { 2035 z_lgr(Z_SP, addr); 2036 z_stg(fp, _z_abi(callers_sp), Z_SP); 2037 } 2038 } 2039 2040 // Resize_frame with SP(new) = SP(old) + offset. 2041 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) { 2042 assert_different_registers(fp, Z_SP); 2043 if (load_fp) z_lg(fp, _z_abi(callers_sp), Z_SP); 2044 2045 if (Displacement::is_validDisp((int)_z_abi(callers_sp) + offset.constant_or_zero())) { 2046 // Minimize stalls by first using, then updating Z_SP. 2047 // Do that only if we have a small positive offset or if ExtImm are available. 2048 z_stg(fp, Address(Z_SP, offset, _z_abi(callers_sp))); 2049 add64(Z_SP, offset); 2050 } else { 2051 add64(Z_SP, offset); 2052 z_stg(fp, _z_abi(callers_sp), Z_SP); 2053 } 2054 } 2055 2056 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) { 2057 #ifdef ASSERT 2058 assert_different_registers(bytes, old_sp, Z_SP); 2059 if (!copy_sp) { 2060 z_cgr(old_sp, Z_SP); 2061 asm_assert_eq("[old_sp]!=[Z_SP]", 0x211); 2062 } 2063 #endif 2064 if (copy_sp) { z_lgr(old_sp, Z_SP); } 2065 if (bytes_with_inverted_sign) { 2066 z_stg(old_sp, 0, bytes, Z_SP); 2067 add2reg_with_index(Z_SP, 0, bytes, Z_SP); 2068 } else { 2069 z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster. 2070 z_stg(old_sp, 0, Z_SP); 2071 } 2072 } 2073 2074 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) { 2075 long offset = Assembler::align(bytes, frame::alignment_in_bytes); 2076 2077 if (Displacement::is_validDisp(-offset)) { 2078 // Minimize stalls by first using, then updating Z_SP. 2079 // Do that only if we have ExtImm available. 2080 z_stg(Z_SP, -offset, Z_SP); 2081 add2reg(Z_SP, -offset); 2082 } else { 2083 if (scratch != Z_R0 && scratch != Z_R1) { 2084 z_stg(Z_SP, -offset, Z_SP); 2085 add2reg(Z_SP, -offset); 2086 } else { // scratch == Z_R0 || scratch == Z_R1 2087 z_lgr(scratch, Z_SP); 2088 add2reg(Z_SP, -offset); 2089 z_stg(scratch, 0, Z_SP); 2090 } 2091 } 2092 return offset; 2093 } 2094 2095 // Push a frame of size `bytes' plus abi160 on top. 2096 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) { 2097 BLOCK_COMMENT("push_frame_abi160 {"); 2098 unsigned int res = push_frame(bytes + frame::z_abi_160_size); 2099 BLOCK_COMMENT("} push_frame_abi160"); 2100 return res; 2101 } 2102 2103 // Pop current C frame. 
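// The caller's SP was saved in the frame's callers_sp slot when the frame
// was pushed (see push_frame() above), so a single load restores it.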
2104 void MacroAssembler::pop_frame() { 2105 BLOCK_COMMENT("pop_frame:"); 2106 Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP); 2107 } 2108 2109 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) { 2110 if (allow_relocation) { 2111 call_c(entry_point); 2112 } else { 2113 call_c_static(entry_point); 2114 } 2115 } 2116 2117 void MacroAssembler::call_VM_leaf_base(address entry_point) { 2118 bool allow_relocation = true; 2119 call_VM_leaf_base(entry_point, allow_relocation); 2120 } 2121 2122 void MacroAssembler::call_VM_base(Register oop_result, 2123 Register last_java_sp, 2124 address entry_point, 2125 bool allow_relocation, 2126 bool check_exceptions) { // Defaults to true. 2127 // Allow_relocation indicates, if true, that the generated code shall 2128 // be fit for code relocation or referenced data relocation. In other 2129 // words: all addresses must be considered variable. PC-relative addressing 2130 // is not possible then. 2131 // On the other hand, if (allow_relocation == false), addresses and offsets 2132 // may be considered stable, enabling us to take advantage of some PC-relative 2133 // addressing tweaks. These might improve performance and reduce code size. 2134 2135 // Determine last_java_sp register. 2136 if (!last_java_sp->is_valid()) { 2137 last_java_sp = Z_SP; // Load Z_SP as SP. 2138 } 2139 2140 set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation); 2141 2142 // ARG1 must hold thread address. 2143 z_lgr(Z_ARG1, Z_thread); 2144 2145 address return_pc = NULL; 2146 if (allow_relocation) { 2147 return_pc = call_c(entry_point); 2148 } else { 2149 return_pc = call_c_static(entry_point); 2150 } 2151 2152 reset_last_Java_frame(allow_relocation); 2153 2154 // C++ interp handles this in the interpreter. 2155 check_and_handle_popframe(Z_thread); 2156 check_and_handle_earlyret(Z_thread); 2157 2158 // Check for pending exceptions. 2159 if (check_exceptions) { 2160 // Check for pending exceptions (java_thread is set upon return). 2161 load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset())); 2162 2163 // This used to conditionally jump to forward_exception however it is 2164 // possible if we relocate that the branch will not reach. So we must jump 2165 // around so we can always reach. 2166 2167 Label ok; 2168 z_bre(ok); // Bcondequal is the same as bcondZero. 2169 call_stub(StubRoutines::forward_exception_entry()); 2170 bind(ok); 2171 } 2172 2173 // Get oop result if there is one and reset the value in the thread. 2174 if (oop_result->is_valid()) { 2175 get_vm_result(oop_result); 2176 } 2177 2178 _last_calls_return_pc = return_pc; // Wipe out other (error handling) calls. 2179 } 2180 2181 void MacroAssembler::call_VM_base(Register oop_result, 2182 Register last_java_sp, 2183 address entry_point, 2184 bool check_exceptions) { // Defaults to true. 2185 bool allow_relocation = true; 2186 call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions); 2187 } 2188 2189 // VM calls without explicit last_java_sp. 2190 2191 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) { 2192 // Call takes possible detour via InterpreterMacroAssembler. 2193 call_VM_base(oop_result, noreg, entry_point, true, check_exceptions); 2194 } 2195 2196 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) { 2197 // Z_ARG1 is reserved for the thread. 
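  // Hence, Java argument i is passed in C argument register Z_ARG(i+1).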
2198 lgr_if_needed(Z_ARG2, arg_1); 2199 call_VM(oop_result, entry_point, check_exceptions); 2200 } 2201 2202 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) { 2203 // Z_ARG1 is reserved for the thread. 2204 lgr_if_needed(Z_ARG2, arg_1); 2205 assert(arg_2 != Z_ARG2, "smashed argument"); 2206 lgr_if_needed(Z_ARG3, arg_2); 2207 call_VM(oop_result, entry_point, check_exceptions); 2208 } 2209 2210 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, 2211 Register arg_3, bool check_exceptions) { 2212 // Z_ARG1 is reserved for the thread. 2213 lgr_if_needed(Z_ARG2, arg_1); 2214 assert(arg_2 != Z_ARG2, "smashed argument"); 2215 lgr_if_needed(Z_ARG3, arg_2); 2216 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2217 lgr_if_needed(Z_ARG4, arg_3); 2218 call_VM(oop_result, entry_point, check_exceptions); 2219 } 2220 2221 // VM static calls without explicit last_java_sp. 2222 2223 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) { 2224 // Call takes possible detour via InterpreterMacroAssembler. 2225 call_VM_base(oop_result, noreg, entry_point, false, check_exceptions); 2226 } 2227 2228 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2, 2229 Register arg_3, bool check_exceptions) { 2230 // Z_ARG1 is reserved for the thread. 2231 lgr_if_needed(Z_ARG2, arg_1); 2232 assert(arg_2 != Z_ARG2, "smashed argument"); 2233 lgr_if_needed(Z_ARG3, arg_2); 2234 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2235 lgr_if_needed(Z_ARG4, arg_3); 2236 call_VM_static(oop_result, entry_point, check_exceptions); 2237 } 2238 2239 // VM calls with explicit last_java_sp. 2240 2241 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) { 2242 // Call takes possible detour via InterpreterMacroAssembler. 2243 call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions); 2244 } 2245 2246 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) { 2247 // Z_ARG1 is reserved for the thread. 2248 lgr_if_needed(Z_ARG2, arg_1); 2249 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2250 } 2251 2252 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, 2253 Register arg_2, bool check_exceptions) { 2254 // Z_ARG1 is reserved for the thread. 2255 lgr_if_needed(Z_ARG2, arg_1); 2256 assert(arg_2 != Z_ARG2, "smashed argument"); 2257 lgr_if_needed(Z_ARG3, arg_2); 2258 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2259 } 2260 2261 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, 2262 Register arg_2, Register arg_3, bool check_exceptions) { 2263 // Z_ARG1 is reserved for the thread. 2264 lgr_if_needed(Z_ARG2, arg_1); 2265 assert(arg_2 != Z_ARG2, "smashed argument"); 2266 lgr_if_needed(Z_ARG3, arg_2); 2267 assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument"); 2268 lgr_if_needed(Z_ARG4, arg_3); 2269 call_VM(oop_result, last_java_sp, entry_point, check_exceptions); 2270 } 2271 2272 // VM leaf calls. 2273 2274 void MacroAssembler::call_VM_leaf(address entry_point) { 2275 // Call takes possible detour via InterpreterMacroAssembler. 
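  // Leaf calls do not set up a last_Java_frame and do not check for pending
  // exceptions afterwards (compare call_VM_base() above).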
2276 call_VM_leaf_base(entry_point, true); 2277 } 2278 2279 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) { 2280 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2281 call_VM_leaf(entry_point); 2282 } 2283 2284 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) { 2285 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2286 assert(arg_2 != Z_ARG1, "smashed argument"); 2287 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2288 call_VM_leaf(entry_point); 2289 } 2290 2291 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) { 2292 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2293 assert(arg_2 != Z_ARG1, "smashed argument"); 2294 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2295 assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument"); 2296 if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3); 2297 call_VM_leaf(entry_point); 2298 } 2299 2300 // Static VM leaf calls. 2301 // Really static VM leaf calls are never patched. 2302 2303 void MacroAssembler::call_VM_leaf_static(address entry_point) { 2304 // Call takes possible detour via InterpreterMacroAssembler. 2305 call_VM_leaf_base(entry_point, false); 2306 } 2307 2308 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) { 2309 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2310 call_VM_leaf_static(entry_point); 2311 } 2312 2313 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) { 2314 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2315 assert(arg_2 != Z_ARG1, "smashed argument"); 2316 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2317 call_VM_leaf_static(entry_point); 2318 } 2319 2320 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) { 2321 if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1); 2322 assert(arg_2 != Z_ARG1, "smashed argument"); 2323 if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2); 2324 assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument"); 2325 if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3); 2326 call_VM_leaf_static(entry_point); 2327 } 2328 2329 // Don't use detour via call_c(reg). 2330 address MacroAssembler::call_c(address function_entry) { 2331 load_const(Z_R1, function_entry); 2332 return call(Z_R1); 2333 } 2334 2335 // Variant for really static (non-relocatable) calls which are never patched. 2336 address MacroAssembler::call_c_static(address function_entry) { 2337 load_absolute_address(Z_R1, function_entry); 2338 #if 0 // def ASSERT 2339 // Verify that call site did not move. 2340 load_const_optimized(Z_R0, function_entry); 2341 z_cgr(Z_R1, Z_R0); 2342 z_brc(bcondEqual, 3); 2343 z_illtrap(0xba); 2344 #endif 2345 return call(Z_R1); 2346 } 2347 2348 address MacroAssembler::call_c_opt(address function_entry) { 2349 bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */); 2350 _last_calls_return_pc = success ? pc() : NULL; 2351 return _last_calls_return_pc; 2352 } 2353 2354 // Identify a call_far_patchable instruction: LARL + LG + BASR 2355 // 2356 // nop ; optionally, if required for alignment 2357 // lgrl rx,A(TOC entry) ; PC-relative access into constant pool 2358 // basr Z_R14,rx ; end of this instruction must be aligned to a word boundary 2359 // 2360 // Code pattern will eventually get patched into variant2 (see below for detection code). 
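// On z/Architecture, LGRL and BRASL are 6-byte instructions, while BASR is a
// 2-byte instruction. The detectors below therefore rely on the leading nops
// that pad both variants to the common call_far_patchable_size(), so the
// return address ends up at the same offset for either flavor.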
//
bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
  address iaddr = instruction_addr;

  // Check for the actual load instruction.
  if (!is_load_const_from_toc(iaddr)) { return false; }
  iaddr += load_const_from_toc_size();

  // Check for the call (BASR) instruction, finally.
  assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
  return is_call_byregister(iaddr);
}

// Identify a call_far_patchable instruction: BRASL
//
// Code pattern to suit atomic patching:
//    nop                    ; Optionally, if required for alignment.
//    nop    ...             ; Multiple filler nops to compensate for size difference (variant0 is longer).
//    nop                    ; For code pattern detection: Prepend each BRASL with a nop.
//    brasl  Z_R14,<reladdr> ; End of code must be 4-byte aligned !
bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
  const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());

  // Check for correct number of leading nops.
  address iaddr;
  for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
    if (!is_z_nop(iaddr)) { return false; }
  }
  assert(iaddr == call_addr, "sanity");

  // --> Check for call instruction.
  if (is_call_far_pcrelative(call_addr)) {
    assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
    return true;
  }

  return false;
}

// Emit a NOT mt-safely patchable 64 bit absolute call.
// If toc_offset == -2, then the destination of the call (= target) is emitted
//                      to the constant pool and a runtime_call relocation is added
//                      to the code buffer.
// If toc_offset != -2, target must already be in the constant pool at
//                      _ctableStart+toc_offset (a caller can retrieve toc_offset
//                      from the runtime_call relocation).
// Special handling of emitting to scratch buffer when there is no constant pool.
// Slightly changed code pattern. We emit an additional nop if we would
// not end emitting at a word aligned address. This is to ensure
// an atomically patchable displacement in brasl instructions.
//
// A call_far_patchable comes in different flavors:
//  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
//  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
//  - BRASL                  (relative address of call target coded in instruction)
// All flavors occupy the same amount of space. Length differences are compensated
// by leading nops, such that the instruction sequence always ends at the same
// byte offset. This is required to keep the return offset constant.
// Furthermore, the return address (the end of the instruction sequence) is forced
// to be on a 4-byte boundary. This is required for atomic patching, should we ever
// need to patch the call target of the BRASL flavor.
// RETURN value: false, if no constant pool entry could be allocated, true otherwise.
bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
  // Get current pc and ensure word alignment for end of instr sequence.
2425 const address start_pc = pc(); 2426 const intptr_t start_off = offset(); 2427 assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address"); 2428 const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop. 2429 const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit(); 2430 const bool emit_relative_call = !emit_target_to_pool && 2431 RelAddr::is_in_range_of_RelAddr32(dist) && 2432 ReoptimizeCallSequences && 2433 !code_section()->scratch_emit(); 2434 2435 if (emit_relative_call) { 2436 // Add padding to get the same size as below. 2437 const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size(); 2438 unsigned int current_padding; 2439 for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); } 2440 assert(current_padding == padding, "sanity"); 2441 2442 // relative call: len = 2(nop) + 6 (brasl) 2443 // CodeBlob resize cannot occur in this case because 2444 // this call is emitted into pre-existing space. 2445 z_nop(); // Prepend each BRASL with a nop. 2446 z_brasl(Z_R14, target); 2447 } else { 2448 // absolute call: Get address from TOC. 2449 // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8} 2450 if (emit_target_to_pool) { 2451 // When emitting the call for the first time, we do not need to use 2452 // the pc-relative version. It will be patched anyway, when the code 2453 // buffer is copied. 2454 // Relocation is not needed when !ReoptimizeCallSequences. 2455 relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none; 2456 AddressLiteral dest(target, rt); 2457 // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills 2458 // inst_mark(). Reset if possible. 2459 bool reset_mark = (inst_mark() == pc()); 2460 tocOffset = store_oop_in_toc(dest); 2461 if (reset_mark) { set_inst_mark(); } 2462 if (tocOffset == -1) { 2463 return false; // Couldn't create constant pool entry. 2464 } 2465 } 2466 assert(offset() == start_off, "emit no code before this point!"); 2467 2468 address tocPos = pc() + tocOffset; 2469 if (emit_target_to_pool) { 2470 tocPos = code()->consts()->start() + tocOffset; 2471 } 2472 load_long_pcrelative(Z_R14, tocPos); 2473 z_basr(Z_R14, Z_R14); 2474 } 2475 2476 #ifdef ASSERT 2477 // Assert that we can identify the emitted call. 2478 assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call"); 2479 assert(offset() == start_off+call_far_patchable_size(), "wrong size"); 2480 2481 if (emit_target_to_pool) { 2482 assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target, 2483 "wrong encoding of dest address"); 2484 } 2485 #endif 2486 return true; // success 2487 } 2488 2489 // Identify a call_far_patchable instruction. 2490 // For more detailed information see header comment of call_far_patchable. 2491 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) { 2492 return is_call_far_patchable_variant2_at(instruction_addr) || // short version: BRASL 2493 is_call_far_patchable_variant0_at(instruction_addr); // long version LARL + LG + BASR 2494 } 2495 2496 // Does the call_far_patchable instruction use a pc-relative encoding 2497 // of the call destination? 2498 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) { 2499 // Variant 2 is pc-relative. 
2500 return is_call_far_patchable_variant2_at(instruction_addr); 2501 } 2502 2503 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) { 2504 // Prepend each BRASL with a nop. 2505 return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required. 2506 } 2507 2508 // Set destination address of a call_far_patchable instruction. 2509 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) { 2510 ResourceMark rm; 2511 2512 // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit). 2513 int code_size = MacroAssembler::call_far_patchable_size(); 2514 CodeBuffer buf(instruction_addr, code_size); 2515 MacroAssembler masm(&buf); 2516 masm.call_far_patchable(dest, tocOffset); 2517 ICache::invalidate_range(instruction_addr, code_size); // Empty on z. 2518 } 2519 2520 // Get dest address of a call_far_patchable instruction. 2521 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) { 2522 // Dynamic TOC: absolute address in constant pool. 2523 // Check variant2 first, it is more frequent. 2524 2525 // Relative address encoded in call instruction. 2526 if (is_call_far_patchable_variant2_at(instruction_addr)) { 2527 return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop. 2528 2529 // Absolute address in constant pool. 2530 } else if (is_call_far_patchable_variant0_at(instruction_addr)) { 2531 address iaddr = instruction_addr; 2532 2533 long tocOffset = get_load_const_from_toc_offset(iaddr); 2534 address tocLoc = iaddr + tocOffset; 2535 return *(address *)(tocLoc); 2536 } else { 2537 fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr); 2538 fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n", 2539 *(unsigned long*)instruction_addr, 2540 *(unsigned long*)(instruction_addr+8), 2541 call_far_patchable_size()); 2542 Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size()); 2543 ShouldNotReachHere(); 2544 return NULL; 2545 } 2546 } 2547 2548 void MacroAssembler::align_call_far_patchable(address pc) { 2549 if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); } 2550 } 2551 2552 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 2553 } 2554 2555 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 2556 } 2557 2558 // Read from the polling page. 2559 // Use TM or TMY instruction, depending on read offset. 2560 // offset = 0: Use TM, safepoint polling. 2561 // offset < 0: Use TMY, profiling safepoint polling. 2562 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) { 2563 if (Immediate::is_uimm12(offset)) { 2564 z_tm(offset, polling_page_address, mask_safepoint); 2565 } else { 2566 z_tmy(offset, polling_page_address, mask_profiling); 2567 } 2568 } 2569 2570 // Check whether z_instruction is a read access to the polling page 2571 // which was emitted by load_from_polling_page(..). 2572 bool MacroAssembler::is_load_from_polling_page(address instr_loc) { 2573 unsigned long z_instruction; 2574 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2575 2576 if (ilen == 2) { return false; } // It's none of the allowed instructions. 2577 2578 if (ilen == 4) { 2579 if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail. 
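    // Decode mask, base register and displacement, and check that they match
    // the pattern emitted by load_from_polling_page() above.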
2580 2581 int ms = inv_mask(z_instruction,8,32); // mask 2582 int ra = inv_reg(z_instruction,16,32); // base register 2583 int ds = inv_uimm12(z_instruction); // displacement 2584 2585 if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) { 2586 return false; // It's not a z_tm(0, ra, mask_safepoint). Fail. 2587 } 2588 2589 } else { /* if (ilen == 6) */ 2590 2591 assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y)."); 2592 2593 if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail. 2594 2595 int ms = inv_mask(z_instruction,8,48); // mask 2596 int ra = inv_reg(z_instruction,16,48); // base register 2597 int ds = inv_simm20(z_instruction); // displacement 2598 } 2599 2600 return true; 2601 } 2602 2603 // Extract poll address from instruction and ucontext. 2604 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) { 2605 assert(ucontext != NULL, "must have ucontext"); 2606 ucontext_t* uc = (ucontext_t*) ucontext; 2607 unsigned long z_instruction; 2608 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2609 2610 if (ilen == 4 && is_z_tm(z_instruction)) { 2611 int ra = inv_reg(z_instruction, 16, 32); // base register 2612 int ds = inv_uimm12(z_instruction); // displacement 2613 address addr = (address)uc->uc_mcontext.gregs[ra]; 2614 return addr + ds; 2615 } else if (ilen == 6 && is_z_tmy(z_instruction)) { 2616 int ra = inv_reg(z_instruction, 16, 48); // base register 2617 int ds = inv_simm20(z_instruction); // displacement 2618 address addr = (address)uc->uc_mcontext.gregs[ra]; 2619 return addr + ds; 2620 } 2621 2622 ShouldNotReachHere(); 2623 return NULL; 2624 } 2625 2626 // Extract poll register from instruction. 2627 uint MacroAssembler::get_poll_register(address instr_loc) { 2628 unsigned long z_instruction; 2629 unsigned int ilen = get_instruction(instr_loc, &z_instruction); 2630 2631 if (ilen == 4 && is_z_tm(z_instruction)) { 2632 return (uint)inv_reg(z_instruction, 16, 32); // base register 2633 } else if (ilen == 6 && is_z_tmy(z_instruction)) { 2634 return (uint)inv_reg(z_instruction, 16, 48); // base register 2635 } 2636 2637 ShouldNotReachHere(); 2638 return 0; 2639 } 2640 2641 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) { 2642 ShouldNotCallThis(); 2643 return false; 2644 } 2645 2646 // Write serialization page so VM thread can do a pseudo remote membar 2647 // We use the current thread pointer to calculate a thread specific 2648 // offset to write to within the page. This minimizes bus traffic 2649 // due to cache line collision. 2650 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) { 2651 assert_different_registers(tmp1, tmp2); 2652 z_sllg(tmp2, thread, os::get_serialize_page_shift_count()); 2653 load_const_optimized(tmp1, (long) os::get_memory_serialize_page()); 2654 2655 int mask = os::get_serialize_page_mask(); 2656 if (Immediate::is_uimm16(mask)) { 2657 z_nill(tmp2, mask); 2658 z_llghr(tmp2, tmp2); 2659 } else { 2660 z_nilf(tmp2, mask); 2661 z_llgfr(tmp2, tmp2); 2662 } 2663 2664 z_release(); 2665 z_st(Z_R0, 0, tmp2, tmp1); 2666 } 2667 2668 // Don't rely on register locking, always use Z_R1 as scratch register instead. 2669 void MacroAssembler::bang_stack_with_offset(int offset) { 2670 // Stack grows down, caller passes positive offset. 
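  // TM/TMY only read the storage operand; a read access is all it takes to
  // trigger the protection fault on a guard page.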
2671 assert(offset > 0, "must bang with positive offset"); 2672 if (Displacement::is_validDisp(-offset)) { 2673 z_tmy(-offset, Z_SP, mask_stackbang); 2674 } else { 2675 add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!! 2676 z_tm(0, Z_R1, mask_stackbang); // Just banging. 2677 } 2678 } 2679 2680 void MacroAssembler::reserved_stack_check(Register return_pc) { 2681 // Test if reserved zone needs to be enabled. 2682 Label no_reserved_zone_enabling; 2683 assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub."); 2684 BLOCK_COMMENT("reserved_stack_check {"); 2685 2686 z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset())); 2687 z_brl(no_reserved_zone_enabling); 2688 2689 // Enable reserved zone again, throw stack overflow exception. 2690 save_return_pc(); 2691 push_frame_abi160(0); 2692 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread); 2693 pop_frame(); 2694 restore_return_pc(); 2695 2696 load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry()); 2697 // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc. 2698 z_br(Z_R1); 2699 2700 should_not_reach_here(); 2701 2702 bind(no_reserved_zone_enabling); 2703 BLOCK_COMMENT("} reserved_stack_check"); 2704 } 2705 2706 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 2707 void MacroAssembler::tlab_allocate(Register obj, 2708 Register var_size_in_bytes, 2709 int con_size_in_bytes, 2710 Register t1, 2711 Label& slow_case) { 2712 assert_different_registers(obj, var_size_in_bytes, t1); 2713 Register end = t1; 2714 Register thread = Z_thread; 2715 2716 z_lg(obj, Address(thread, JavaThread::tlab_top_offset())); 2717 if (var_size_in_bytes == noreg) { 2718 z_lay(end, Address(obj, con_size_in_bytes)); 2719 } else { 2720 z_lay(end, Address(obj, var_size_in_bytes)); 2721 } 2722 z_cg(end, Address(thread, JavaThread::tlab_end_offset())); 2723 branch_optimized(bcondHigh, slow_case); 2724 2725 // Update the tlab top pointer. 2726 z_stg(end, Address(thread, JavaThread::tlab_top_offset())); 2727 2728 // Recover var_size_in_bytes if necessary. 2729 if (var_size_in_bytes == end) { 2730 z_sgr(var_size_in_bytes, obj); 2731 } 2732 } 2733 2734 // Emitter for interface method lookup. 2735 // input: recv_klass, intf_klass, itable_index 2736 // output: method_result 2737 // kills: itable_index, temp1_reg, Z_R0, Z_R1 2738 // TODO: Temp2_reg is unused. we may use this emitter also in the itable stubs. 2739 // If the register is still not needed then, remove it. 2740 void MacroAssembler::lookup_interface_method(Register recv_klass, 2741 Register intf_klass, 2742 RegisterOrConstant itable_index, 2743 Register method_result, 2744 Register temp1_reg, 2745 Register temp2_reg, 2746 Label& no_such_interface) { 2747 2748 const Register vtable_len = temp1_reg; // Used to compute itable_entry_addr. 2749 const Register itable_entry_addr = Z_R1_scratch; 2750 const Register itable_interface = Z_R0_scratch; 2751 2752 BLOCK_COMMENT("lookup_interface_method {"); 2753 2754 // Load start of itable entries into itable_entry_addr. 2755 z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset())); 2756 z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes())); 2757 2758 // Loop over all itable entries until desired interfaceOop(Rinterface) found. 
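  // The itable is laid out directly behind the vtable. Each itableOffsetEntry
  // pairs an interface klass with the offset (within recv_klass) of that
  // interface's method table.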
2759 const int vtable_base_offset = in_bytes(Klass::vtable_start_offset()); 2760 2761 add2reg_with_index(itable_entry_addr, 2762 vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(), 2763 recv_klass, vtable_len); 2764 2765 const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize; 2766 Label search; 2767 2768 bind(search); 2769 2770 // Handle IncompatibleClassChangeError. 2771 // If the entry is NULL then we've reached the end of the table 2772 // without finding the expected interface, so throw an exception. 2773 load_and_test_long(itable_interface, Address(itable_entry_addr)); 2774 z_bre(no_such_interface); 2775 2776 add2reg(itable_entry_addr, itable_offset_search_inc); 2777 z_cgr(itable_interface, intf_klass); 2778 z_brne(search); 2779 2780 // Entry found and itable_entry_addr points to it, get offset of vtable for interface. 2781 2782 const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() - 2783 itableOffsetEntry::interface_offset_in_bytes()) - 2784 itable_offset_search_inc; 2785 2786 // Compute itableMethodEntry and get method and entry point 2787 // we use addressing with index and displacement, since the formula 2788 // for computing the entry's offset has a fixed and a dynamic part, 2789 // the latter depending on the matched interface entry and on the case, 2790 // that the itable index has been passed as a register, not a constant value. 2791 int method_offset = itableMethodEntry::method_offset_in_bytes(); 2792 // Fixed part (displacement), common operand. 2793 Register itable_offset; // Dynamic part (index register). 2794 2795 if (itable_index.is_register()) { 2796 // Compute the method's offset in that register, for the formula, see the 2797 // else-clause below. 2798 itable_offset = itable_index.as_register(); 2799 2800 z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize)); 2801 z_agf(itable_offset, vtable_offset_offset, itable_entry_addr); 2802 } else { 2803 itable_offset = Z_R1_scratch; 2804 // Displacement increases. 2805 method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant(); 2806 2807 // Load index from itable. 2808 z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr); 2809 } 2810 2811 // Finally load the method's oop. 2812 z_lg(method_result, method_offset, itable_offset, recv_klass); 2813 BLOCK_COMMENT("} lookup_interface_method"); 2814 } 2815 2816 // Lookup for virtual method invocation. 2817 void MacroAssembler::lookup_virtual_method(Register recv_klass, 2818 RegisterOrConstant vtable_index, 2819 Register method_result) { 2820 assert_different_registers(recv_klass, vtable_index.register_or_noreg()); 2821 assert(vtableEntry::size() * wordSize == wordSize, 2822 "else adjust the scaling in the code below"); 2823 2824 BLOCK_COMMENT("lookup_virtual_method {"); 2825 2826 const int base = in_bytes(Klass::vtable_start_offset()); 2827 2828 if (vtable_index.is_constant()) { 2829 // Load with base + disp. 2830 Address vtable_entry_addr(recv_klass, 2831 vtable_index.as_constant() * wordSize + 2832 base + 2833 vtableEntry::method_offset_in_bytes()); 2834 2835 z_lg(method_result, vtable_entry_addr); 2836 } else { 2837 // Shift index properly and load with base + index + disp. 
Register vindex = vtable_index.as_register();
    Address  vtable_entry_addr(recv_klass, vindex,
                               base + vtableEntry::method_offset_in_bytes());

    z_sllg(vindex, vindex, exact_log2(wordSize));
    z_lg(method_result, vtable_entry_addr);
  }
  BLOCK_COMMENT("} lookup_virtual_method");
}

// Factor out code to call ic_miss_handler.
// Generate code to call the inline cache miss handler.
//
// In most cases, this code will be generated out-of-line.
// The method parameters are intended to provide some variability.
//   ICM          - Label which has to be bound to the start of useful code (past any traps).
//   trapMarker   - Marking byte for the generated illtrap instructions (if any).
//                  Any value except 0x00 is supported.
//                  = 0x00 - do not generate illtrap instructions.
//                           use nops to fill unused space.
//   requiredSize - required size of the generated code. If the actually
//                  generated code is smaller, use padding instructions to fill up.
//                  = 0 - no size requirement, no padding.
//   scratch      - scratch register to hold branch target address.
//
//  The method returns the code offset of the bound label.
unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
  intptr_t startOffset = offset();

  // Prevent entry at content_begin().
  if (trapMarker != 0) {
    z_illtrap(trapMarker);
  }

  // Load address of inline cache miss code into scratch register
  // and branch to cache miss handler.
  BLOCK_COMMENT("IC miss handler {");
  BIND(ICM);
  unsigned int   labelOffset = offset();
  AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());

  load_const_optimized(scratch, icmiss);
  z_br(scratch);

  // Fill unused space.
  if (requiredSize > 0) {
    while ((offset() - startOffset) < requiredSize) {
      if (trapMarker == 0) {
        z_nop();
      } else {
        z_illtrap(trapMarker);
      }
    }
  }
  BLOCK_COMMENT("} IC miss handler");
  return labelOffset;
}

void MacroAssembler::nmethod_UEP(Label& ic_miss) {
  Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
  int      klass_offset = oopDesc::klass_offset_in_bytes();
  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
    if (VM_Version::has_CompareBranch()) {
      z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
    } else {
      z_ltgr(Z_ARG1, Z_ARG1);
      z_bre(ic_miss);
    }
  }
  // Compare cached class against klass from receiver.
  compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
  z_brne(ic_miss);
}

void MacroAssembler::check_klass_subtype_fast_path(Register  sub_klass,
                                                   Register  super_klass,
                                                   Register  temp1_reg,
                                                   Label*    L_success,
                                                   Label*    L_failure,
                                                   Label*    L_slow_path,
                                                   RegisterOrConstant super_check_offset) {

  const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  const int sco_offset = in_bytes(Klass::super_check_offset_offset());

  bool must_load_sco  = (super_check_offset.constant_or_zero() == -1);
  bool need_slow_path = (must_load_sco ||
                         super_check_offset.constant_or_zero() == sc_offset);

  // Input registers must not overlap.
2928 assert_different_registers(sub_klass, super_klass, temp1_reg); 2929 if (super_check_offset.is_register()) { 2930 assert_different_registers(sub_klass, super_klass, 2931 super_check_offset.as_register()); 2932 } else if (must_load_sco) { 2933 assert(temp1_reg != noreg, "supply either a temp or a register offset"); 2934 } 2935 2936 const Register Rsuper_check_offset = temp1_reg; 2937 2938 NearLabel L_fallthrough; 2939 int label_nulls = 0; 2940 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 2941 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 2942 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 2943 assert(label_nulls <= 1 || 2944 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path), 2945 "at most one NULL in the batch, usually"); 2946 2947 BLOCK_COMMENT("check_klass_subtype_fast_path {"); 2948 // If the pointers are equal, we are done (e.g., String[] elements). 2949 // This self-check enables sharing of secondary supertype arrays among 2950 // non-primary types such as array-of-interface. Otherwise, each such 2951 // type would need its own customized SSA. 2952 // We move this check to the front of the fast path because many 2953 // type checks are in fact trivially successful in this manner, 2954 // so we get a nicely predicted branch right at the start of the check. 2955 compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success); 2956 2957 // Check the supertype display, which is uint. 2958 if (must_load_sco) { 2959 z_llgf(Rsuper_check_offset, sco_offset, super_klass); 2960 super_check_offset = RegisterOrConstant(Rsuper_check_offset); 2961 } 2962 Address super_check_addr(sub_klass, super_check_offset, 0); 2963 z_cg(super_klass, super_check_addr); // compare w/ displayed supertype 2964 2965 // This check has worked decisively for primary supers. 2966 // Secondary supers are sought in the super_cache ('super_cache_addr'). 2967 // (Secondary supers are interfaces and very deeply nested subtypes.) 2968 // This works in the same check above because of a tricky aliasing 2969 // between the super_cache and the primary super display elements. 2970 // (The 'super_check_addr' can address either, as the case requires.) 2971 // Note that the cache is updated below if it does not help us find 2972 // what we need immediately. 2973 // So if it was a primary super, we can just fail immediately. 2974 // Otherwise, it's the slow path for us (no success at this point). 2975 2976 // Hacked jmp, which may only be used just before L_fallthrough. 2977 #define final_jmp(label) \ 2978 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 2979 else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/ 2980 2981 if (super_check_offset.is_register()) { 2982 branch_optimized(Assembler::bcondEqual, *L_success); 2983 z_cfi(super_check_offset.as_register(), sc_offset); 2984 if (L_failure == &L_fallthrough) { 2985 branch_optimized(Assembler::bcondEqual, *L_slow_path); 2986 } else { 2987 branch_optimized(Assembler::bcondNotEqual, *L_failure); 2988 final_jmp(*L_slow_path); 2989 } 2990 } else if (super_check_offset.as_constant() == sc_offset) { 2991 // Need a slow path; fast failure is impossible. 2992 if (L_slow_path == &L_fallthrough) { 2993 branch_optimized(Assembler::bcondEqual, *L_success); 2994 } else { 2995 branch_optimized(Assembler::bcondNotEqual, *L_slow_path); 2996 final_jmp(*L_success); 2997 } 2998 } else { 2999 // No slow path; it's a fast decision. 
3000     if (L_failure == &L_fallthrough) {
3001       branch_optimized(Assembler::bcondEqual, *L_success);
3002     } else {
3003       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3004       final_jmp(*L_success);
3005     }
3006   }
3007
3008   bind(L_fallthrough);
3009 #undef local_brc
3010 #undef final_jmp
3011   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3012   // fallthru (to slow path)
3013 }
3014
3015 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3016                                                    Register Rsuperklass,
3017                                                    Register Rarray_ptr,  // tmp
3018                                                    Register Rlength,     // tmp
3019                                                    Label* L_success,
3020                                                    Label* L_failure) {
3021   // Input registers must not overlap.
3022   // Also check for R1 which is explicitly used here.
3023   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3024   NearLabel L_fallthrough, L_loop;
3025   int label_nulls = 0;
3026   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3027   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3028   assert(label_nulls <= 1, "at most one NULL in the batch");
3029
3030   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3031   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3032
3033   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3034   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3035
3036   // Hacked jmp, which may only be used just before L_fallthrough.
3037 #define final_jmp(label)                                                \
3038   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3039   else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3040
3041   NearLabel loop_iterate, loop_count, match;
3042
3043   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3044   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3045
3046   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3047   branch_optimized(Assembler::bcondZero, *L_failure);
3048
3049   // Oops in table are no longer compressed.
3050   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3051   z_bre(match);                               // Shortcut for array length = 1.
3052
3053   // No match yet, so we must walk the array's elements.
3054   z_lngfr(Rlength, Rlength);
3055   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3056   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3057   add2reg(Rlength, 2 * BytesPerWord);        // start index = -(n-2)*BytesPerWord
3058   z_slgr(Rarray_ptr, Rlength);               // start addr: += (n-2)*BytesPerWord
3059   z_bru(loop_count);
3060
3061   BIND(loop_iterate);
3062   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3063   z_bre(match);
3064   BIND(loop_count);
3065   z_brxlg(Rlength, Z_R1, loop_iterate);
3066
3067   // Rsuperklass not found among secondary super classes -> failure.
3068   branch_optimized(Assembler::bcondAlways, *L_failure);
3069
3070   // Got a hit. Return success (zero result). Set cache.
3071   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3072
3073   BIND(match);
3074
3075   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3076
3077   final_jmp(*L_success);
3078
3079   // Exit to the surrounding code.
3080   BIND(L_fallthrough);
3081 #undef local_brc
3082 #undef final_jmp
3083   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3084 }
3085
3086 // Emitter for combining fast and slow path.
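// A typical use (an illustrative sketch; register choices are hypothetical):
//   NearLabel ok;
//   check_klass_subtype(Rsub, Rsuper, Z_tmp_1, Z_tmp_2, ok); // falls through on failure
//   // ... handle failure, e.g. branch to a throw stub ...
//   bind(ok); // sub_klass is a subtype of super_klass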
3087 void MacroAssembler::check_klass_subtype(Register sub_klass,
3088                                          Register super_klass,
3089                                          Register temp1_reg,
3090                                          Register temp2_reg,
3091                                          Label&   L_success) {
3092   NearLabel failure;
3093   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3094   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3095                                 &L_success, &failure, NULL);
3096   check_klass_subtype_slow_path(sub_klass, super_klass,
3097                                 temp1_reg, temp2_reg, &L_success, NULL);
3098   BIND(failure);
3099   BLOCK_COMMENT("} check_klass_subtype");
3100 }
3101
3102 // Increment a counter at counter_address when the eq condition code is
3103 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3104 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3105   Label l;
3106   z_brne(l);
3107   load_const(tmp1_reg, counter_address);
3108   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3109   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3110   bind(l);
3111 }
3112
3113 // Semantics are dependent on the slow_case label:
3114 //   If the slow_case label is not NULL, failure to biased-lock the object
3115 //   transfers control to the location of the slow_case label. If the
3116 //   object could be biased-locked, control is transferred to the done label.
3117 //   The condition code is unpredictable.
3118 //
3119 //   If the slow_case label is NULL, failure to biased-lock the object results
3120 //   in a transfer of control to the done label with a condition code of not_equal.
3121 //   If the biased-lock could be successfully obtained, control is transferred to
3122 //   the done label with a condition code of equal.
3123 //   It is mandatory to check the condition code at the done label.
3124 //
3125 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3126                                           Register  mark_reg,
3127                                           Register  temp_reg,
3128                                           Register  temp2_reg,    // May be Z_R0!
3129                                           Label    &done,
3130                                           Label    *slow_case) {
3131   assert(UseBiasedLocking, "why call this otherwise?");
3132   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3133
3134   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3135
3136   BLOCK_COMMENT("biased_locking_enter {");
3137
3138   // Biased locking
3139   // See whether the lock is currently biased toward our thread and
3140   // whether the epoch is still valid.
3141   // Note that the runtime guarantees sufficient alignment of JavaThread
3142   // pointers to allow age to be placed into low bits.
3143   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3144          "biased locking makes assumptions about bit layout");
3145   z_lr(temp_reg, mark_reg);
3146   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3147   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3148   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3149
3150   load_prototype_header(temp_reg, obj_reg);
3151   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3152
3153   z_ogr(temp_reg, Z_thread);
3154   z_xgr(temp_reg, mark_reg);
3155   z_ngr(temp_reg, temp2_reg);
3156   if (PrintBiasedLockingStatistics) {
3157     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3158     // Restore mark_reg.
3159     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3160   }
3161   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
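  // For orientation (summarized from markOop.hpp; exact widths depend on build
  // flags), the 64-bit mark word looks roughly like:
  //   normal object: [ unused:25 | hash:31 | unused:1 | age:4 | biased_lock:1 | lock:2 ]
  //   biased object: [ JavaThread*:54 | epoch:2 | unused:1 | age:4 | 1 | 01 ]
  // The xor/and sequence above therefore leaves temp_reg zero iff the thread,
  // epoch, and pattern bits of the mark word all match the prototype header
  // ored with our thread pointer, i.e. iff we already own the bias.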
3162
3163   Label try_revoke_bias;
3164   Label try_rebias;
3165   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3166
3167   //----------------------------------------------------------------------------
3168   // At this point we know that the header has the bias pattern and
3169   // that we are not the bias owner in the current epoch. We need to
3170   // figure out more details about the state of the header in order to
3171   // know what operations can be legally performed on the object's
3172   // header.
3173
3174   // If the low three bits in the xor result aren't clear, that means
3175   // the prototype header is no longer biased and we have to revoke
3176   // the bias on this object.
3177   z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3178   z_brnaz(try_revoke_bias);
3179
3180   // Biasing is still enabled for this data type. See whether the
3181   // epoch of the current bias is still valid, meaning that the epoch
3182   // bits of the mark word are equal to the epoch bits of the
3183   // prototype header. (Note that the prototype header's epoch bits
3184   // only change at a safepoint.) If not, attempt to rebias the object
3185   // toward the current thread. Note that we must be absolutely sure
3186   // that the current epoch is invalid in order to do this because
3187   // otherwise the manipulations it performs on the mark word are
3188   // illegal.
3189   z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3190   z_brnaz(try_rebias);
3191
3192   //----------------------------------------------------------------------------
3193   // The epoch of the current bias is still valid but we know nothing
3194   // about the owner; it might be set or it might be clear. Try to
3195   // acquire the bias of the object using an atomic operation. If this
3196   // fails we will go into the runtime to revoke the object's bias.
3197   // Note that we first construct the presumed unbiased header so we
3198   // don't accidentally blow away another thread's valid bias.
3199   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3200          markOopDesc::epoch_mask_in_place);
3201   z_lgr(temp_reg, Z_thread);
3202   z_llgfr(mark_reg, mark_reg);
3203   z_ogr(temp_reg, mark_reg);
3204
3205   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3206
3207   z_csg(mark_reg, temp_reg, 0, obj_reg);
3208
3209   // If the biasing toward our thread failed, this means that
3210   // another thread succeeded in biasing it toward itself and we
3211   // need to revoke that bias. The revocation will occur in the
3212   // interpreter runtime in the slow case.
3213
3214   if (PrintBiasedLockingStatistics) {
3215     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3216                          temp_reg, temp2_reg);
3217   }
3218   if (slow_case != NULL) {
3219     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3220   }
3221   branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code.
3222
3223   //----------------------------------------------------------------------------
3224   bind(try_rebias);
3225   // At this point we know the epoch has expired, meaning that the
3226   // current "bias owner", if any, is actually invalid. Under these
3227   // circumstances _only_, we are allowed to use the current header's
3228   // value as the comparison value when doing the cas to acquire the
3229   // bias in the current epoch. In other words, we allow transfer of
3230   // the bias from one thread to another directly in this situation.
3231
3232   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
3233   load_prototype_header(temp_reg, obj_reg);
3234   z_llgfr(mark_reg, mark_reg);
3235
3236   z_ogr(temp_reg, Z_thread);
3237
3238   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3239
3240   z_csg(mark_reg, temp_reg, 0, obj_reg);
3241
3242   // If the biasing toward our thread failed, this means that
3243   // another thread succeeded in biasing it toward itself and we
3244   // need to revoke that bias. The revocation will occur in the
3245   // interpreter runtime in the slow case.
3246
3247   if (PrintBiasedLockingStatistics) {
3248     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3249   }
3250   if (slow_case != NULL) {
3251     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3252   }
3253   z_bru(done); // Biased lock status given in condition code.
3254
3255   //----------------------------------------------------------------------------
3256   bind(try_revoke_bias);
3257   // The prototype mark in the klass doesn't have the bias bit set any
3258   // more, indicating that objects of this data type are not supposed
3259   // to be biased any more. We are going to try to reset the mark of
3260   // this object to the prototype value and fall through to the
3261   // CAS-based locking scheme. Note that if our CAS fails, it means
3262   // that another thread raced us for the privilege of revoking the
3263   // bias of this particular object, so it's okay to continue in the
3264   // normal locking code.
3265   load_prototype_header(temp_reg, obj_reg);
3266
3267   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3268
3269   z_csg(mark_reg, temp_reg, 0, obj_reg);
3270
3271   // Fall through to the normal CAS-based lock, because no matter what
3272   // the result of the above CAS, some thread must have succeeded in
3273   // removing the bias bit from the object's header.
3274   if (PrintBiasedLockingStatistics) {
3275     // z_cgr(mark_reg, temp2_reg);
3276     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3277   }
3278
3279   bind(cas_label);
3280   BLOCK_COMMENT("} biased_locking_enter");
3281 }
3282
3283 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3284   // Check for biased locking unlock case, which is a no-op.
3285   // Note: we do not have to check the thread ID for two reasons.
3286   // First, the interpreter checks for IllegalMonitorStateException at
3287   // a higher level. Second, if the bias was revoked while we held the
3288   // lock, the object could not be rebiased toward another thread, so
3289   // the bias bit would be clear.
3290 BLOCK_COMMENT("biased_locking_exit {"); 3291 3292 z_lg(temp_reg, 0, mark_addr); 3293 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place); 3294 3295 z_chi(temp_reg, markOopDesc::biased_lock_pattern); 3296 z_bre(done); 3297 BLOCK_COMMENT("} biased_locking_exit"); 3298 } 3299 3300 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3301 Register displacedHeader = temp1; 3302 Register currentHeader = temp1; 3303 Register temp = temp2; 3304 NearLabel done, object_has_monitor; 3305 3306 BLOCK_COMMENT("compiler_fast_lock_object {"); 3307 3308 // Load markOop from oop into mark. 3309 z_lg(displacedHeader, 0, oop); 3310 3311 if (try_bias) { 3312 biased_locking_enter(oop, displacedHeader, temp, Z_R0, done); 3313 } 3314 3315 // Handle existing monitor. 3316 if ((EmitSync & 0x01) == 0) { 3317 // The object has an existing monitor iff (mark & monitor_value) != 0. 3318 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3319 z_lr(temp, displacedHeader); 3320 z_nill(temp, markOopDesc::monitor_value); 3321 z_brne(object_has_monitor); 3322 } 3323 3324 // Set mark to markOop | markOopDesc::unlocked_value. 3325 z_oill(displacedHeader, markOopDesc::unlocked_value); 3326 3327 // Load Compare Value application register. 3328 3329 // Initialize the box (must happen before we update the object mark). 3330 z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box); 3331 3332 // Memory Fence (in cmpxchgd) 3333 // Compare object markOop with mark and if equal exchange scratch1 with object markOop. 3334 3335 // If the compare-and-swap succeeded, then we found an unlocked object and we 3336 // have now locked it. 3337 z_csg(displacedHeader, box, 0, oop); 3338 assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture. 3339 z_bre(done); 3340 3341 // We did not see an unlocked object so try the fast recursive case. 3342 3343 z_sgr(currentHeader, Z_SP); 3344 load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place)); 3345 3346 z_ngr(currentHeader, temp); 3347 // z_brne(done); 3348 // z_release(); 3349 z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box); 3350 3351 z_bru(done); 3352 3353 if ((EmitSync & 0x01) == 0) { 3354 Register zero = temp; 3355 Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value. 3356 bind(object_has_monitor); 3357 // The object's monitor m is unlocked iff m->owner == NULL, 3358 // otherwise m->owner may contain a thread or a stack address. 3359 // 3360 // Try to CAS m->owner from NULL to current thread. 3361 z_lghi(zero, 0); 3362 // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. 3363 z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged); 3364 // Store a non-null value into the box. 3365 z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box); 3366 #ifdef ASSERT 3367 z_brne(done); 3368 // We've acquired the monitor, check some invariants. 3369 // Invariant 1: _recursions should be 0. 3370 asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged, 3371 "monitor->_recursions should be 0", -1); 3372 z_ltgr(zero, zero); // Set CR=EQ. 3373 #endif 3374 } 3375 bind(done); 3376 3377 BLOCK_COMMENT("} compiler_fast_lock_object"); 3378 // If locking was successful, CR should indicate 'EQ'. 
3379 // The compiler or the native wrapper generates a branch to the runtime call 3380 // _complete_monitor_locking_Java. 3381 } 3382 3383 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3384 Register displacedHeader = temp1; 3385 Register currentHeader = temp2; 3386 Register temp = temp1; 3387 Register monitor = temp2; 3388 3389 Label done, object_has_monitor; 3390 3391 BLOCK_COMMENT("compiler_fast_unlock_object {"); 3392 3393 if (try_bias) { 3394 biased_locking_exit(oop, currentHeader, done); 3395 } 3396 3397 // Find the lock address and load the displaced header from the stack. 3398 // if the displaced header is zero, we have a recursive unlock. 3399 load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes())); 3400 z_bre(done); 3401 3402 // Handle existing monitor. 3403 if ((EmitSync & 0x02) == 0) { 3404 // The object has an existing monitor iff (mark & monitor_value) != 0. 3405 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); 3406 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3407 z_nill(currentHeader, markOopDesc::monitor_value); 3408 z_brne(object_has_monitor); 3409 } 3410 3411 // Check if it is still a light weight lock, this is true if we see 3412 // the stack address of the basicLock in the markOop of the object 3413 // copy box to currentHeader such that csg does not kill it. 3414 z_lgr(currentHeader, box); 3415 z_csg(currentHeader, displacedHeader, 0, oop); 3416 z_bru(done); // Csg sets CR as desired. 3417 3418 // Handle existing monitor. 3419 if ((EmitSync & 0x02) == 0) { 3420 bind(object_has_monitor); 3421 z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set. 3422 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); 3423 z_brne(done); 3424 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 3425 z_brne(done); 3426 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); 3427 z_brne(done); 3428 load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); 3429 z_brne(done); 3430 z_release(); 3431 z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader); 3432 } 3433 3434 bind(done); 3435 3436 BLOCK_COMMENT("} compiler_fast_unlock_object"); 3437 // flag == EQ indicates success 3438 // flag == NE indicates failure 3439 } 3440 3441 // Write to card table for modification at store_addr - register is destroyed afterwards. 3442 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) { 3443 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set(); 3444 assert(bs->kind() == BarrierSet::CardTableForRS || 3445 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier"); 3446 assert_different_registers(store_addr, tmp); 3447 z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift); 3448 load_absolute_address(tmp, (address)bs->byte_map_base); 3449 z_agr(store_addr, tmp); 3450 z_mvi(0, store_addr, 0); // Store byte 0. 3451 } 3452 3453 #if INCLUDE_ALL_GCS 3454 3455 //------------------------------------------------------ 3456 // General G1 pre-barrier generator. 3457 // Purpose: record the previous value if it is not null. 3458 // All non-tmps are preserved. 
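// In C-like pseudo code, the barrier emitted below amounts to (a sketch,
// with queue bookkeeping names simplified):
//   if (thread->satb_mark_queue_active) {             // only during concurrent marking
//     pre_val = (Robj != NULL) ? *(Robj + offset) : Rpre_val;  // decoded if compressed
//     if (pre_val != NULL) {
//       if (index == 0) {                             // queue buffer is full
//         runtime_call(g1_wb_pre, pre_val, thread);
//       } else {
//         index -= wordSize;
//         buf[index] = pre_val;                       // record previous value
//       }
//     }
//   }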
3459 //------------------------------------------------------
3460 void MacroAssembler::g1_write_barrier_pre(Register           Robj,
3461                                           RegisterOrConstant offset,
3462                                           Register           Rpre_val,      // Ideally, this is a non-volatile register.
3463                                           Register           Rval,          // Will be preserved.
3464                                           Register           Rtmp1,         // If Rpre_val is volatile, either Rtmp1
3465                                           Register           Rtmp2,         // or Rtmp2 has to be non-volatile.
3466                                           bool               pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3467                                        ) {
3468   Label callRuntime, filtered;
3469   const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3470   const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3471   const int index_offset  = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3472   assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
3473
3474   BLOCK_COMMENT("g1_write_barrier_pre {");
3475
3476   // Is marking active?
3477   // Note: value is loaded for test purposes only. No further use here.
3478   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3479     load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3480   } else {
3481     guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3482     load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3483   }
3484   z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
3485
3486   // Do we need to load the previous value into Rpre_val?
3487   if (Robj != noreg) {
3488     // Load the previous value...
3489     Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3490     if (UseCompressedOops) {
3491       z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3492     } else {
3493       z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3494     }
3495   }
3496   assert(Rpre_val != noreg, "must have a real register");
3497
3498   // Is the previous value NULL?
3499   // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
3500   // Register content is preserved across the runtime call if the caller requests it.
3501   z_ltgr(Rpre_val, Rpre_val);
3502   z_bre(filtered); // previous value is NULL, so we don't need to record it.
3503
3504   // Decode the oop now. We know it's not NULL.
3505   if (Robj != noreg && UseCompressedOops) {
3506     oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3507   }
3508
3509   // OK, it's not filtered, so we'll need to call enqueue.
3510
3511   // We can store the original value in the thread's buffer
3512   // only if index > 0. Otherwise, we need runtime to handle.
3513   // (The index field is typed as size_t.)
3514   Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3515
3516   z_lg(Rbuffer, buffer_offset, Z_thread);
3517
3518   load_and_test_long(Rindex, Address(Z_thread, index_offset));
3519   z_bre(callRuntime); // If index == 0, goto runtime.
3520
3521   add2reg(Rindex, -wordSize); // Decrement index.
3522   z_stg(Rindex, index_offset, Z_thread);
3523
3524   // Record the previous value.
3525   z_stg(Rpre_val, 0, Rbuffer, Rindex);
3526   z_bru(filtered); // We are done.
3527
3528   Rbuffer = noreg; // end of life
3529   Rindex  = noreg; // end of life
3530
3531   bind(callRuntime);
3532
3533   // Save Rpre_val (result) over runtime call.
3534   // Requires Rtmp1, Rtmp2, or Rpre_val to be non-volatile.
3535 Register Rpre_save = Rpre_val; 3536 if (pre_val_needed && Rpre_val->is_volatile()) { 3537 guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!"); 3538 Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2; 3539 } 3540 lgr_if_needed(Rpre_save, Rpre_val); 3541 3542 // Preserve inputs by spilling them into the top frame. 3543 if (Robj != noreg && Robj->is_volatile()) { 3544 z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3545 } 3546 if (offset.is_register() && offset.as_register()->is_volatile()) { 3547 Register Roff = offset.as_register(); 3548 z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3549 } 3550 if (Rval != noreg && Rval->is_volatile()) { 3551 z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3552 } 3553 3554 // Push frame to protect top frame with return pc and spilled register values. 3555 save_return_pc(); 3556 push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs. 3557 3558 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, Z_thread); 3559 3560 pop_frame(); 3561 restore_return_pc(); 3562 3563 // Restore spilled values. 3564 if (Robj != noreg && Robj->is_volatile()) { 3565 z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3566 } 3567 if (offset.is_register() && offset.as_register()->is_volatile()) { 3568 Register Roff = offset.as_register(); 3569 z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3570 } 3571 if (Rval != noreg && Rval->is_volatile()) { 3572 z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3573 } 3574 3575 // Restore Rpre_val (result) after runtime call. 3576 lgr_if_needed(Rpre_val, Rpre_save); 3577 3578 bind(filtered); 3579 BLOCK_COMMENT("} g1_write_barrier_pre"); 3580 } 3581 3582 // General G1 post-barrier generator. 3583 // Purpose: Store cross-region card. 3584 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, 3585 Register Rnew_val, 3586 Register Rtmp1, 3587 Register Rtmp2, 3588 Register Rtmp3) { 3589 Label callRuntime, filtered; 3590 3591 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3. 3592 3593 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); 3594 assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); 3595 3596 BLOCK_COMMENT("g1_write_barrier_post {"); 3597 3598 // Does store cross heap regions? 3599 // It does if the two addresses specify different grain addresses. 3600 if (G1RSBarrierRegionFilter) { 3601 if (VM_Version::has_DistinctOpnds()) { 3602 z_xgrk(Rtmp1, Rstore_addr, Rnew_val); 3603 } else { 3604 z_lgr(Rtmp1, Rstore_addr); 3605 z_xgr(Rtmp1, Rnew_val); 3606 } 3607 z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); 3608 z_bre(filtered); 3609 } 3610 3611 // Crosses regions, storing NULL? 3612 #ifdef ASSERT 3613 z_ltgr(Rnew_val, Rnew_val); 3614 asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete: 3615 z_bre(filtered); // Safety net: don't break if we have a NULL oop. 3616 #endif 3617 Rnew_val = noreg; // end of lifetime 3618 3619 // Storing region crossing non-NULL, is card already dirty? 3620 assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code"); 3621 assert_different_registers(Rtmp1, Rtmp2, Rtmp3); 3622 // Make sure not to use Z_R0 for any of these registers. 3623 Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3; 3624 Register Rbase = (Rtmp2 != Z_R0_scratch) ? 
Rtmp2 : Rtmp3;
3625
3626   // calculate address of card
3627   load_const_optimized(Rbase, (address)bs->byte_map_base);        // Card table base.
3628   z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
3629   add2reg_with_index(Rcard_addr, 0, Rcard_addr, Rbase);           // Explicit calculation needed for cli.
3630   Rbase = noreg; // end of lifetime
3631
3632   // Filter young.
3633   assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
3634   z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
3635   z_bre(filtered);
3636
3637   // Check the card value. If dirty, we're done.
3638   // This also avoids false sharing of the (already dirty) card.
3639   z_sync(); // Required to support concurrent cleaning.
3640   assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
3641   z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
3642   z_bre(filtered);
3643
3644   // Storing a region crossing, non-NULL oop, card is clean.
3645   // Dirty card and log.
3646   z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
3647
3648   Register Rcard_addr_x = Rcard_addr;
3649   Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3650   Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3651   const int qidx_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3652   const int qbuf_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3653   if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3654     Rcard_addr_x = Z_R0_scratch; // Register shortage. We have to use Z_R0.
3655   }
3656   lgr_if_needed(Rcard_addr_x, Rcard_addr);
3657
3658   load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3659   z_bre(callRuntime); // Index == 0 then jump to runtime.
3660
3661   z_lg(Rqueue_buf, qbuf_off, Z_thread);
3662
3663   add2reg(Rqueue_index, -wordSize); // Decrement index.
3664   z_stg(Rqueue_index, qidx_off, Z_thread);
3665
3666   z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3667   z_bru(filtered);
3668
3669   bind(callRuntime);
3670
3671   // TODO: do we need a frame? Introduced to be on the safe side.
3672   bool needs_frame = true;
3673
3674   // The VM call needs a frame to access (write) registers.
3675   if (needs_frame) {
3676     save_return_pc();
3677     push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3678   }
3679
3680   // Save the live input values.
3681   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr_x, Z_thread);
3682
3683   if (needs_frame) {
3684     pop_frame();
3685     restore_return_pc();
3686   }
3687
3688   bind(filtered);
3689
3690   BLOCK_COMMENT("} g1_write_barrier_post");
3691 }
3692 #endif // INCLUDE_ALL_GCS
3693
3694 // Last_Java_sp must comply with the rules in frame_s390.hpp.
3695 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3696   BLOCK_COMMENT("set_last_Java_frame {");
3697
3698   // Always set last_Java_pc and flags first because once last_Java_sp
3699   // is visible, has_last_Java_frame is true and users will look at the
3700   // rest of the fields. (Note: flags should always be zero before we
3701   // get here so they don't need to be set.)
3702
3703   // Verify that last_Java_pc was zeroed on return to Java.
3704   if (allow_relocation) {
3705     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3706                             Z_thread,
3707                             "last_Java_pc not zeroed before leaving Java",
3708                             0x200);
3709   } else {
3710     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3711                                    Z_thread,
3712                                    "last_Java_pc not zeroed before leaving Java",
3713                                    0x200);
3714   }
3715
3716   // When returning from calling out from Java mode the frame anchor's
3717   // last_Java_pc will always be set to NULL. It is set here so that
3718   // if we are doing a call to native (not VM) that we capture the
3719   // known pc and don't have to rely on the native call having a
3720   // standard frame linkage where we can find the pc.
3721   if (last_Java_pc != noreg) {
3722     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3723   }
3724
3725   // This membar release is not required on z/Architecture, since the sequence of stores
3726   // is maintained. Nevertheless, we leave it in to document the required ordering.
3727   // The implementation of z_release() should be empty.
3728   // z_release();
3729
3730   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3731   BLOCK_COMMENT("} set_last_Java_frame");
3732 }
3733
3734 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3735   BLOCK_COMMENT("reset_last_Java_frame {");
3736
3737   if (allow_relocation) {
3738     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3739                                Z_thread,
3740                                "SP was not set, still zero",
3741                                0x202);
3742   } else {
3743     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3744                                       Z_thread,
3745                                       "SP was not set, still zero",
3746                                       0x202);
3747   }
3748
3749   // _last_Java_sp = 0
3750   // Clearing storage must be atomic here, so don't use clear_mem()!
3751   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3752
3753   // _last_Java_pc = 0
3754   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3755
3756   BLOCK_COMMENT("} reset_last_Java_frame");
3757   return;
3758 }
3759
3760 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3761   assert_different_registers(sp, tmp1);
3762
3763   // We cannot trust that code generated by the C++ compiler saves R14
3764   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3765   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3766   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3767   // it into the frame anchor.
3768   get_PC(tmp1);
3769   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3770 }
3771
3772 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3773   z_release();
3774
3775   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3776   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3777   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3778 }
3779
3780 void MacroAssembler::get_vm_result(Register oop_result) {
3781   verify_thread();
3782
3783   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3784   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3785
3786   verify_oop(oop_result);
3787 }
3788
3789 void MacroAssembler::get_vm_result_2(Register result) {
3790   verify_thread();
3791
3792   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3793   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3794 }
3795
3796 // We require that C code which does not return a value in vm_result will
3797 // leave it undisturbed.
3798 void MacroAssembler::set_vm_result(Register oop_result) {
3799   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3800 }
3801
3802 // Explicit null checks (used for method handle code).
3803 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3804   if (!ImplicitNullChecks) {
3805     NearLabel ok;
3806
3807     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3808
3809     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3810     address exception_entry = Interpreter::throw_NullPointerException_entry();
3811     load_absolute_address(reg, exception_entry);
3812     z_br(reg);
3813
3814     bind(ok);
3815   } else {
3816     if (needs_explicit_null_check((intptr_t)offset)) {
3817       // Provoke OS NULL exception if reg = NULL by
3818       // accessing M[reg] w/o changing any registers.
3819       z_lg(tmp, 0, reg);
3820     }
3821     // else
3822     //   Nothing to do, (later) access of M[reg + offset]
3823     //   will provoke OS NULL exception if reg = NULL.
3824   }
3825 }
3826
3827 //-------------------------------------
3828 //  Compressed Klass Pointers
3829 //-------------------------------------
3830
3831 // Klass oop manipulations if compressed.
3832 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3833   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3834   address  base    = Universe::narrow_klass_base();
3835   int      shift   = Universe::narrow_klass_shift();
3836   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3837
3838   BLOCK_COMMENT("cKlass encoder {");
3839
3840 #ifdef ASSERT
3841   Label ok;
3842   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3843   z_brc(Assembler::bcondAllZero, ok);
3844   // The plain disassembler does not recognize illtrap. It instead displays
3845   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3846   // the proper beginning of the next instruction.
3847 z_illtrap(0xee); 3848 z_illtrap(0xee); 3849 bind(ok); 3850 #endif 3851 3852 if (base != NULL) { 3853 unsigned int base_h = ((unsigned long)base)>>32; 3854 unsigned int base_l = (unsigned int)((unsigned long)base); 3855 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3856 lgr_if_needed(dst, current); 3857 z_aih(dst, -((int)base_h)); // Base has no set bits in lower half. 3858 } else if ((base_h == 0) && (base_l != 0)) { 3859 lgr_if_needed(dst, current); 3860 z_agfi(dst, -(int)base_l); 3861 } else { 3862 load_const(Z_R0, base); 3863 lgr_if_needed(dst, current); 3864 z_sgr(dst, Z_R0); 3865 } 3866 current = dst; 3867 } 3868 if (shift != 0) { 3869 assert (LogKlassAlignmentInBytes == shift, "decode alg wrong"); 3870 z_srlg(dst, current, shift); 3871 current = dst; 3872 } 3873 lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0). 3874 3875 BLOCK_COMMENT("} cKlass encoder"); 3876 } 3877 3878 // This function calculates the size of the code generated by 3879 // decode_klass_not_null(register dst, Register src) 3880 // when (Universe::heap() != NULL). Hence, if the instructions 3881 // it generates change, then this method needs to be updated. 3882 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3883 address base = Universe::narrow_klass_base(); 3884 int shift_size = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */ 3885 int addbase_size = 0; 3886 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3887 3888 if (base != NULL) { 3889 unsigned int base_h = ((unsigned long)base)>>32; 3890 unsigned int base_l = (unsigned int)((unsigned long)base); 3891 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3892 addbase_size += 6; /* aih */ 3893 } else if ((base_h == 0) && (base_l != 0)) { 3894 addbase_size += 6; /* algfi */ 3895 } else { 3896 addbase_size += load_const_size(); 3897 addbase_size += 4; /* algr */ 3898 } 3899 } 3900 #ifdef ASSERT 3901 addbase_size += 10; 3902 addbase_size += 2; // Extra sigill. 3903 #endif 3904 return addbase_size + shift_size; 3905 } 3906 3907 // !!! If the instructions that get generated here change 3908 // then function instr_size_for_decode_klass_not_null() 3909 // needs to get updated. 3910 // This variant of decode_klass_not_null() must generate predictable code! 3911 // The code must only depend on globally known parameters. 3912 void MacroAssembler::decode_klass_not_null(Register dst) { 3913 address base = Universe::narrow_klass_base(); 3914 int shift = Universe::narrow_klass_shift(); 3915 int beg_off = offset(); 3916 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3917 3918 BLOCK_COMMENT("cKlass decoder (const size) {"); 3919 3920 if (shift != 0) { // Shift required? 3921 z_sllg(dst, dst, shift); 3922 } 3923 if (base != NULL) { 3924 unsigned int base_h = ((unsigned long)base)>>32; 3925 unsigned int base_l = (unsigned int)((unsigned long)base); 3926 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3927 z_aih(dst, base_h); // Base has no set bits in lower half. 3928 } else if ((base_h == 0) && (base_l != 0)) { 3929 z_algfi(dst, base_l); // Base has no set bits in upper half. 3930 } else { 3931 load_const(Z_R0, base); // Base has set bits everywhere. 3932 z_algr(dst, Z_R0); 3933 } 3934 } 3935 3936 #ifdef ASSERT 3937 Label ok; 3938 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. 3939 z_brc(Assembler::bcondAllZero, ok); 3940 // The plain disassembler does not recognize illtrap. 
It instead displays
3941   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3942   // the proper beginning of the next instruction.
3943   z_illtrap(0xd1);
3944   z_illtrap(0xd1);
3945   bind(ok);
3946 #endif
3947   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3948
3949   BLOCK_COMMENT("} cKlass decoder (const size)");
3950 }
3951
3952 // This variant of decode_klass_not_null() is for cases where
3953 //  1) the size of the generated instructions may vary
3954 //  2) the result is (potentially) stored in a register different from the source.
3955 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3956   address base  = Universe::narrow_klass_base();
3957   int     shift = Universe::narrow_klass_shift();
3958   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3959
3960   BLOCK_COMMENT("cKlass decoder {");
3961
3962   if (src == noreg) src = dst;
3963
3964   if (shift != 0) { // Shift or at least move required?
3965     z_sllg(dst, src, shift);
3966   } else {
3967     lgr_if_needed(dst, src);
3968   }
3969
3970   if (base != NULL) {
3971     unsigned int base_h = ((unsigned long)base)>>32;
3972     unsigned int base_l = (unsigned int)((unsigned long)base);
3973     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3974       z_aih(dst, base_h);     // Base has no set bits in lower half.
3975     } else if ((base_h == 0) && (base_l != 0)) {
3976       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3977     } else {
3978       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3979       z_algr(dst, Z_R0);
3980     }
3981   }
3982
3983 #ifdef ASSERT
3984   Label ok;
3985   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3986   z_brc(Assembler::bcondAllZero, ok);
3987   // The plain disassembler does not recognize illtrap. It instead displays
3988   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3989   // the proper beginning of the next instruction.
3990   z_illtrap(0xd2);
3991   z_illtrap(0xd2);
3992   bind(ok);
3993 #endif
3994   BLOCK_COMMENT("} cKlass decoder");
3995 }
3996
3997 void MacroAssembler::load_klass(Register klass, Address mem) {
3998   if (UseCompressedClassPointers) {
3999     z_llgf(klass, mem);
4000     // Attention: no null check here!
4001     decode_klass_not_null(klass);
4002   } else {
4003     z_lg(klass, mem);
4004   }
4005 }
4006
4007 void MacroAssembler::load_klass(Register klass, Register src_oop) {
4008   if (UseCompressedClassPointers) {
4009     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4010     // Attention: no null check here!
4011 decode_klass_not_null(klass); 4012 } else { 4013 z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); 4014 } 4015 } 4016 4017 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) { 4018 assert_different_registers(Rheader, Rsrc_oop); 4019 load_klass(Rheader, Rsrc_oop); 4020 z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset())); 4021 } 4022 4023 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { 4024 if (UseCompressedClassPointers) { 4025 assert_different_registers(dst_oop, klass, Z_R0); 4026 if (ck == noreg) ck = klass; 4027 encode_klass_not_null(ck, klass); 4028 z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 4029 } else { 4030 z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 4031 } 4032 } 4033 4034 void MacroAssembler::store_klass_gap(Register s, Register d) { 4035 if (UseCompressedClassPointers) { 4036 assert(s != d, "not enough registers"); 4037 z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); 4038 } 4039 } 4040 4041 // Compare klass ptr in memory against klass ptr in register. 4042 // 4043 // Rop1 - klass in register, always uncompressed. 4044 // disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag. 4045 // Rbase - Base address of cKlass in memory. 4046 // maybeNULL - True if Rop1 possibly is a NULL. 4047 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) { 4048 4049 BLOCK_COMMENT("compare klass ptr {"); 4050 4051 if (UseCompressedClassPointers) { 4052 const int shift = Universe::narrow_klass_shift(); 4053 address base = Universe::narrow_klass_base(); 4054 4055 assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift"); 4056 assert_different_registers(Rop1, Z_R0); 4057 assert_different_registers(Rop1, Rbase, Z_R1); 4058 4059 // First encode register oop and then compare with cOop in memory. 4060 // This sequence saves an unnecessary cOop load and decode. 4061 if (base == NULL) { 4062 if (shift == 0) { 4063 z_cl(Rop1, disp, Rbase); // Unscaled 4064 } else { 4065 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4066 z_cl(Z_R0, disp, Rbase); 4067 } 4068 } else { // HeapBased 4069 #ifdef ASSERT 4070 bool used_R0 = true; 4071 bool used_R1 = true; 4072 #endif 4073 Register current = Rop1; 4074 Label done; 4075 4076 if (maybeNULL) { // NULL ptr must be preserved! 4077 z_ltgr(Z_R0, current); 4078 z_bre(done); 4079 current = Z_R0; 4080 } 4081 4082 unsigned int base_h = ((unsigned long)base)>>32; 4083 unsigned int base_l = (unsigned int)((unsigned long)base); 4084 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 4085 lgr_if_needed(Z_R0, current); 4086 z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. 4087 } else if ((base_h == 0) && (base_l != 0)) { 4088 lgr_if_needed(Z_R0, current); 4089 z_agfi(Z_R0, -(int)base_l); 4090 } else { 4091 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4092 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. 
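        // (Z_R1 was loaded by get_oop_base_complement() with the two's complement
        // of a "nice" value near the klass base, so this single address-type
        // calculation yields Rop1 - base: the small pow2_offset correction makes
        // up for the rounding of the base constant.)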
4093 } 4094 4095 if (shift != 0) { 4096 z_srlg(Z_R0, Z_R0, shift); 4097 } 4098 bind(done); 4099 z_cl(Z_R0, disp, Rbase); 4100 #ifdef ASSERT 4101 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4102 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4103 #endif 4104 } 4105 } else { 4106 z_clg(Rop1, disp, Z_R0, Rbase); 4107 } 4108 BLOCK_COMMENT("} compare klass ptr"); 4109 } 4110 4111 //--------------------------- 4112 // Compressed oops 4113 //--------------------------- 4114 4115 void MacroAssembler::encode_heap_oop(Register oop) { 4116 oop_encoder(oop, oop, true /*maybe null*/); 4117 } 4118 4119 void MacroAssembler::encode_heap_oop_not_null(Register oop) { 4120 oop_encoder(oop, oop, false /*not null*/); 4121 } 4122 4123 // Called with something derived from the oop base. e.g. oop_base>>3. 4124 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) { 4125 unsigned int oop_base_ll = ((unsigned int)(oop_base >> 0)) & 0xffff; 4126 unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff; 4127 unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff; 4128 unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff; 4129 unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1) 4130 + (oop_base_lh == 0 ? 0:1) 4131 + (oop_base_hl == 0 ? 0:1) 4132 + (oop_base_hh == 0 ? 0:1); 4133 4134 assert(oop_base != 0, "This is for HeapBased cOops only"); 4135 4136 if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2. 4137 uint64_t pow2_offset = 0x10000 - oop_base_ll; 4138 if (pow2_offset < 0x8000) { // This might not be necessary. 4139 uint64_t oop_base2 = oop_base + pow2_offset; 4140 4141 oop_base_ll = ((unsigned int)(oop_base2 >> 0)) & 0xffff; 4142 oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff; 4143 oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff; 4144 oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff; 4145 n_notzero_parts = (oop_base_ll == 0 ? 0:1) + 4146 (oop_base_lh == 0 ? 0:1) + 4147 (oop_base_hl == 0 ? 0:1) + 4148 (oop_base_hh == 0 ? 0:1); 4149 if (n_notzero_parts == 1) { 4150 assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register"); 4151 return -pow2_offset; 4152 } 4153 } 4154 } 4155 return 0; 4156 } 4157 4158 // If base address is offset from a straight power of two by just a few pages, 4159 // return this offset to the caller for a possible later composite add. 4160 // TODO/FIX: will only work correctly for 4k pages. 4161 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) { 4162 int pow2_offset = get_oop_base_pow2_offset(oop_base); 4163 4164 load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible. 4165 4166 return pow2_offset; 4167 } 4168 4169 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) { 4170 int offset = get_oop_base(Rbase, oop_base); 4171 z_lcgr(Rbase, Rbase); 4172 return -offset; 4173 } 4174 4175 // Compare compressed oop in memory against oop in register. 4176 // Rop1 - Oop in register. 4177 // disp - Offset of cOop in memory. 4178 // Rbase - Base address of cOop in memory. 4179 // maybeNULL - True if Rop1 possibly is a NULL. 4180 // maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction. 
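// The generated compare encodes the register operand on the fly:
//   cOop == (oop - narrow_oop_base) >> narrow_oop_shift
// and then compares the 32-bit result against the compressed oop in memory,
// saving the load and decode of the memory operand.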
4181 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) { 4182 Register Rbase = mem.baseOrR0(); 4183 Register Rindex = mem.indexOrR0(); 4184 int64_t disp = mem.disp(); 4185 4186 const int shift = Universe::narrow_oop_shift(); 4187 address base = Universe::narrow_oop_base(); 4188 4189 assert(UseCompressedOops, "must be on to call this method"); 4190 assert(Universe::heap() != NULL, "java heap must be initialized to call this method"); 4191 assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4192 assert_different_registers(Rop1, Z_R0); 4193 assert_different_registers(Rop1, Rbase, Z_R1); 4194 assert_different_registers(Rop1, Rindex, Z_R1); 4195 4196 BLOCK_COMMENT("compare heap oop {"); 4197 4198 // First encode register oop and then compare with cOop in memory. 4199 // This sequence saves an unnecessary cOop load and decode. 4200 if (base == NULL) { 4201 if (shift == 0) { 4202 z_cl(Rop1, disp, Rindex, Rbase); // Unscaled 4203 } else { 4204 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4205 z_cl(Z_R0, disp, Rindex, Rbase); 4206 } 4207 } else { // HeapBased 4208 #ifdef ASSERT 4209 bool used_R0 = true; 4210 bool used_R1 = true; 4211 #endif 4212 Label done; 4213 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4214 4215 if (maybeNULL) { // NULL ptr must be preserved! 4216 z_ltgr(Z_R0, Rop1); 4217 z_bre(done); 4218 } 4219 4220 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); 4221 z_srlg(Z_R0, Z_R0, shift); 4222 4223 bind(done); 4224 z_cl(Z_R0, disp, Rindex, Rbase); 4225 #ifdef ASSERT 4226 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4227 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4228 #endif 4229 } 4230 BLOCK_COMMENT("} compare heap oop"); 4231 } 4232 4233 // Load heap oop and decompress, if necessary. 4234 void MacroAssembler::load_heap_oop(Register dest, const Address &a) { 4235 if (UseCompressedOops) { 4236 z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0()); 4237 oop_decoder(dest, dest, true); 4238 } else { 4239 z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0()); 4240 } 4241 } 4242 4243 // Load heap oop and decompress, if necessary. 4244 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) { 4245 if (UseCompressedOops) { 4246 z_llgf(dest, disp, base); 4247 oop_decoder(dest, dest, true); 4248 } else { 4249 z_lg(dest, disp, base); 4250 } 4251 } 4252 4253 // Load heap oop and decompress, if necessary. 4254 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) { 4255 if (UseCompressedOops) { 4256 z_llgf(dest, disp, base); 4257 oop_decoder(dest, dest, false); 4258 } else { 4259 z_lg(dest, disp, base); 4260 } 4261 } 4262 4263 // Compress, if necessary, and store oop to heap. 4264 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) { 4265 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0; 4266 if (UseCompressedOops) { 4267 assert_different_registers(Roop, offset.register_or_noreg(), base); 4268 encode_heap_oop(Roop); 4269 z_st(Roop, offset.constant_or_zero(), Ridx, base); 4270 } else { 4271 z_stg(Roop, offset.constant_or_zero(), Ridx, base); 4272 } 4273 } 4274 4275 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL. 4276 void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) { 4277 Register Ridx = offset.is_register() ? 
offset.register_or_noreg() : Z_R0; 4278 if (UseCompressedOops) { 4279 assert_different_registers(Roop, offset.register_or_noreg(), base); 4280 encode_heap_oop_not_null(Roop); 4281 z_st(Roop, offset.constant_or_zero(), Ridx, base); 4282 } else { 4283 z_stg(Roop, offset.constant_or_zero(), Ridx, base); 4284 } 4285 } 4286 4287 // Store NULL oop to heap. 4288 void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) { 4289 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0; 4290 if (UseCompressedOops) { 4291 z_st(zero, offset.constant_or_zero(), Ridx, base); 4292 } else { 4293 z_stg(zero, offset.constant_or_zero(), Ridx, base); 4294 } 4295 } 4296 4297 //------------------------------------------------- 4298 // Encode compressed oop. Generally usable encoder. 4299 //------------------------------------------------- 4300 // Rsrc - contains regular oop on entry. It remains unchanged. 4301 // Rdst - contains compressed oop on exit. 4302 // Rdst and Rsrc may indicate same register, in which case Rsrc does not remain unchanged. 4303 // 4304 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality. 4305 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance. 4306 // 4307 // only32bitValid is set, if later code only uses the lower 32 bits. In this 4308 // case we must not fix the upper 32 bits. 4309 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL, 4310 Register Rbase, int pow2_offset, bool only32bitValid) { 4311 4312 const address oop_base = Universe::narrow_oop_base(); 4313 const int oop_shift = Universe::narrow_oop_shift(); 4314 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4315 4316 assert(UseCompressedOops, "must be on to call this method"); 4317 assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder"); 4318 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4319 4320 if (disjoint || (oop_base == NULL)) { 4321 BLOCK_COMMENT("cOop encoder zeroBase {"); 4322 if (oop_shift == 0) { 4323 if (oop_base != NULL && !only32bitValid) { 4324 z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again. 4325 } else { 4326 lgr_if_needed(Rdst, Rsrc); 4327 } 4328 } else { 4329 z_srlg(Rdst, Rsrc, oop_shift); 4330 if (oop_base != NULL && !only32bitValid) { 4331 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4332 } 4333 } 4334 BLOCK_COMMENT("} cOop encoder zeroBase"); 4335 return; 4336 } 4337 4338 bool used_R0 = false; 4339 bool used_R1 = false; 4340 4341 BLOCK_COMMENT("cOop encoder general {"); 4342 assert_different_registers(Rdst, Z_R1); 4343 assert_different_registers(Rsrc, Rbase); 4344 if (maybeNULL) { 4345 Label done; 4346 // We reorder shifting and subtracting, so that we can compare 4347 // and shift in parallel: 4348 // 4349 // cycle 0: potential LoadN, base = <const> 4350 // cycle 1: base = !base dst = src >> 3, cmp cr = (src != 0) 4351 // cycle 2: if (cr) br, dst = dst + base + offset 4352 4353 // Get oop_base components. 
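    // pow2_offset == -1 means no base register was preloaded, so materialize
    // the complemented (shifted) base here. Illustrative example, ignoring the
    // shift: for a heap base of 0xFFFF_F000, get_oop_base() loads the cheap
    // constant 0x1_0000_0000 and reports -0x1000 as correction; the complement
    // variant negates the register and flips the sign of the correction, so the
    // address arithmetic below computes src - 0x1_0000_0000 + 0x1000 == src - base.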
4354 if (pow2_offset == -1) { 4355 if (Rdst == Rbase) { 4356 if (Rdst == Z_R1 || Rsrc == Z_R1) { 4357 Rbase = Z_R0; 4358 used_R0 = true; 4359 } else { 4360 Rdst = Z_R1; 4361 used_R1 = true; 4362 } 4363 } 4364 if (Rbase == Z_R1) { 4365 used_R1 = true; 4366 } 4367 pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift); 4368 } 4369 assert_different_registers(Rdst, Rbase); 4370 4371 // Check for NULL oop (must be left alone) and shift. 4372 if (oop_shift != 0) { // Shift out alignment bits 4373 if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set. 4374 z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4375 } else { 4376 z_srlg(Rdst, Rsrc, oop_shift); 4377 z_ltgr(Rsrc, Rsrc); // This is the recommended way of testing for zero. 4378 // This probably is faster, as it does not write a register. No! 4379 // z_cghi(Rsrc, 0); 4380 } 4381 } else { 4382 z_ltgr(Rdst, Rsrc); // Move NULL to result register. 4383 } 4384 z_bre(done); 4385 4386 // Subtract oop_base components. 4387 if ((Rdst == Z_R0) || (Rbase == Z_R0)) { 4388 z_algr(Rdst, Rbase); 4389 if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); } 4390 } else { 4391 add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst); 4392 } 4393 if (!only32bitValid) { 4394 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4395 } 4396 bind(done); 4397 4398 } else { // not null 4399 // Get oop_base components. 4400 if (pow2_offset == -1) { 4401 pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base); 4402 } 4403 4404 // Subtract oop_base components and shift. 4405 if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) { 4406 // Don't use lay instruction. 4407 if (Rdst == Rsrc) { 4408 z_algr(Rdst, Rbase); 4409 } else { 4410 lgr_if_needed(Rdst, Rbase); 4411 z_algr(Rdst, Rsrc); 4412 } 4413 if (pow2_offset != 0) add2reg(Rdst, pow2_offset); 4414 } else { 4415 add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc); 4416 } 4417 if (oop_shift != 0) { // Shift out alignment bits. 4418 z_srlg(Rdst, Rdst, oop_shift); 4419 } 4420 if (!only32bitValid) { 4421 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4422 } 4423 } 4424 #ifdef ASSERT 4425 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); } 4426 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); } 4427 #endif 4428 BLOCK_COMMENT("} cOop encoder general"); 4429 } 4430 4431 //------------------------------------------------- 4432 // decode compressed oop. Generally usable decoder. 4433 //------------------------------------------------- 4434 // Rsrc - contains compressed oop on entry. 4435 // Rdst - contains regular oop on exit. 4436 // Rdst and Rsrc may indicate same register. 4437 // Rdst must not be the same register as Rbase, if Rbase was preloaded (before call). 4438 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch. 4439 // Rbase - register to use for the base 4440 // pow2_offset - offset of base to nice value. If -1, base must be loaded. 4441 // For performance, it is good to 4442 // - avoid Z_R0 for any of the argument registers. 4443 // - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance. 4444 // - avoid Z_R1 for Rdst if Rdst == Rbase. 
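// Decoding is the inverse of encoding: oop = (cOop << shift) + base.
// When the base does not intersect the bits of the shifted narrow oop
// (narrow_oop_base_disjoint()), the add degenerates into an OR of the
// base's high halfwords (z_oihl/z_oihh/z_oihf below), and no base register
// is needed at all.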
4445 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) { 4446 4447 const address oop_base = Universe::narrow_oop_base(); 4448 const int oop_shift = Universe::narrow_oop_shift(); 4449 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4450 4451 assert(UseCompressedOops, "must be on to call this method"); 4452 assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder"); 4453 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), 4454 "cOop encoder detected bad shift"); 4455 4456 // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary. 4457 4458 if (oop_base != NULL) { 4459 unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff; 4460 unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff; 4461 unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff; 4462 if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) { 4463 BLOCK_COMMENT("cOop decoder disjointBase {"); 4464 // We do not need to load the base. Instead, we can install the upper bits 4465 // with an OR instead of an ADD. 4466 Label done; 4467 4468 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4469 if (maybeNULL) { // NULL ptr must be preserved! 4470 z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4471 z_bre(done); 4472 } else { 4473 z_sllg(Rdst, Rsrc, oop_shift); // Logical shift leaves condition code alone. 4474 } 4475 if ((oop_base_hl != 0) && (oop_base_hh != 0)) { 4476 z_oihf(Rdst, oop_base_hf); 4477 } else if (oop_base_hl != 0) { 4478 z_oihl(Rdst, oop_base_hl); 4479 } else { 4480 assert(oop_base_hh != 0, "not heapbased mode"); 4481 z_oihh(Rdst, oop_base_hh); 4482 } 4483 bind(done); 4484 BLOCK_COMMENT("} cOop decoder disjointBase"); 4485 } else { 4486 BLOCK_COMMENT("cOop decoder general {"); 4487 // There are three decode steps: 4488 // scale oop offset (shift left) 4489 // get base (in reg) and pow2_offset (constant) 4490 // add base, pow2_offset, and oop offset 4491 // The following register overlap situations may exist: 4492 // Rdst == Rsrc, Rbase any other 4493 // not a problem. Scaling in-place leaves Rbase undisturbed. 4494 // Loading Rbase does not impact the scaled offset. 4495 // Rdst == Rbase, Rsrc any other 4496 // scaling would destroy a possibly preloaded Rbase. Loading Rbase 4497 // would destroy the scaled offset. 4498 // Remedy: use Rdst_tmp if Rbase has been preloaded. 4499 // use Rbase_tmp if base has to be loaded. 4500 // Rsrc == Rbase, Rdst any other 4501 // Only possible without preloaded Rbase. 4502 // Loading Rbase does not destroy compressed oop because it was scaled into Rdst before. 4503 // Rsrc == Rbase, Rdst == Rbase 4504 // Only possible without preloaded Rbase. 4505 // Loading Rbase would destroy compressed oop. Scaling in-place is ok. 4506 // Remedy: use Rbase_tmp. 4507 // 4508 Label done; 4509 Register Rdst_tmp = Rdst; 4510 Register Rbase_tmp = Rbase; 4511 bool used_R0 = false; 4512 bool used_R1 = false; 4513 bool base_preloaded = pow2_offset >= 0; 4514 guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller"); 4515 assert(oop_shift != 0, "room for optimization"); 4516 4517 // Check if we need to use scratch registers. 
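    // (Summary of the assumed remedy, mirroring the situation table above: with
    //  Rdst == Rbase, either the scaled offset or the base would be clobbered.
    //  If the base was preloaded, scale into a temporary result register; if it
    //  still has to be loaded, load it into a temporary base register. Z_R0 and
    //  Z_R1 serve as these temporaries.)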
4518 if (Rdst == Rbase) { 4519 assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg"); 4520 if (Rdst != Rsrc) { 4521 if (base_preloaded) { Rdst_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; } 4522 else { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; } 4523 } else { 4524 Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; 4525 } 4526 } 4527 if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase); 4528 4529 // Scale oop and check for NULL. 4530 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4531 if (maybeNULL) { // NULL ptr must be preserved! 4532 z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4533 z_bre(done); 4534 } else { 4535 z_sllg(Rdst_tmp, Rsrc, oop_shift); // Logical shift leaves condition code alone. 4536 } 4537 4538 // Get oop_base components. 4539 if (!base_preloaded) { 4540 pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base); 4541 } 4542 4543 // Add up all components. 4544 if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) { 4545 z_algr(Rdst_tmp, Rbase_tmp); 4546 if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); } 4547 } else { 4548 add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp); 4549 } 4550 4551 bind(done); 4552 lgr_if_needed(Rdst, Rdst_tmp); 4553 #ifdef ASSERT 4554 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); } 4555 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); } 4556 #endif 4557 BLOCK_COMMENT("} cOop decoder general"); 4558 } 4559 } else { 4560 BLOCK_COMMENT("cOop decoder zeroBase {"); 4561 if (oop_shift == 0) { 4562 lgr_if_needed(Rdst, Rsrc); 4563 } else { 4564 z_sllg(Rdst, Rsrc, oop_shift); 4565 } 4566 BLOCK_COMMENT("} cOop decoder zeroBase"); 4567 } 4568 } 4569 4570 void MacroAssembler::load_mirror(Register mirror, Register method) { 4571 mem2reg_opt(mirror, Address(method, Method::const_offset())); 4572 mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset())); 4573 mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes())); 4574 mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset())); 4575 } 4576 4577 //--------------------------------------------------------------- 4578 //--- Operations on arrays. 4579 //--------------------------------------------------------------- 4580 4581 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4582 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4583 // work registers anyway. 4584 // Actually, only r0, r1, and r5 are killed. 4585 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) { 4586 // Src_addr is evenReg. 4587 // Src_len is odd_Reg. 4588 4589 int block_start = offset(); 4590 Register tmp_reg = src_len; // Holds target instr addr for EX. 4591 Register dst_len = Z_R1; // Holds dst len for MVCLE. 4592 Register dst_addr = Z_R0; // Holds dst addr for MVCLE. 4593 4594 Label doXC, doMVCLE, done; 4595 4596 BLOCK_COMMENT("Clear_Array {"); 4597 4598 // Check for zero len and convert to long. 4599 z_ltgfr(src_len, cnt_arg); // Remember casted value for doSTG case. 4600 z_bre(done); // Nothing to do if len == 0. 4601 4602 // Prefetch data to be cleared. 4603 if (VM_Version::has_Prefetch()) { 4604 z_pfd(0x02, 0, Z_R0, base_pointer_arg); 4605 z_pfd(0x02, 256, Z_R0, base_pointer_arg); 4606 } 4607 4608 z_sllg(dst_len, src_len, 3); // #bytes to clear. 
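  // (Background, assumed z/Architecture behavior: XC carries an 8-bit length
  //  field encoding 1..256 bytes, and EX/EXRL patch that field at run time from
  //  dst_len-1. A single executed XC can therefore clear at most 256 bytes,
  //  i.e. 32 doublewords, which is the threshold tested below.)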
4609 z_cghi(src_len, 32); // Check for len <= 256 bytes (<=32 DW). 4610 z_brnh(doXC); // If so, use executed XC to clear. 4611 4612 // MVCLE: initialize long arrays (general case). 4613 bind(doMVCLE); 4614 z_lgr(dst_addr, base_pointer_arg); 4615 clear_reg(src_len, true, false); // Src len of MVCLE is zero. 4616 4617 MacroAssembler::move_long_ext(dst_addr, src_addr, 0); 4618 z_bru(done); 4619 4620 // XC: initialize short arrays. 4621 Label XC_template; // Instr template, never exec directly! 4622 bind(XC_template); 4623 z_xc(0,0,base_pointer_arg,0,base_pointer_arg); 4624 4625 bind(doXC); 4626 add2reg(dst_len, -1); // Get #bytes-1 for EXECUTE. 4627 if (VM_Version::has_ExecuteExtensions()) { 4628 z_exrl(dst_len, XC_template); // Execute XC with var. len. 4629 } else { 4630 z_larl(tmp_reg, XC_template); 4631 z_ex(dst_len,0,Z_R0,tmp_reg); // Execute XC with var. len. 4632 } 4633 // z_bru(done); // fallthru 4634 4635 bind(done); 4636 4637 BLOCK_COMMENT("} Clear_Array"); 4638 4639 int block_end = offset(); 4640 return block_end - block_start; 4641 } 4642 4643 // Compiler ensures base is doubleword aligned and cnt is count of doublewords. 4644 // Emitter does not KILL any arguments nor work registers. 4645 // Emitter generates up to 16 XC instructions, depending on the array length. 4646 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) { 4647 int block_start = offset(); 4648 int off; 4649 int lineSize_Bytes = AllocatePrefetchStepSize; 4650 int lineSize_DW = AllocatePrefetchStepSize>>LogBytesPerWord; 4651 bool doPrefetch = VM_Version::has_Prefetch(); 4652 int XC_maxlen = 256; 4653 int numXCInstr = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0; 4654 4655 BLOCK_COMMENT("Clear_Array_Const {"); 4656 assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only"); 4657 4658 // Do less prefetching for very short arrays. 4659 if (numXCInstr > 0) { 4660 // Prefetch only some cache lines, then begin clearing. 4661 if (doPrefetch) { 4662 if (cnt*BytesPerWord <= lineSize_Bytes/4) { // If less than 1/4 of a cache line to clear, 4663 z_pfd(0x02, 0, Z_R0, base); // prefetch just the first cache line. 4664 } else { 4665 assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines"); 4666 for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) { 4667 z_pfd(0x02, off*lineSize_Bytes, Z_R0, base); 4668 } 4669 } 4670 } 4671 4672 for (off=0; off<(numXCInstr-1); off++) { 4673 z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base); 4674 4675 // Prefetch some cache lines in advance. 4676 if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) { 4677 z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base); 4678 } 4679 } 4680 if (off*XC_maxlen < cnt*BytesPerWord) { 4681 z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base); 4682 } 4683 } 4684 BLOCK_COMMENT("} Clear_Array_Const"); 4685 4686 int block_end = offset(); 4687 return block_end - block_start; 4688 } 4689 4690 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4691 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4692 // work registers anyway. 4693 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed. 4694 // 4695 // For very large arrays, exploit MVCLE H/W support. 4696 // MVCLE instruction automatically exploits H/W-optimized page mover. 4697 // - Bytes up to next page boundary are cleared with a series of XC to self. 
4698 // - All full pages are cleared with the page mover H/W assist. 4699 // - Remaining bytes are again cleared by a series of XC to self. 4700 // 4701 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) { 4702 // Src_addr is evenReg. 4703 // Src_len is odd_Reg. 4704 4705 int block_start = offset(); 4706 Register dst_len = Z_R1; // Holds dst len for MVCLE. 4707 Register dst_addr = Z_R0; // Holds dst addr for MVCLE. 4708 4709 BLOCK_COMMENT("Clear_Array_Const_Big {"); 4710 4711 // Get len to clear. 4712 load_const_optimized(dst_len, (long)cnt*8L); // in Bytes = #DW*8 4713 4714 // Prepare other args to MVCLE. 4715 z_lgr(dst_addr, base_pointer_arg); 4716 // Indicate unused result. 4717 (void) clear_reg(src_len, true, false); // Src len of MVCLE is zero. 4718 4719 // Clear. 4720 MacroAssembler::move_long_ext(dst_addr, src_addr, 0); 4721 BLOCK_COMMENT("} Clear_Array_Const_Big"); 4722 4723 int block_end = offset(); 4724 return block_end - block_start; 4725 } 4726 4727 // Allocator. 4728 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg, 4729 Register cnt_reg, 4730 Register tmp1_reg, Register tmp2_reg) { 4731 // Tmp1 is oddReg. 4732 // Tmp2 is evenReg. 4733 4734 int block_start = offset(); 4735 Label doMVC, doMVCLE, done, MVC_template; 4736 4737 BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {"); 4738 4739 // Check for zero len and convert to long. 4740 z_ltgfr(cnt_reg, cnt_reg); // Remember casted value for doSTG case. 4741 z_bre(done); // Nothing to do if len == 0. 4742 4743 z_sllg(Z_R1, cnt_reg, 3); // Dst len in bytes. calc early to have the result ready. 4744 4745 z_cghi(cnt_reg, 32); // Check for len <= 256 bytes (<=32 DW). 4746 z_brnh(doMVC); // If so, use executed MVC to clear. 4747 4748 bind(doMVCLE); // A lot of data (more than 256 bytes). 4749 // Prep dest reg pair. 4750 z_lgr(Z_R0, dst_reg); // dst addr 4751 // Dst len already in Z_R1. 4752 // Prep src reg pair. 4753 z_lgr(tmp2_reg, src_reg); // src addr 4754 z_lgr(tmp1_reg, Z_R1); // Src len same as dst len. 4755 4756 // Do the copy. 4757 move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache. 4758 z_bru(done); // All done. 4759 4760 bind(MVC_template); // Just some data (not more than 256 bytes). 4761 z_mvc(0, 0, dst_reg, 0, src_reg); 4762 4763 bind(doMVC); 4764 4765 if (VM_Version::has_ExecuteExtensions()) { 4766 add2reg(Z_R1, -1); 4767 } else { 4768 add2reg(tmp1_reg, -1, Z_R1); 4769 z_larl(Z_R1, MVC_template); 4770 } 4771 4772 if (VM_Version::has_Prefetch()) { 4773 z_pfd(1, 0,Z_R0,src_reg); 4774 z_pfd(2, 0,Z_R0,dst_reg); 4775 // z_pfd(1,256,Z_R0,src_reg); // Assume very short copy. 4776 // z_pfd(2,256,Z_R0,dst_reg); 4777 } 4778 4779 if (VM_Version::has_ExecuteExtensions()) { 4780 z_exrl(Z_R1, MVC_template); 4781 } else { 4782 z_ex(tmp1_reg, 0, Z_R0, Z_R1); 4783 } 4784 4785 bind(done); 4786 4787 BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint"); 4788 4789 int block_end = offset(); 4790 return block_end - block_start; 4791 } 4792 4793 //------------------------------------------------------ 4794 // Special String Intrinsics. Implementation 4795 //------------------------------------------------------ 4796 4797 // Intrinsics for CompactStrings 4798 4799 // Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result 4800 // The result is the number of characters copied before the first incompatible character was found. 
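// (An "incompatible" character is one that does not fit into a byte, i.e. has a
// bit set in 0xff00. The fast loop below tests eight characters at a time
// against the 0xFF00FF00 mask; the slow loop tests one at a time via TMLL.)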
4801 // If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise. 4802 // 4803 // Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure: 4804 // - Different number of characters may have been written to dead array (if tmp2 not provided). 4805 // - Returns a number <cnt instead of 0. (Result gets compared with cnt.) 4806 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register odd_reg, 4807 Register even_reg, Register tmp, Register tmp2) { 4808 int block_start = offset(); 4809 Label Lloop1, Lloop2, Lslow, Ldone; 4810 const Register addr2 = dst, ind1 = result, mask = tmp; 4811 const bool precise = (tmp2 != noreg); 4812 4813 BLOCK_COMMENT("string_compress {"); 4814 4815 z_sll(odd_reg, 1); // Number of bytes to read. (Must be a positive simm32.) 4816 clear_reg(ind1); // Index to read. 4817 z_llilf(mask, 0xFF00FF00); 4818 z_ahi(odd_reg, -16); // Last possible index for fast loop. 4819 z_brl(Lslow); 4820 4821 // ind1: index, even_reg: index increment, odd_reg: index limit 4822 z_iihf(mask, 0xFF00FF00); 4823 z_lhi(even_reg, 16); 4824 4825 bind(Lloop1); // 8 Characters per iteration. 4826 z_lg(Z_R0, Address(src, ind1)); 4827 z_lg(Z_R1, Address(src, ind1, 8)); 4828 if (precise) { 4829 if (VM_Version::has_DistinctOpnds()) { 4830 z_ogrk(tmp2, Z_R0, Z_R1); 4831 } else { 4832 z_lgr(tmp2, Z_R0); 4833 z_ogr(tmp2, Z_R1); 4834 } 4835 z_ngr(tmp2, mask); 4836 z_brne(Lslow); // Failed fast case, retry slowly. 4837 } 4838 z_stcmh(Z_R0, 5, 0, addr2); 4839 z_stcm(Z_R0, 5, 2, addr2); 4840 if (!precise) { z_ogr(Z_R0, Z_R1); } 4841 z_stcmh(Z_R1, 5, 4, addr2); 4842 z_stcm(Z_R1, 5, 6, addr2); 4843 if (!precise) { 4844 z_ngr(Z_R0, mask); 4845 z_brne(Ldone); // Failed (more than needed was written). 4846 } 4847 z_aghi(addr2, 8); 4848 z_brxle(ind1, even_reg, Lloop1); 4849 4850 bind(Lslow); 4851 // Compute index limit and skip if negative. 4852 z_ahi(odd_reg, 16-2); // Last possible index for slow loop. 4853 z_lhi(even_reg, 2); 4854 z_cr(ind1, odd_reg); 4855 z_brh(Ldone); 4856 4857 bind(Lloop2); // 1 Character per iteration. 4858 z_llh(Z_R0, Address(src, ind1)); 4859 z_tmll(Z_R0, 0xFF00); 4860 z_brnaz(Ldone); // Failed slow case: Return number of written characters. 4861 z_stc(Z_R0, Address(addr2)); 4862 z_aghi(addr2, 1); 4863 z_brxle(ind1, even_reg, Lloop2); 4864 4865 bind(Ldone); // result = ind1 = 2*cnt 4866 z_srl(ind1, 1); 4867 4868 BLOCK_COMMENT("} string_compress"); 4869 4870 return offset() - block_start; 4871 } 4872 4873 // Inflate byte[] to char[]. 4874 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) { 4875 int block_start = offset(); 4876 4877 BLOCK_COMMENT("string_inflate {"); 4878 4879 Register stop_char = Z_R0; 4880 Register table = Z_R1; 4881 Register src_addr = tmp; 4882 4883 assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt); 4884 assert(dst->encoding()%2 == 0, "must be even reg"); 4885 assert(cnt->encoding()%2 == 1, "must be odd reg"); 4886 assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair"); 4887 4888 StubRoutines::zarch::generate_load_trot_table_addr(this, table); // kills Z_R0 (if ASSERT) 4889 clear_reg(stop_char); // Stop character. Not used here, but initialized to have a defined value. 4890 lgr_if_needed(src_addr, src); 4891 z_llgfr(cnt, cnt); // # src characters, must be a positive simm32. 
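  // (Assumed TROT semantics, for reference: translate-one-to-two fetches one
  //  source byte at a time, looks up its two-byte replacement in the table
  //  addressed by Z_R1, and stores that at the destination. Z_R0 holds the test
  //  character that could end the translation early; translate_ot() retries
  //  while CC==3, i.e. until the CPU has processed the entire range.)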
4892 4893 translate_ot(dst, src_addr, /* mask = */ 0x0001); 4894 4895 BLOCK_COMMENT("} string_inflate"); 4896 4897 return offset() - block_start; 4898 } 4899 4900 // Inflate byte[] to char[]. odd_reg contains cnt. Kills src. 4901 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg, 4902 Register even_reg, Register tmp) { 4903 int block_start = offset(); 4904 4905 BLOCK_COMMENT("string_inflate {"); 4906 4907 Label Lloop1, Lloop2, Lslow, Ldone; 4908 const Register addr1 = src, ind2 = tmp; 4909 4910 z_sll(odd_reg, 1); // Number of bytes to write. (Must be a positive simm32.) 4911 clear_reg(ind2); // Index to write. 4912 z_ahi(odd_reg, -16); // Last possible index for fast loop. 4913 z_brl(Lslow); 4914 4915 // ind2: index, even_reg: index increment, odd_reg: index limit 4916 clear_reg(Z_R0); 4917 clear_reg(Z_R1); 4918 z_lhi(even_reg, 16); 4919 4920 bind(Lloop1); // 8 Characters per iteration. 4921 z_icmh(Z_R0, 5, 0, addr1); 4922 z_icmh(Z_R1, 5, 4, addr1); 4923 z_icm(Z_R0, 5, 2, addr1); 4924 z_icm(Z_R1, 5, 6, addr1); 4925 z_aghi(addr1, 8); 4926 z_stg(Z_R0, Address(dst, ind2)); 4927 z_stg(Z_R1, Address(dst, ind2, 8)); 4928 z_brxle(ind2, even_reg, Lloop1); 4929 4930 bind(Lslow); 4931 // Compute index limit and skip if negative. 4932 z_ahi(odd_reg, 16-2); // Last possible index for slow loop. 4933 z_lhi(even_reg, 2); 4934 z_cr(ind2, odd_reg); 4935 z_brh(Ldone); 4936 4937 bind(Lloop2); // 1 Character per iteration. 4938 z_llc(Z_R0, Address(addr1)); 4939 z_sth(Z_R0, Address(dst, ind2)); 4940 z_aghi(addr1, 1); 4941 z_brxle(ind2, even_reg, Lloop2); 4942 4943 bind(Ldone); 4944 4945 BLOCK_COMMENT("} string_inflate"); 4946 4947 return offset() - block_start; 4948 } 4949 4950 // Kills src. 4951 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt, 4952 Register odd_reg, Register even_reg, Register tmp) { 4953 int block_start = offset(); 4954 Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone; 4955 const Register addr = src, mask = tmp; 4956 4957 BLOCK_COMMENT("has_negatives {"); 4958 4959 z_llgfr(Z_R1, cnt); // Number of bytes to read. (Must be a positive simm32.) 4960 z_llilf(mask, 0x80808080); 4961 z_lhi(result, 1); // Assume true. 4962 // Last possible addr for fast loop. 4963 z_lay(odd_reg, -16, Z_R1, src); 4964 z_chi(cnt, 16); 4965 z_brl(Lslow); 4966 4967 // ind1: index, even_reg: index increment, odd_reg: index limit 4968 z_iihf(mask, 0x80808080); 4969 z_lghi(even_reg, 16); 4970 4971 bind(Lloop1); // 16 bytes per iteration. 4972 z_lg(Z_R0, Address(addr)); 4973 z_lg(Z_R1, Address(addr, 8)); 4974 z_ogr(Z_R0, Z_R1); 4975 z_ngr(Z_R0, mask); 4976 z_brne(Ldone); // If found return 1. 4977 z_brxlg(addr, even_reg, Lloop1); 4978 4979 bind(Lslow); 4980 z_aghi(odd_reg, 16-1); // Last possible addr for slow loop. 4981 z_lghi(even_reg, 1); 4982 z_cgr(addr, odd_reg); 4983 z_brh(Lnotfound); 4984 4985 bind(Lloop2); // 1 byte per iteration. 4986 z_cli(Address(addr), 0x80); 4987 z_brnl(Ldone); // If found return 1. 
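  // (Note: CLI is an unsigned compare, so "not low" above means byte >= 0x80,
  //  i.e. the sign bit of the byte is set.)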
4988 z_brxlg(addr, even_reg, Lloop2); 4989 4990 bind(Lnotfound); 4991 z_lhi(result, 0); 4992 4993 bind(Ldone); 4994 4995 BLOCK_COMMENT("} has_negatives"); 4996 4997 return offset() - block_start; 4998 } 4999 5000 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result 5001 unsigned int MacroAssembler::string_compare(Register str1, Register str2, 5002 Register cnt1, Register cnt2, 5003 Register odd_reg, Register even_reg, Register result, int ae) { 5004 int block_start = offset(); 5005 5006 assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result); 5007 assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result); 5008 5009 // If strings are equal up to min length, return the length difference. 5010 const Register diff = result, // Pre-set result with length difference. 5011 min = cnt1, // min number of bytes 5012 tmp = cnt2; 5013 5014 // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a) 5015 // we interchange str1 and str2 in the UL case and negate the result. 5016 // Like this, str1 is always latin1 encoded, except for the UU case. 5017 // In addition, we need 0 (or sign which is 0) extend when using 64 bit register. 5018 const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL); 5019 5020 BLOCK_COMMENT("string_compare {"); 5021 5022 if (used_as_LU) { 5023 z_srl(cnt2, 1); 5024 } 5025 5026 // See if the lengths are different, and calculate min in cnt1. 5027 // Save diff in case we need it for a tie-breaker. 5028 5029 // diff = cnt1 - cnt2 5030 if (VM_Version::has_DistinctOpnds()) { 5031 z_srk(diff, cnt1, cnt2); 5032 } else { 5033 z_lr(diff, cnt1); 5034 z_sr(diff, cnt2); 5035 } 5036 if (str1 != str2) { 5037 if (VM_Version::has_LoadStoreConditional()) { 5038 z_locr(min, cnt2, Assembler::bcondHigh); 5039 } else { 5040 Label Lskip; 5041 z_brl(Lskip); // min ok if cnt1 < cnt2 5042 z_lr(min, cnt2); // min = cnt2 5043 bind(Lskip); 5044 } 5045 } 5046 5047 if (ae == StrIntrinsicNode::UU) { 5048 z_sra(diff, 1); 5049 } 5050 if (str1 != str2) { 5051 Label Ldone; 5052 if (used_as_LU) { 5053 // Loop which searches the first difference character by character. 5054 Label Lloop; 5055 const Register ind1 = Z_R1, 5056 ind2 = min; 5057 int stride1 = 1, stride2 = 2; // See comment above. 5058 5059 // ind1: index, even_reg: index increment, odd_reg: index limit 5060 z_llilf(ind1, (unsigned int)(-stride1)); 5061 z_lhi(even_reg, stride1); 5062 add2reg(odd_reg, -stride1, min); 5063 clear_reg(ind2); // kills min 5064 5065 bind(Lloop); 5066 z_brxh(ind1, even_reg, Ldone); 5067 z_llc(tmp, Address(str1, ind1)); 5068 z_llh(Z_R0, Address(str2, ind2)); 5069 z_ahi(ind2, stride2); 5070 z_sr(tmp, Z_R0); 5071 z_bre(Lloop); 5072 5073 z_lr(result, tmp); 5074 5075 } else { 5076 // Use clcle in fast loop (only for same encoding). 5077 z_lgr(Z_R0, str1); 5078 z_lgr(even_reg, str2); 5079 z_llgfr(Z_R1, min); 5080 z_llgfr(odd_reg, min); 5081 5082 if (ae == StrIntrinsicNode::LL) { 5083 compare_long_ext(Z_R0, even_reg, 0); 5084 } else { 5085 compare_long_uni(Z_R0, even_reg, 0); 5086 } 5087 z_bre(Ldone); 5088 z_lgr(Z_R1, Z_R0); 5089 if (ae == StrIntrinsicNode::LL) { 5090 z_llc(Z_R0, Address(even_reg)); 5091 z_llc(result, Address(Z_R1)); 5092 } else { 5093 z_llh(Z_R0, Address(even_reg)); 5094 z_llh(result, Address(Z_R1)); 5095 } 5096 z_sr(result, Z_R0); 5097 } 5098 5099 // Otherwise, return the difference between the first mismatched chars. 
5100 bind(Ldone); 5101 } 5102 5103 if (ae == StrIntrinsicNode::UL) { 5104 z_lcr(result, result); // Negate result (see note above). 5105 } 5106 5107 BLOCK_COMMENT("} string_compare"); 5108 5109 return offset() - block_start; 5110 } 5111 5112 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit, 5113 Register odd_reg, Register even_reg, Register result, bool is_byte) { 5114 int block_start = offset(); 5115 5116 BLOCK_COMMENT("array_equals {"); 5117 5118 assert_different_registers(ary1, limit, odd_reg, even_reg); 5119 assert_different_registers(ary2, limit, odd_reg, even_reg); 5120 5121 Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template; 5122 int base_offset = 0; 5123 5124 if (ary1 != ary2) { 5125 if (is_array_equ) { 5126 base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 5127 5128 // Return true if the same array. 5129 compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true); 5130 5131 // Return false if one of them is NULL. 5132 compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5133 compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5134 5135 // Load the lengths of arrays. 5136 z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes())); 5137 5138 // Return false if the two arrays are not equal length. 5139 z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes())); 5140 z_brne(Ldone_false); 5141 5142 // string len in bytes (right operand) 5143 if (!is_byte) { 5144 z_chi(odd_reg, 128); 5145 z_sll(odd_reg, 1); // preserves flags 5146 z_brh(Lclcle); 5147 } else { 5148 compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5149 } 5150 } else { 5151 z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value. 5152 compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5153 } 5154 5155 5156 // Use clc instruction for up to 256 bytes. 5157 { 5158 Register str1_reg = ary1, 5159 str2_reg = ary2; 5160 if (is_array_equ) { 5161 str1_reg = Z_R1; 5162 str2_reg = even_reg; 5163 add2reg(str1_reg, base_offset, ary1); // string addr (left operand) 5164 add2reg(str2_reg, base_offset, ary2); // string addr (right operand) 5165 } 5166 z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0. 5167 z_brl(Ldone_true); 5168 // Note: We could jump to the template if equal. 5169 5170 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5171 z_exrl(odd_reg, CLC_template); 5172 z_bre(Ldone_true); 5173 // fall through 5174 5175 bind(Ldone_false); 5176 clear_reg(result); 5177 z_bru(Ldone); 5178 5179 bind(CLC_template); 5180 z_clc(0, 0, str1_reg, 0, str2_reg); 5181 } 5182 5183 // Use clcle instruction. 5184 { 5185 bind(Lclcle); 5186 add2reg(even_reg, base_offset, ary2); // string addr (right operand) 5187 add2reg(Z_R0, base_offset, ary1); // string addr (left operand) 5188 5189 z_lgr(Z_R1, odd_reg); // string len in bytes (left operand) 5190 if (is_byte) { 5191 compare_long_ext(Z_R0, even_reg, 0); 5192 } else { 5193 compare_long_uni(Z_R0, even_reg, 0); 5194 } 5195 z_lghi(result, 0); // Preserve flags. 5196 z_brne(Ldone); 5197 } 5198 } 5199 // fall through 5200 5201 bind(Ldone_true); 5202 z_lghi(result, 1); // All characters are equal. 
5203 bind(Ldone); 5204 5205 BLOCK_COMMENT("} array_equals"); 5206 5207 return offset() - block_start; 5208 } 5209 5210 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result 5211 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt, 5212 Register needle, Register needlecnt, int needlecntval, 5213 Register odd_reg, Register even_reg, int ae) { 5214 int block_start = offset(); 5215 5216 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite! 5217 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 5218 const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2; 5219 const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1; 5220 Label L_needle1, L_Found, L_NotFound; 5221 5222 BLOCK_COMMENT("string_indexof {"); 5223 5224 if (needle == haystack) { 5225 z_lhi(result, 0); 5226 } else { 5227 5228 // Load first character of needle (R0 used by search_string instructions). 5229 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } 5230 5231 // Compute last haystack addr to use if no match gets found. 5232 if (needlecnt != noreg) { // variable needlecnt 5233 z_ahi(needlecnt, -1); // Remaining characters after first one. 5234 z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare. 5235 if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes. 5236 } else { // constant needlecnt 5237 assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate"); 5238 // Compute index succeeding last element to compare. 5239 if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); } 5240 } 5241 5242 z_llgfr(haycnt, haycnt); // Clear high half. 5243 z_lgr(result, haystack); // Final result will be computed from needle start pointer. 5244 if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes. 5245 z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)). 5246 5247 if (h_csize != n_csize) { 5248 assert(ae == StrIntrinsicNode::UL, "Invalid encoding"); 5249 5250 if (needlecnt != noreg || needlecntval != 1) { 5251 if (needlecnt != noreg) { 5252 compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1); 5253 } 5254 5255 // Main Loop: UL version (now we have at least 2 characters). 5256 Label L_OuterLoop, L_InnerLoop, L_Skip; 5257 bind(L_OuterLoop); // Search for 1st 2 characters. 5258 z_lgr(Z_R1, haycnt); 5259 MacroAssembler::search_string_uni(Z_R1, result); 5260 z_brc(Assembler::bcondNotFound, L_NotFound); 5261 z_lgr(result, Z_R1); 5262 5263 z_lghi(Z_R1, n_csize); 5264 z_lghi(even_reg, h_csize); 5265 bind(L_InnerLoop); 5266 z_llgc(odd_reg, Address(needle, Z_R1)); 5267 z_ch(odd_reg, Address(result, even_reg)); 5268 z_brne(L_Skip); 5269 if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); } 5270 z_brnl(L_Found); 5271 z_aghi(Z_R1, n_csize); 5272 z_aghi(even_reg, h_csize); 5273 z_bru(L_InnerLoop); 5274 5275 bind(L_Skip); 5276 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5277 z_bru(L_OuterLoop); 5278 } 5279 5280 } else { 5281 const intptr_t needle_bytes = (n_csize == 2) ? 
((needlecntval - 1) << 1) : (needlecntval - 1); 5282 Label L_clcle; 5283 5284 if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) { 5285 if (needlecnt != noreg) { 5286 compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle); 5287 z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC) 5288 z_brl(L_needle1); 5289 } 5290 5291 // Main Loop: clc version (now we have at least 2 characters). 5292 Label L_OuterLoop, CLC_template; 5293 bind(L_OuterLoop); // Search for 1st 2 characters. 5294 z_lgr(Z_R1, haycnt); 5295 if (h_csize == 1) { 5296 MacroAssembler::search_string(Z_R1, result); 5297 } else { 5298 MacroAssembler::search_string_uni(Z_R1, result); 5299 } 5300 z_brc(Assembler::bcondNotFound, L_NotFound); 5301 z_lgr(result, Z_R1); 5302 5303 if (needlecnt != noreg) { 5304 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5305 z_exrl(needlecnt, CLC_template); 5306 } else { 5307 z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle); 5308 } 5309 z_bre(L_Found); 5310 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5311 z_bru(L_OuterLoop); 5312 5313 if (needlecnt != noreg) { 5314 bind(CLC_template); 5315 z_clc(h_csize, 0, Z_R1, n_csize, needle); 5316 } 5317 } 5318 5319 if (needlecnt != noreg || needle_bytes > 256) { 5320 bind(L_clcle); 5321 5322 // Main Loop: clcle version (now we have at least 256 bytes). 5323 Label L_OuterLoop, CLC_template; 5324 bind(L_OuterLoop); // Search for 1st 2 characters. 5325 z_lgr(Z_R1, haycnt); 5326 if (h_csize == 1) { 5327 MacroAssembler::search_string(Z_R1, result); 5328 } else { 5329 MacroAssembler::search_string_uni(Z_R1, result); 5330 } 5331 z_brc(Assembler::bcondNotFound, L_NotFound); 5332 5333 add2reg(Z_R0, n_csize, needle); 5334 add2reg(even_reg, h_csize, Z_R1); 5335 z_lgr(result, Z_R1); 5336 if (needlecnt != noreg) { 5337 z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand) 5338 z_llgfr(odd_reg, needlecnt); 5339 } else { 5340 load_const_optimized(Z_R1, needle_bytes); 5341 if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); } 5342 } 5343 if (h_csize == 1) { 5344 compare_long_ext(Z_R0, even_reg, 0); 5345 } else { 5346 compare_long_uni(Z_R0, even_reg, 0); 5347 } 5348 z_bre(L_Found); 5349 5350 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload. 5351 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5352 z_bru(L_OuterLoop); 5353 } 5354 } 5355 5356 if (needlecnt != noreg || needlecntval == 1) { 5357 bind(L_needle1); 5358 5359 // Single needle character version. 5360 if (h_csize == 1) { 5361 MacroAssembler::search_string(haycnt, result); 5362 } else { 5363 MacroAssembler::search_string_uni(haycnt, result); 5364 } 5365 z_lgr(result, haycnt); 5366 z_brc(Assembler::bcondFound, L_Found); 5367 } 5368 5369 bind(L_NotFound); 5370 add2reg(result, -1, haystack); // Return -1. 5371 5372 bind(L_Found); // Return index (or -1 in fallthrough case). 
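    // result currently holds the address of the match (or haystack-1 if none).
    // Convert to an element index: index = (result - haystack) >> log2(char size).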
5373 z_sgr(result, haystack); 5374 if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); } 5375 } 5376 BLOCK_COMMENT("} string_indexof"); 5377 5378 return offset() - block_start; 5379 } 5380 5381 // early clobber: result 5382 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt, 5383 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) { 5384 int block_start = offset(); 5385 5386 BLOCK_COMMENT("string_indexof_char {"); 5387 5388 if (needle == haystack) { 5389 z_lhi(result, 0); 5390 } else { 5391 5392 Label Ldone; 5393 5394 z_llgfr(odd_reg, haycnt); // Preset loop ctr/searchrange end. 5395 if (needle == noreg) { 5396 load_const_optimized(Z_R0, (unsigned long)needleChar); 5397 } else { 5398 if (is_byte) { 5399 z_llgcr(Z_R0, needle); // First (and only) needle char. 5400 } else { 5401 z_llghr(Z_R0, needle); // First (and only) needle char. 5402 } 5403 } 5404 5405 if (!is_byte) { 5406 z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU. 5407 } 5408 5409 z_lgr(even_reg, haystack); // haystack addr 5410 z_agr(odd_reg, haystack); // First char after range end. 5411 z_lghi(result, -1); 5412 5413 if (is_byte) { 5414 MacroAssembler::search_string(odd_reg, even_reg); 5415 } else { 5416 MacroAssembler::search_string_uni(odd_reg, even_reg); 5417 } 5418 z_brc(Assembler::bcondNotFound, Ldone); 5419 if (is_byte) { 5420 if (VM_Version::has_DistinctOpnds()) { 5421 z_sgrk(result, odd_reg, haystack); 5422 } else { 5423 z_sgr(odd_reg, haystack); 5424 z_lgr(result, odd_reg); 5425 } 5426 } else { 5427 z_slgr(odd_reg, haystack); 5428 z_srlg(result, odd_reg, exact_log2(sizeof(jchar))); 5429 } 5430 5431 bind(Ldone); 5432 } 5433 BLOCK_COMMENT("} string_indexof_char"); 5434 5435 return offset() - block_start; 5436 } 5437 5438 5439 //------------------------------------------------- 5440 // Constants (scalar and oop) in constant pool 5441 //------------------------------------------------- 5442 5443 // Add a non-relocated constant to the CP. 5444 int MacroAssembler::store_const_in_toc(AddressLiteral& val) { 5445 long value = val.value(); 5446 address tocPos = long_constant(value); 5447 5448 if (tocPos != NULL) { 5449 int tocOffset = (int)(tocPos - code()->consts()->start()); 5450 return tocOffset; 5451 } 5452 // Address_constant returned NULL, so no constant entry has been created. 5453 // In that case, we return a "fatal" offset, just in case that subsequently 5454 // generated access code is executed. 5455 return -1; 5456 } 5457 5458 // Returns the TOC offset where the address is stored. 5459 // Add a relocated constant to the CP. 5460 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) { 5461 // Use RelocationHolder::none for the constant pool entry. 5462 // Otherwise we will end up with a failing NativeCall::verify(x), 5463 // where x is the address of the constant pool entry. 5464 address tocPos = address_constant((address)oop.value(), RelocationHolder::none); 5465 5466 if (tocPos != NULL) { 5467 int tocOffset = (int)(tocPos - code()->consts()->start()); 5468 RelocationHolder rsp = oop.rspec(); 5469 Relocation *rel = rsp.reloc(); 5470 5471 // Store toc_offset in relocation, used by call_far_patchable. 5472 if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) { 5473 ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset); 5474 } 5475 // Relocate at the load's pc. 
5476 relocate(rsp); 5477 5478 return tocOffset; 5479 } 5480 // Address_constant returned NULL, so no constant entry has been created. 5481 // In that case, we return a "fatal" offset, just in case that subsequently 5482 // generated access code is executed. 5483 return -1; 5484 } 5485 5486 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { 5487 int tocOffset = store_const_in_toc(a); 5488 if (tocOffset == -1) return false; 5489 address tocPos = tocOffset + code()->consts()->start(); 5490 assert((address)code()->consts()->start() != NULL, "Please add CP address"); 5491 5492 load_long_pcrelative(dst, tocPos); 5493 return true; 5494 } 5495 5496 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) { 5497 int tocOffset = store_oop_in_toc(a); 5498 if (tocOffset == -1) return false; 5499 address tocPos = tocOffset + code()->consts()->start(); 5500 assert((address)code()->consts()->start() != NULL, "Please add CP address"); 5501 5502 load_addr_pcrelative(dst, tocPos); 5503 return true; 5504 } 5505 5506 // If the instruction sequence at the given pc is a load_const_from_toc 5507 // sequence, return the value currently stored at the referenced position 5508 // in the TOC. 5509 intptr_t MacroAssembler::get_const_from_toc(address pc) { 5510 5511 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 5512 5513 long offset = get_load_const_from_toc_offset(pc); 5514 address dataLoc = NULL; 5515 if (is_load_const_from_toc_pcrelative(pc)) { 5516 dataLoc = pc + offset; 5517 } else { 5518 CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie. 5519 assert(cb && cb->is_nmethod(), "sanity"); 5520 nmethod* nm = (nmethod*)cb; 5521 dataLoc = nm->ctable_begin() + offset; 5522 } 5523 return *(intptr_t *)dataLoc; 5524 } 5525 5526 // If the instruction sequence at the given pc is a load_const_from_toc 5527 // sequence, copy the passed-in new_data value into the referenced 5528 // position in the TOC. 5529 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) { 5530 assert(is_load_const_from_toc(pc), "must be load_const_from_pool"); 5531 5532 long offset = MacroAssembler::get_load_const_from_toc_offset(pc); 5533 address dataLoc = NULL; 5534 if (is_load_const_from_toc_pcrelative(pc)) { 5535 dataLoc = pc + offset; 5536 } else { 5537 nmethod* nm = CodeCache::find_nmethod(pc); 5538 assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob"); 5539 dataLoc = nm->ctable_begin() + offset; 5540 } 5541 if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary. 5542 *(unsigned long *)dataLoc = new_data; 5543 } 5544 } 5545 5546 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc 5547 // site. Verify by calling is_load_const_from_toc() beforehand! 5548 // Offset is +/- 2**32 -> use long.
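// (Background, assumed encoding: the pc-relative loader is a 6-byte LGRL whose
//  32-bit immediate counts halfwords, giving a reach of +/-4GB. The byte offset
//  returned below therefore does not fit into an int.)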
5549 long MacroAssembler::get_load_const_from_toc_offset(address a) { 5550 assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load"); 5551 // expected code sequence: 5552 // z_lgrl(t, simm32); len = 6 5553 unsigned long inst; 5554 unsigned int len = get_instruction(a, &inst); 5555 return get_pcrel_offset(inst); 5556 } 5557 5558 //********************************************************************************** 5559 // inspection of generated instruction sequences for a particular pattern 5560 //********************************************************************************** 5561 5562 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) { 5563 #ifdef ASSERT 5564 unsigned long inst; 5565 unsigned int len = get_instruction(a+2, &inst); 5566 if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) { 5567 const int range = 128; 5568 Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl"); 5569 VM_Version::z_SIGSEGV(); 5570 } 5571 #endif 5572 // expected code sequence: 5573 // z_lgrl(t, relAddr32); len = 6 5574 // TODO: verify accessed data is in CP, if possible. 5575 return is_load_pcrelative_long(a); // TODO: might be too general. Currently, only lgrl is used. 5576 } 5577 5578 bool MacroAssembler::is_load_const_from_toc_call(address a) { 5579 return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size()); 5580 } 5581 5582 bool MacroAssembler::is_load_const_call(address a) { 5583 return is_load_const(a) && is_call_byregister(a + load_const_size()); 5584 } 5585 5586 //------------------------------------------------- 5587 // Emitters for some really CISC instructions 5588 //------------------------------------------------- 5589 5590 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) { 5591 assert(dst->encoding()%2==0, "must be an even/odd register pair"); 5592 assert(src->encoding()%2==0, "must be an even/odd register pair"); 5593 assert(pad<256, "must be a padding BYTE"); 5594 5595 Label retry; 5596 bind(retry); 5597 Assembler::z_mvcle(dst, src, pad); 5598 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5599 } 5600 5601 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) { 5602 assert(left->encoding() % 2 == 0, "must be an even/odd register pair"); 5603 assert(right->encoding() % 2 == 0, "must be an even/odd register pair"); 5604 assert(pad<256, "must be a padding BYTE"); 5605 5606 Label retry; 5607 bind(retry); 5608 Assembler::z_clcle(left, right, pad, Z_R0); 5609 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5610 } 5611 5612 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) { 5613 assert(left->encoding() % 2 == 0, "must be an even/odd register pair"); 5614 assert(right->encoding() % 2 == 0, "must be an even/odd register pair"); 5615 assert(pad<=0xfff, "must be a padding HALFWORD"); 5616 assert(VM_Version::has_ETF2(), "instruction must be available"); 5617 5618 Label retry; 5619 bind(retry); 5620 Assembler::z_clclu(left, right, pad, Z_R0); 5621 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5622 } 5623 5624 void MacroAssembler::search_string(Register end, Register start) { 5625 assert(end->encoding() != 0, "end address must not be in R0"); 5626 assert(start->encoding() != 0, "start address must not be in R0"); 5627 5628 Label retry; 5629 bind(retry); 5630 Assembler::z_srst(end, start); 5631
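  // SRST is interruptible: CC==3 means the CPU gave up before examining the
  // whole range, so we just resume where it stopped (the same retry pattern
  // as the other wrappers in this section).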
Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5632 } 5633 5634 void MacroAssembler::search_string_uni(Register end, Register start) { 5635 assert(end->encoding() != 0, "end address must not be in R0"); 5636 assert(start->encoding() != 0, "start address must not be in R0"); 5637 assert(VM_Version::has_ETF3(), "instruction must be available"); 5638 5639 Label retry; 5640 bind(retry); 5641 Assembler::z_srstu(end, start); 5642 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5643 } 5644 5645 void MacroAssembler::kmac(Register srcBuff) { 5646 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5647 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5648 5649 Label retry; 5650 bind(retry); 5651 Assembler::z_kmac(Z_R0, srcBuff); 5652 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5653 } 5654 5655 void MacroAssembler::kimd(Register srcBuff) { 5656 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5657 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5658 5659 Label retry; 5660 bind(retry); 5661 Assembler::z_kimd(Z_R0, srcBuff); 5662 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5663 } 5664 5665 void MacroAssembler::klmd(Register srcBuff) { 5666 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5667 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5668 5669 Label retry; 5670 bind(retry); 5671 Assembler::z_klmd(Z_R0, srcBuff); 5672 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5673 } 5674 5675 void MacroAssembler::km(Register dstBuff, Register srcBuff) { 5676 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 5677 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 5678 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5679 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 5680 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5681 5682 Label retry; 5683 bind(retry); 5684 Assembler::z_km(dstBuff, srcBuff); 5685 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5686 } 5687 5688 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) { 5689 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 5690 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 
5691 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5692 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 5693 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5694 5695 Label retry; 5696 bind(retry); 5697 Assembler::z_kmc(dstBuff, srcBuff); 5698 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5699 } 5700 5701 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) { 5702 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5703 5704 Label retry; 5705 bind(retry); 5706 Assembler::z_cksm(crcBuff, srcBuff); 5707 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5708 } 5709 5710 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) { 5711 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5712 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5713 5714 Label retry; 5715 bind(retry); 5716 Assembler::z_troo(r1, r2, m3); 5717 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5718 } 5719 5720 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) { 5721 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5722 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5723 5724 Label retry; 5725 bind(retry); 5726 Assembler::z_trot(r1, r2, m3); 5727 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5728 } 5729 5730 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) { 5731 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5732 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5733 5734 Label retry; 5735 bind(retry); 5736 Assembler::z_trto(r1, r2, m3); 5737 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5738 } 5739 5740 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) { 5741 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5742 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5743 5744 Label retry; 5745 bind(retry); 5746 Assembler::z_trtt(r1, r2, m3); 5747 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5748 } 5749 5750 void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) { 5751 if (scratch == noreg) scratch = Z_R1; 5752 address Astate = SafepointSynchronize::address_of_state(); 5753 BLOCK_COMMENT("safepoint check:"); 5754 5755 if (may_relocate) { 5756 ptrdiff_t total_distance = Astate - this->pc(); 5757 if (RelAddr::is_in_range_of_RelAddr32(total_distance)) { 5758 RelocationHolder rspec = external_word_Relocation::spec(Astate); 5759 (this)->relocate(rspec, relocInfo::pcrel_addr_format); 5760 load_absolute_address(scratch, Astate); 5761 } else { 5762 load_const_optimized(scratch, Astate); 5763 } 5764 } else { 5765 load_absolute_address(scratch, Astate); 5766 } 5767 z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized); 5768 z_brne(slow_path); 5769 } 5770 5771 5772 void MacroAssembler::generate_type_profiling(const Register Rdata, 5773 const Register Rreceiver_klass, 5774 const Register Rwanted_receiver_klass, 5775 const Register Rmatching_row, 5776 bool is_virtual_call) { 5777 const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) - 5778 
in_bytes(ReceiverTypeData::receiver_offset(0)); 5779 const int num_rows = ReceiverTypeData::row_limit(); 5780 NearLabel found_free_row; 5781 NearLabel do_increment; 5782 NearLabel found_no_slot; 5783 5784 BLOCK_COMMENT("type profiling {"); 5785 5786 // search for: 5787 // a) The type given in Rwanted_receiver_klass. 5788 // b) The *first* empty row. 5789 5790 // First search for a) only, passing over b) without regard. 5791 // This is possible because 5792 // wanted_receiver_klass == receiver_klass && wanted_receiver_klass == 0 5793 // is never true (receiver_klass can't be zero). 5794 for (int row_num = 0; row_num < num_rows; row_num++) { 5795 // Row_offset should be a well-behaved positive number. The generated code relies 5796 // on it to keep the code size constant. Add2reg can handle all row_offset values, but 5797 // would have to vary the generated code size. 5798 int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num)); 5799 assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code"); 5800 5801 // Is Rwanted_receiver_klass in this row? 5802 if (VM_Version::has_CompareBranch()) { 5803 z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata); 5804 // Rmatching_row = Rdata + row_offset; 5805 add2reg(Rmatching_row, row_offset, Rdata); 5806 // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot; 5807 compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment); 5808 } else { 5809 add2reg(Rmatching_row, row_offset, Rdata); 5810 z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata); 5811 z_bre(do_increment); 5812 } 5813 } 5814 5815 // Since we did not find a match, let's search for b). 5816 5817 // We could save the first calculation of Rmatching_row if we would search for a) in reverse order. 5818 // We would then end up here with Rmatching_row containing the value for row_num == 0. 5819 // We would not see much benefit, if any at all, because the CPU can schedule 5820 // two instructions together with a branch anyway. 5821 for (int row_num = 0; row_num < num_rows; row_num++) { 5822 int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num)); 5823 5824 // Does this row have a zero receiver_klass, i.e. is it empty? 5825 if (VM_Version::has_CompareBranch()) { 5826 z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata); 5827 // Rmatching_row = Rdata + row_offset 5828 add2reg(Rmatching_row, row_offset, Rdata); 5829 // if (*row_recv == (intptr_t) 0) goto found_free_row 5830 compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row); 5831 } else { 5832 add2reg(Rmatching_row, row_offset, Rdata); 5833 load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset)); 5834 z_bre(found_free_row); // zero -> Found a free row. 5835 } 5836 } 5837 5838 // No match, no empty row found. 5839 // Increment total counter to indicate polymorphic case. 5840 if (is_virtual_call) { 5841 add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row); 5842 } 5843 z_bru(found_no_slot); 5844 5845 // Here we found an empty row, but we have not found Rwanted_receiver_klass. 5846 // Rmatching_row holds the address of the first empty row. 5847 bind(found_free_row); 5848 // Store receiver_klass into empty slot. 5849 z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row); 5850 5851 // Increment the counter of Rmatching_row.
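  // (The count cell lives at a fixed distance from its receiver cell, so one
  //  displacement, counter_offset below, works for every row.)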
5852 bind(do_increment); 5853 ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0); 5854 add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata); 5855 5856 bind(found_no_slot); 5857 5858 BLOCK_COMMENT("} type profiling"); 5859 } 5860 5861 //--------------------------------------- 5862 // Helpers for Intrinsic Emitters 5863 //--------------------------------------- 5864 5865 /** 5866 * uint32_t crc; 5867 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 5868 */ 5869 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) { 5870 assert_different_registers(crc, table, tmp); 5871 assert_different_registers(val, table); 5872 if (crc == val) { // Must rotate first to use the unmodified value. 5873 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 5874 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 5875 } else { 5876 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 5877 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 5878 } 5879 z_x(crc, Address(table, tmp, 0)); 5880 } 5881 5882 /** 5883 * uint32_t crc; 5884 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 5885 */ 5886 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 5887 fold_byte_crc32(crc, crc, table, tmp); 5888 } 5889 5890 /** 5891 * Emits code to update CRC-32 with a byte value according to constants in table. 5892 * 5893 * @param [in,out]crc Register containing the crc. 5894 * @param [in]val Register containing the byte to fold into the CRC. 5895 * @param [in]table Register containing the table of crc constants. 5896 * 5897 * uint32_t crc; 5898 * val = crc_table[(val ^ crc) & 0xFF]; 5899 * crc = val ^ (crc >> 8); 5900 */ 5901 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 5902 z_xr(val, crc); 5903 fold_byte_crc32(crc, val, table, val); 5904 } 5905 5906 5907 /** 5908 * @param crc register containing existing CRC (32-bit) 5909 * @param buf register pointing to input byte buffer (byte*) 5910 * @param len register containing number of bytes 5911 * @param table register pointing to CRC table 5912 */ 5913 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, 5914 Register data, bool invertCRC) { 5915 assert_different_registers(crc, buf, len, table, data); 5916 5917 Label L_mainLoop, L_done; 5918 const int mainLoop_stepping = 1; 5919 5920 // Process all bytes in a single-byte loop. 5921 z_ltr(len, len); 5922 z_brnh(L_done); 5923 5924 if (invertCRC) { 5925 not_(crc, noreg, false); // ~c 5926 } 5927 5928 bind(L_mainLoop); 5929 z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 5930 add2reg(buf, mainLoop_stepping); // Advance buffer position. 5931 update_byte_crc32(crc, data, table); 5932 z_brct(len, L_mainLoop); // Iterate. 5933 5934 if (invertCRC) { 5935 not_(crc, noreg, false); // ~c 5936 } 5937 5938 bind(L_done); 5939 } 5940 5941 /** 5942 * Emits code to update CRC-32 with a 4-byte value according to constants in table. 5943 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c. 
5944 * 5945 */ 5946 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, 5947 Register t0, Register t1, Register t2, Register t3) { 5948 // This is what we implement (the DOBIG4 part): 5949 // 5950 // #define DOBIG4 c ^= *++buf4; \ 5951 // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ 5952 // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] 5953 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 5954 const int ix0 = 4*(4*CRC32_COLUMN_SIZE); 5955 const int ix1 = 5*(4*CRC32_COLUMN_SIZE); 5956 const int ix2 = 6*(4*CRC32_COLUMN_SIZE); 5957 const int ix3 = 7*(4*CRC32_COLUMN_SIZE); 5958 5959 // XOR crc with next four bytes of buffer. 5960 lgr_if_needed(t0, crc); 5961 z_x(t0, Address(buf, bufDisp)); 5962 if (bufInc != 0) { 5963 add2reg(buf, bufInc); 5964 } 5965 5966 // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices. 5967 rotate_then_insert(t3, t0, 56-2, 63-2, 2, true); // ((c >> 0) & 0xff) << 2 5968 rotate_then_insert(t2, t0, 56-2, 63-2, 2-8, true); // ((c >> 8) & 0xff) << 2 5969 rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2 5970 rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2 5971 5972 // Load pre-calculated table values. 5973 // Use columns 4..7 for big-endian. 5974 z_ly(t3, Address(table, t3, (intptr_t)ix0)); 5975 z_ly(t2, Address(table, t2, (intptr_t)ix1)); 5976 z_ly(t1, Address(table, t1, (intptr_t)ix2)); 5977 z_ly(t0, Address(table, t0, (intptr_t)ix3)); 5978 5979 // Calculate new crc from table values. 5980 z_xr(t2, t3); 5981 z_xr(t0, t1); 5982 z_xr(t0, t2); // Now crc contains the final checksum value. 5983 lgr_if_needed(crc, t0); 5984 } 5985 5986 /** 5987 * @param crc register containing existing CRC (32-bit) 5988 * @param buf register pointing to input byte buffer (byte*) 5989 * @param len register containing number of bytes 5990 * @param table register pointing to CRC table 5991 * 5992 * uses Z_R10..Z_R13 as work register. Must be saved/restored by caller! 5993 */ 5994 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table, 5995 Register t0, Register t1, Register t2, Register t3) { 5996 assert_different_registers(crc, buf, len, table); 5997 5998 Label L_mainLoop, L_tail; 5999 Register data = t0; 6000 Register ctr = Z_R0; 6001 const int mainLoop_stepping = 8; 6002 const int tailLoop_stepping = 1; 6003 const int log_stepping = exact_log2(mainLoop_stepping); 6004 6005 // Don't test for len <= 0 here. This pathological case should not occur anyway. 6006 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. 6007 // The situation itself is detected and handled correctly by the conditional branches 6008 // following aghi(len, -stepping) and aghi(len, +stepping). 6009 6010 not_(crc, noreg, false); // 1s complement of crc 6011 6012 #if 0 6013 { 6014 // Pre-mainLoop alignment did not show any positive effect on performance. 6015 // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment. 6016 6017 z_cghi(len, mainLoop_stepping); // Alignment is useless for short data streams. 6018 z_brnh(L_tail); 6019 6020 // Align buf to word (4-byte) boundary. 6021 z_lcr(ctr, buf); 6022 rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc 6023 z_sgfr(len, ctr); // Remaining len after alignment. 
/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 *
 * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
 */
void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                                        Register t0, Register t1, Register t2, Register t3) {
  assert_different_registers(crc, buf, len, table);

  Label L_mainLoop, L_tail;
  Register data = t0;
  Register ctr  = Z_R0;
  const int mainLoop_stepping = 8;
  const int tailLoop_stepping = 1;
  const int log_stepping      = exact_log2(mainLoop_stepping);

  // Don't test for len <= 0 here. This pathological case should not occur anyway.
  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the conditional branches
  // following aghi(len, -stepping) and aghi(len, +stepping).

  not_(crc, noreg, false);             // 1s complement of crc

#if 0
  {
    // Pre-mainLoop alignment did not show any positive effect on performance.
    // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.

    z_cghi(len, mainLoop_stepping);    // Alignment is useless for short data streams.
    z_brnh(L_tail);

    // Align buf to word (4-byte) boundary.
    z_lcr(ctr, buf);
    rotate_then_insert(ctr, ctr, 62, 63, 0, true);  // TODO: should set cc
    z_sgfr(len, ctr);                  // Remaining len after alignment.

    update_byteLoop_crc32(crc, buf, ctr, table, data, false);
  }
#endif

  // Check for short (<mainLoop_stepping bytes) buffer.
  z_srag(ctr, len, log_stepping);
  z_brnh(L_tail);

  z_lrvr(crc, crc);                    // Reverse byte order because we are dealing with big-endian data.
  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true);  // #bytes for tailLoop

  BIND(L_mainLoop);
  update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
  update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
  z_brct(ctr, L_mainLoop);             // Iterate.

  z_lrvr(crc, crc);                    // Restore original byte order.

  // Process last few (<8) bytes of buffer.
  BIND(L_tail);
  update_byteLoop_crc32(crc, buf, len, table, data, false);

  not_(crc, noreg, false);             // 1s complement of crc
}
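
// Loop bookkeeping shared by kernel_crc32_2word above and kernel_crc32_1word
// below, in plain C (sketch only), for stepping = 2^log_stepping:
//
//   ctr = len >> log_stepping;    // main-loop iterations (z_srag)
//   len = len & (stepping - 1);   // leftover bytes for the tail loop
//                                 // (rotate_then_insert keeps only the low bits)
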
/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 *
 * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
 */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                                        Register t0, Register t1, Register t2, Register t3) {
  assert_different_registers(crc, buf, len, table);

  Label L_mainLoop, L_tail;
  Register data = t0;
  Register ctr  = Z_R0;
  const int mainLoop_stepping = 4;
  const int log_stepping      = exact_log2(mainLoop_stepping);

  // Don't test for len <= 0 here. This pathological case should not occur anyway.
  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the conditional branches
  // following aghi(len, -stepping) and aghi(len, +stepping).

  not_(crc, noreg, false);             // 1s complement of crc

  // Check for short (<4 bytes) buffer.
  z_srag(ctr, len, log_stepping);
  z_brnh(L_tail);

  z_lrvr(crc, crc);                    // Reverse byte order because we are dealing with big-endian data.
  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true);  // #bytes for tailLoop

  BIND(L_mainLoop);
  update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
  z_brct(ctr, L_mainLoop);             // Iterate.
  z_lrvr(crc, crc);                    // Restore original byte order.

  // Process last few (<4) bytes of buffer.
  BIND(L_tail);
  update_byteLoop_crc32(crc, buf, len, table, data, false);

  not_(crc, noreg, false);             // 1s complement of crc
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                                        Register t0, Register t1, Register t2, Register t3) {
  assert_different_registers(crc, buf, len, table);
  Register data = t0;

  update_byteLoop_crc32(crc, buf, len, table, data, true);
}

void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
  assert_different_registers(crc, buf, len, table, tmp);

  not_(crc, noreg, false);             // ~c

  z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
  update_byte_crc32(crc, tmp, table);

  not_(crc, noreg, false);             // ~c
}

//
// Code for BigInteger::multiplyToLen() intrinsic.
//

// dest_lo += src1 + src2
// dest_hi += carry from the two additions above
// Z_R7 is destroyed!
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
                                     Register src1, Register src2) {
  clear_reg(Z_R7);
  z_algr(dest_lo, src1);
  z_alcgr(dest_hi, Z_R7);
  z_algr(dest_lo, src2);
  z_alcgr(dest_hi, Z_R7);
}
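
// For reference, the net effect of add2_with_carry in plain C (illustrative
// sketch only, using a 128-bit intermediate):
//
//   unsigned __int128 sum = (unsigned __int128)dest_lo + src1 + src2;
//   dest_hi += (uint64_t)(sum >> 64);   // accumulates both carries
//   dest_lo  = (uint64_t)sum;
//
// Note on z_mlgr, used by the multiply loops below: MLGR multiplies the odd
// register of an even/odd register pair by the second operand and writes the
// 128-bit product into the pair (high half into the even register, low half
// into the odd register). That is why the code below obtains the low half of
// the product via product->successor().
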
// Multiply 64 bit by 64 bit first loop.
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
                                           Register x_xstart,
                                           Register y, Register y_idx,
                                           Register z,
                                           Register carry,
                                           Register product,
                                           Register idx, Register kdx) {
  // jlong carry, x[], y[], z[];
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //   huge_128 product = y[idx] * x[xstart] + carry;
  //   z[kdx] = (jlong)product;
  //   carry  = (jlong)(product >>> 64);
  // }
  // z[xstart] = carry;

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  z_aghi(xstart, -1);
  z_brl(L_one_x);   // Special case: length of x is 1.

  // Load next two integers of x.
  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));

  bind(L_first_loop);

  z_aghi(idx, -1);
  z_brl(L_first_loop_exit);
  z_aghi(idx, -1);
  z_brl(L_one_y);

  // Load next two integers of y.
  z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
  mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));

  bind(L_multiply);

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
  clear_reg(Z_R7);
  z_algr(product_low, carry); // Add carry to result.
  z_alcgr(product, Z_R7);     // Add carry of the last addition.
  add2reg(kdx, -2);

  // Store result.
  z_sllg(Z_R7, kdx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0));
  lgr_if_needed(carry, product);
  z_bru(L_first_loop);

  bind(L_one_y);   // Load one 32 bit portion of y as (0,value).

  clear_reg(y_idx);
  mem2reg_opt(y_idx, Address(y, (intptr_t)0), false);
  z_bru(L_multiply);

  bind(L_one_x);   // Load one 32 bit portion of x as (0,value).

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t)0), false);
  z_bru(L_first_loop);

  bind(L_first_loop_exit);
}

// Multiply 64 bit by 64 bit and add 128 bit.
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
                                            Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product,
                                            int offset) {
  // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  // z[kdx] = (jlong)product;

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  mem2reg_opt(yz_idx, Address(y, Z_R7, offset));

  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);   // multiplicand * yz_idx -> product::multiplicand
  mem2reg_opt(yz_idx, Address(z, Z_R7, offset));

  add2_with_carry(product, product_low, carry, yz_idx);

  z_sllg(Z_R7, idx, LogBytesPerInt);   // Z_R7 was destroyed by add2_with_carry; recompute the offset.
  reg2mem_opt(product_low, Address(z, Z_R7, offset));
}
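
// In plain C, one multiply_add_128_x_128 call performs roughly the following
// (sketch with a 128-bit intermediate; addressing of the jint arrays as
// 64-bit words is simplified, and 'product' ends up holding the high half,
// which the caller takes over as the next carry):
//
//   unsigned __int128 p = (unsigned __int128)y[idx] * x_xstart
//                       + z[idx] + carry;
//   z[idx]  = (uint64_t)p;          // low 64 bits back to memory
//   product = (uint64_t)(p >> 64);  // high 64 bits stay in the register pair
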
// Multiply 128 bit by 128 bit. Unrolled inner loop.
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
                                             Register y, Register z,
                                             Register yz_idx, Register idx,
                                             Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  // jlong carry, x[], y[], z[];
  // int kdx = ystart+1;
  // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //   z[kdx+idx+1] = (jlong)product;
  //   jlong carry2 = (jlong)(product >>> 64);
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }
  // idx += 2;
  // if (idx > 0) {
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  // Scale the index.
  lgr_if_needed(jdx, idx);
  and_imm(jdx, 0xfffffffffffffffcL);
  rshift(jdx, 2);

  bind(L_third_loop);

  z_aghi(jdx, -1);
  z_brl(L_third_loop_exit);
  add2reg(idx, -4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  lgr_if_needed(carry2, product);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  lgr_if_needed(carry, product);
  z_bru(L_third_loop);

  bind(L_third_loop_exit);  // Handle any left-over operand parts.

  and_imm(idx, 0x3);
  z_brz(L_post_third_loop_done);

  Label L_check_1;

  z_aghi(idx, -2);
  z_brl(L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  lgr_if_needed(carry, product);

  bind(L_check_1);

  add2reg(idx, 0x2);
  and_imm(idx, 0x1);
  z_aghi(idx, -1);
  z_brl(L_post_third_loop_done);

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);   // multiplicand * yz_idx -> product::multiplicand
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);

  add2_with_carry(product, product_low, yz_idx, carry);

  z_sllg(Z_R7, idx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
  rshift(product_low, 32);

  lshift(product, 32);
  z_ogr(product_low, product);
  lgr_if_needed(carry, product_low);

  bind(L_post_third_loop_done);
}
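
// For reference: the complete schoolbook algorithm that multiply_to_len below
// stitches together from the loop emitters above, modeled in plain C
// (illustrative sketch only; mirrors BigInteger.multiplyToLen, most
// significant int first):
//
//   static void multiply_to_len_ref(const uint32_t* x, int xlen,
//                                   const uint32_t* y, int ylen, uint32_t* z) {
//     const uint64_t LONG_MASK = 0xffffffffULL;
//     int xstart = xlen - 1;
//     uint64_t carry = 0;
//     // First loop: multiply y by the least significant int of x.
//     for (int j = ylen - 1, k = ylen + xstart; j >= 0; j--, k--) {
//       uint64_t product = (y[j] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
//       z[k] = (uint32_t)product;
//       carry = product >> 32;
//     }
//     z[xstart] = (uint32_t)carry;
//     // Second loop with nested third loop: accumulate the remaining rows.
//     for (int i = xstart - 1; i >= 0; i--) {
//       carry = 0;
//       for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
//         uint64_t product = (y[j] & LONG_MASK) * (x[i] & LONG_MASK)
//                          + (z[k] & LONG_MASK) + carry;
//         z[k] = (uint32_t)product;
//         carry = product >> 32;
//       }
//       z[i] = (uint32_t)carry;
//     }
//   }
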
void MacroAssembler::multiply_to_len(Register x, Register xlen,
                                     Register y, Register ylen,
                                     Register z,
                                     Register tmp1, Register tmp2,
                                     Register tmp3, Register tmp4,
                                     Register tmp5) {
  ShortBranchVerifier sbv(this);

  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);

  z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);

  // In OpenJDK, we store the argument as a 32-bit value to the slot.
  Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.

  const Register idx    = tmp1;
  const Register kdx    = tmp2;
  const Register xstart = tmp3;

  const Register y_idx  = tmp4;
  const Register carry  = tmp5;
  const Register product  = Z_R0_scratch;
  const Register x_xstart = Z_R8;

  // First Loop.
  //
  //   final static long LONG_MASK = 0xffffffffL;
  //   int xstart = xlen - 1;
  //   int ystart = ylen - 1;
  //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //     z[kdx] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[xstart] = (int)carry;
  //

  lgr_if_needed(idx, ylen);  // idx = ylen
  z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
  clear_reg(carry);          // carry = 0

  Label L_done;

  lgr_if_needed(xstart, xlen);
  z_aghi(xstart, -1);
  z_brl(L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  NearLabel L_second_loop;
  compare64_and_branch(kdx, RegisterOrConstant((intptr_t)0), bcondEqual, L_second_loop);

  NearLabel L_carry;
  z_aghi(kdx, -1);
  z_brz(L_carry);

  // Store lower 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  rshift(carry, 32);
  z_aghi(kdx, -1);

  bind(L_carry);

  // Store upper 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);

  // Second and third (nested) loops.
  //
  //   for (int i = xstart-1; i >= 0; i--) { // Second loop
  //     carry = 0;
  //     for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //       long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                      (z[k] & LONG_MASK) + carry;
  //       z[k] = (int)product;
  //       carry = product >>> 32;
  //     }
  //     z[i] = (int)carry;
  //   }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart

  const Register jdx = tmp1;

  bind(L_second_loop);

  clear_reg(carry);          // carry = 0;
  lgr_if_needed(jdx, ylen);  // j = ystart+1

  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_done);

  // Use free slots in the current stackframe instead of push/pop.
  Address zsave(Z_SP, _z_abi(carg_1));
  reg2mem_opt(z, zsave);

  Label L_last_x;

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  load_address(z, Address(z, Z_R1_scratch, 4));  // z = z + k - j
  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_last_x);

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));

  Label L_third_loop_prologue;

  bind(L_third_loop_prologue);

  Address xsave(Z_SP, _z_abi(carg_2));
  Address xlensave(Z_SP, _z_abi(carg_3));
  Address ylensave(Z_SP, _z_abi(carg_4));

  reg2mem_opt(x, xsave);
  reg2mem_opt(xstart, xlensave);
  reg2mem_opt(ylen, ylensave);

  multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);

  mem2reg_opt(z, zsave);
  mem2reg_opt(x, xsave);
  mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
  mem2reg_opt(ylen, ylensave);

  add2reg(tmp3, 1, xlen);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_aghi(tmp3, -1);
  z_brl(L_done);

  rshift(carry, 32);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_bru(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t)0), false);
  z_bru(L_third_loop_prologue);

  bind(L_done);

  z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
}
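
// The asm_assert* helpers below (debug builds only) test the condition code
// left behind by a preceding compare and halt the VM with 'msg' if the check
// fails. A hypothetical use inside an emitter (sketch; the compared values
// are made up for illustration):
//
//   z_cgr(Z_SP, Z_R11);                          // compare, sets the CC
//   asm_assert(true, "SP does not match saved SP", 0x001);
//   // check_equal==true: execution stops unless the CC says "equal"
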
#ifndef PRODUCT

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) {
    z_bre(ok);
  } else {
    z_brne(ok);
  }
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "low".
void MacroAssembler::asm_assert_low(const char *msg, int id) {
  Label ok;
  z_brnl(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "high".
void MacroAssembler::asm_assert_high(const char *msg, int id) {
  Label ok;
  z_brnh(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
// Generates non-relocatable code.
void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) { z_bre(ok); }
  else             { z_brne(ok); }
  stop_static(msg, id);
  bind(ok);
}

void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                                          Register mem_base, const char* msg, int id) {
  switch (size) {
    case 4:
      load_and_test_int(Z_R0, Address(mem_base, mem_offset));
      break;
    case 8:
      load_and_test_long(Z_R0, Address(mem_base, mem_offset));
      break;
    default:
      ShouldNotReachHere();
  }
  if (allow_relocation) { asm_assert(check_equal, msg, id); }
  else                  { asm_assert_static(check_equal, msg, id); }
}

// Check the condition
//   expected_size == FP - SP
// after transformation:
//   expected_size - FP + SP == 0
// Destroys Register expected_size if no tmp register is passed.
void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;   // Perform the check in place, destroying expected_size.
  } else if (tmp != expected_size) {
    z_lgr(tmp, expected_size);
  }
  z_algr(tmp, Z_SP);
  z_slg(tmp, 0, Z_R0, Z_SP);
  asm_assert_eq(msg, id);
}
#endif // !PRODUCT

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    unimplemented("", 117);
  }
}

// Plausibility check for oops.
void MacroAssembler::verify_oop(Register oop, const char* msg) {
  if (!VerifyOops) return;

  BLOCK_COMMENT("verify_oop {");
  Register tmp = Z_R0;
  unsigned int nbytes_save = 6 * 8;
  address entry = StubRoutines::verify_oop_subroutine_entry_address();
  save_return_pc();
  push_frame_abi160(nbytes_save);
  z_stmg(Z_R0, Z_R5, 160, Z_SP);

  z_lgr(Z_ARG2, oop);
  load_const(Z_ARG1, (address) msg);
  load_const(Z_R1, entry);
  z_lg(Z_R1, 0, Z_R1);
  call_c(Z_R1);

  z_lmg(Z_R0, Z_R5, 160, Z_SP);
  pop_frame();

  restore_return_pc();
  BLOCK_COMMENT("} verify_oop ");
}

const char* MacroAssembler::stop_types[] = {
  "stop",
  "untested",
  "unimplemented",
  "shouldnotreachhere"
};

static void stop_on_request(const char* tp, const char* msg) {
  tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
  guarantee(false, "Z assembly code requires stop: %s", msg);
}

void MacroAssembler::stop(int type, const char* msg, int id) {
  BLOCK_COMMENT(err_msg("stop: %s {", msg));

  // Set up arguments.
  load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
  load_const(Z_ARG2, (void*) msg);
  get_PC(Z_R14);      // Following code pushes a frame without entering a new function. Use current pc as return address.
  save_return_pc();   // Saves return pc Z_R14.
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps ensures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap();   // Illegal instruction.
  z_illtrap();   // Illegal instruction.

  BLOCK_COMMENT(" } stop");
}

// Special version of stop() for code size reduction.
// Reuses the previously generated call sequence, if any.
// Generates the call sequence on its own, if necessary.
// Note: This code will work only in non-relocatable code!
//       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
//       should be given for "hand-written" code, if all chain calls are in the same code blob.
//       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
  BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));

  // Set up arguments.
  if (allow_relocation) {
    // Relocatable version (for comparison purposes). Remove after some time.
    load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
    load_const(Z_ARG2, (void*) msg);
  } else {
    load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
    load_absolute_address(Z_ARG2, (address)msg);
  }
  if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
    BLOCK_COMMENT("branch to reentry point:");
    z_brc(bcondAlways, reentry);
  } else {
    BLOCK_COMMENT("reentry point:");
    reentry = pc();      // Re-entry point for subsequent stop calls.
    save_return_pc();    // Saves return pc Z_R14.
    push_frame_abi160(0);
    if (allow_relocation) {
      reentry = NULL;    // Prevent reentry if code relocation is allowed.
      call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    } else {
      call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    }
    z_illtrap();         // Illegal instruction as emergency stop, should the above call return.
  }
  BLOCK_COMMENT(" } stop_chain");

  return reentry;
}

// Special version of stop() for code size reduction.
// Assumes constant relative addresses for data and runtime call.
void MacroAssembler::stop_static(int type, const char* msg, int id) {
  stop_chain(NULL, type, msg, id, false);
}

void MacroAssembler::stop_subroutine() {
  unimplemented("stop_subroutine", 710);
}

// Prints msg to stdout from within generated code.
void MacroAssembler::warn(const char* msg) {
  RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
  load_absolute_address(Z_R1, (address) warning);
  load_absolute_address(Z_ARG1, (address) msg);
  (void) call(Z_R1);
  RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
}
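
// Hypothetical chained use of stop_chain (sketch; the type value 0 is assumed
// to select the "stop" entry of stop_types[] above): the first call emits the
// full argument-setup/call sequence and returns its re-entry point, while
// later calls within branch range merely branch there:
//
//   address reentry = NULL;
//   reentry = stop_chain(reentry, 0 /* "stop" */, "check 1 failed", 0x01, false);
//   // ... more generated code ...
//   reentry = stop_chain(reentry, 0 /* "stop" */, "check 2 failed", 0x02, false);
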
#ifndef PRODUCT

// Write pattern 0x0101010101010101 in region [low-before, high+after].
void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
  if (!ZapEmptyStackFields) return;
  BLOCK_COMMENT("zap memory region {");
  load_const_optimized(val, 0x0101010101010101);
  int size = before + after;
  if (low == high && size < 5 && size > 0) {
    int offset = -before*BytesPerWord;
    for (int i = 0; i < size; ++i) {
      z_stg(val, Address(low, offset));
      offset += (1*BytesPerWord);
    }
  } else {
    add2reg(addr, -before*BytesPerWord, low);
    if (after) {
#ifdef ASSERT
      jlong check = after * BytesPerWord;
      assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
#endif
      add2reg(high, after * BytesPerWord);
    }
    NearLabel loop;
    bind(loop);
    z_stg(val, Address(addr));
    add2reg(addr, 8);
    compare64_and_branch(addr, high, bcondNotHigh, loop);
    if (after) {
      add2reg(high, -after * BytesPerWord);
    }
  }
  BLOCK_COMMENT("} zap memory region");
}
#endif // !PRODUCT

SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
  _masm = masm;
  _masm->load_absolute_address(_rscratch, (address)flag_addr);
  _masm->load_and_test_int(_rscratch, Address(_rscratch));
  if (value) {
    _masm->z_brne(_label);  // Skip if true, i.e. != 0.
  } else {
    _masm->z_bre(_label);   // Skip if false, i.e. == 0.
  }
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
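
// Typical use of SkipIfEqual (sketch): conditionally skip a chunk of
// generated code depending on a runtime bool flag. DTraceMethodProbes serves
// here only as an example of such a flag:
//
//   { SkipIfEqual skip(masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // ... code emitted here runs only when DTraceMethodProbes != false ...
//   } // The destructor binds the skip label.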