/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "opto/compile.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/matcher.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#include <ucontext.h>

#define BLOCK_COMMENT(str) block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

// Move 32-bit register if destination and source are different.
void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lr(rd, rs); }
}

// Move register if destination and source are different.
void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_lgr(rd, rs); }
}

// Zero-extend 32-bit register into 64-bit register if destination and source are different.
void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  if (rs != rd) { z_llgfr(rd, rs); }
}

// Move float register if destination and source are different.
void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  if (rs != rd) { z_ldr(rd, rs); }
}

// Move integer register if destination and source are different.
// It is assumed that shorter-than-int types are already
// appropriately sign-extended.
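//
// Illustrative mapping (not exhaustive): a T_INT -> T_LONG move emits LGFR
// (sign extension), while T_INT -> T_OBJECT/T_ADDRESS emits LLGFR (zero
// extension). See the switch matrix below for all supported combinations.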
void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
                                        BasicType src_type) {
  assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");

  if (dst_type == src_type) {
    lgr_if_needed(dst, src); // Just move all 64 bits.
    return;
  }

  switch (dst_type) {
    // Do not support these types for now.
    //  case T_BOOLEAN:
    case T_BYTE:  // signed byte
      switch (src_type) {
        case T_INT:
          z_lgbr(dst, src);
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_CHAR:
    case T_SHORT:
      switch (src_type) {
        case T_INT:
          if (dst_type == T_CHAR) {
            z_llghr(dst, src);
          } else {
            z_lghr(dst, src);
          }
          break;
        default:
          ShouldNotReachHere();
      }
      return;

    case T_INT:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lr_if_needed(dst, src);
          // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
    case T_LONG:
      switch (src_type) {
        case T_BOOLEAN:
        case T_BYTE:
        case T_CHAR:
        case T_SHORT:
        case T_INT:
          z_lgfr(dst, src); // sign extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    case T_OBJECT:
    case T_ARRAY:
    case T_VOID:
    case T_ADDRESS:
      switch (src_type) {
        // These types don't make sense to be converted to pointers:
        //  case T_BOOLEAN:
        //  case T_BYTE:
        //  case T_CHAR:
        //  case T_SHORT:

        case T_INT:
          z_llgfr(dst, src); // zero extension
          return;

        case T_LONG:
        case T_OBJECT:
        case T_ARRAY:
        case T_VOID:
        case T_ADDRESS:
          lgr_if_needed(dst, src);
          return;

        default:
          assert(false, "non-integer src type");
          return;
      }
      return;
    default:
      assert(false, "non-integer dst type");
      return;
  }
}

// Move float register if destination and source are different.
void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
                                         FloatRegister src, BasicType src_type) {
  assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
  assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
  if (dst_type == src_type) {
    ldr_if_needed(dst, src); // Just move all 64 bits.
  } else {
    switch (dst_type) {
      case T_FLOAT:
        assert(src_type == T_DOUBLE, "invalid float type combination");
        z_ledbr(dst, src);
        return;
      case T_DOUBLE:
        assert(src_type == T_FLOAT, "invalid float type combination");
        z_ldebr(dst, src);
        return;
      default:
        assert(false, "non-float dst type");
        return;
    }
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
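// The same short/long displacement dispatch is used by mem2freg_opt and
// reg2mem_opt below; mem2reg_opt can use the data register itself as work
// register instead.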
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::freg2mem_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch); // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
  } else {
    freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
  }
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::mem2freg_opt(FloatRegister reg,
                                  int64_t       disp,
                                  Register      index,
                                  Register      base,
                                  void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                                  void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                                  Register      scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          z_lgr(scratch, base);
          add2reg(base, disp);
          (this->*classic)(reg, 0, index, base);
          z_lgr(base, scratch); // Restore base.
        }
      }
    }
  }
}

void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
  if (is_double) {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
  } else {
    mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
  }
}

// Optimized emitter for reg to mem operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
//
// Don't rely on register locking, instead pass a scratch register
// (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
void MacroAssembler::reg2mem_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                                 Register scratch) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if (scratch != Z_R0 && scratch != Z_R1) {
        (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
      } else {
        if (scratch != Z_R0) { // scratch == Z_R1
          if ((scratch == index) || (index == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            add2reg(scratch, disp, base);
            (this->*classic)(reg, 0, index, scratch);
            if (base == scratch) {
              add2reg(base, -disp); // Restore base.
            }
          }
        } else { // scratch == Z_R0
          if ((scratch == reg) || (scratch == base) || (reg == base)) {
            (this->*modern)(reg, disp, index, base); // Will fail with disp out of range.
          } else {
            z_lgr(scratch, base);
            add2reg(base, disp);
            (this->*classic)(reg, 0, index, base);
            z_lgr(base, scratch); // Restore base.
          }
        }
      }
    }
  }
}

int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
  int store_offset = offset();
  if (is_double) {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
  } else {
    reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
  }
  return store_offset;
}

// Optimized emitter for mem to reg operations.
// Uses modern instructions if running on modern hardware, classic instructions
// otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) will be used as work register where possible.
void MacroAssembler::mem2reg_opt(Register reg,
                                 int64_t  disp,
                                 Register index,
                                 Register base,
                                 void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                                 void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
  index = (index == noreg) ? Z_R0 : index;
  if (Displacement::is_shortDisp(disp)) {
    (this->*classic)(reg, disp, index, base);
  } else {
    if (Displacement::is_validDisp(disp)) {
      (this->*modern)(reg, disp, index, base);
    } else {
      if ((reg == index) && (reg == base)) {
        z_sllg(reg, reg, 1);
        add2reg(reg, disp);
        (this->*classic)(reg, 0, noreg, reg);
      } else if ((reg == index) && (reg != Z_R0)) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, reg, base);
      } else if (reg == base) {
        add2reg(reg, disp);
        (this->*classic)(reg, 0, index, reg);
      } else if (reg != Z_R0) {
        add2reg(reg, disp, base);
        (this->*classic)(reg, 0, index, reg);
      } else { // reg == Z_R0 && reg != base here
        add2reg(base, disp);
        (this->*classic)(reg, 0, index, base);
        add2reg(base, -disp);
      }
    }
  }
}

void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
  if (is_double) {
    z_lg(reg, a);
  } else {
    mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
  }
}

void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
  mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
}

void MacroAssembler::and_imm(Register r, long mask,
                             Register tmp /* = Z_R0 */,
                             bool wide /* = false */) {
  assert(wide || Immediate::is_simm32(mask), "mask value too large");

  if (!wide) {
    z_nilf(r, mask);
    return;
  }

  assert(r != tmp, "need a different temporary register!");
  load_const_optimized(tmp, mask);
  z_ngr(r, tmp);
}

// Calculate the 1's complement.
// Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as optimization hint for 32-bit results.
void MacroAssembler::not_(Register r1, Register r2, bool wide) {

  if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
    z_xilf(r1, -1);
    if (wide) {
      z_xihf(r1, -1);
    }
  } else { // Distinct src and dst registers.
    if (VM_Version::has_DistinctOpnds()) {
      load_const_optimized(r1, -1);
      z_xgrk(r1, r2, r1);
    } else {
      if (wide) {
        z_lgr(r1, r2);
        z_xilf(r1, -1);
        z_xihf(r1, -1);
      } else {
        z_lr(r1, r2);
        z_xilf(r1, -1);
      }
    }
  }
}

unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
  assert(lBitPos >= 0,       "zero is leftmost bit position");
  assert(rBitPos <= 63,      "63 is rightmost bit position");
  assert(lBitPos <= rBitPos, "inverted selection interval");
  return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
}

// Helper function for the "Rotate_then_<logicalOP>" emitters.
// Rotate src, then mask register contents such that only bits in range survive.
// For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
// For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
// The caller must ensure that the selected range only contains bits with defined value.
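//
// Worked example (illustrative): lBitPos = 48, rBitPos = 63 selects the
// low-order halfword; create_mask(48, 63) yields 0x000000000000ffffUL.
// With oneBits == false, every bit outside that range is cleared.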
void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                                      int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
  assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
  bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
  bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
  // Pre-determine which parts of dst will be zero after shift/rotate.
  bool llZero  =  sll4rll && (nRotate >= 16);
  bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
  bool lfZero  = llZero && lhZero;
  bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
  bool hhZero  =                                 (srl4rll && (nRotate <= -16));
  bool hfZero  = hlZero && hhZero;

  // Rotate, then mask src operand.
  // If oneBits == true,  all bits outside selected range are 1s.
  // If oneBits == false, all bits outside selected range are 0s.
  if (src32bit) { // There might be garbage in the upper 32 bits which will get masked away.
    if (dst32bit) {
      z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
    } else {
      if      (sll4rll) { z_sllg(dst, src,  nRotate); }
      else if (srl4rll) { z_srlg(dst, src, -nRotate); }
      else              { z_rllg(dst, src,  nRotate); }
    }
  } else {
    if      (sll4rll) { z_sllg(dst, src,  nRotate); }
    else if (srl4rll) { z_srlg(dst, src, -nRotate); }
    else              { z_rllg(dst, src,  nRotate); }
  }

  unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
  unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
  unsigned int   range_mask_l  = (unsigned int)range_mask;
  unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
  unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
  unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
  unsigned short range_mask_ll = (unsigned short)range_mask;
  // Works for z9 and newer H/W.
  if (oneBits) {
    if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
    if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
  } else {
    // All bits outside range become 0s.
    if (((~range_mask_l) != 0) && !lfZero) {
      z_nilf(dst, range_mask_l);
    }
    if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
      z_nihf(dst, range_mask_h);
    }
  }
}

// Rotate src, then insert selected range from rotated src into dst.
// Clear dst before, if requested.
void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
                                        int nRotate, bool clear_dst) {
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                        // For risbg, pretend it's an unsigned value.
  z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst);  // Rotate, then insert selected, clear the rest.
}

// Rotate src, then AND selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
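  // The & 0x003f below maps negative rotate amounts to their modulo-64
  // equivalent, matching the 6-bit rotate-amount field of the RxSBG
  // instruction family.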
  nRotate &= 0x003f;                                        // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only);  // Rotate, then AND selected.
}

// Rotate src, then OR selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos,
                                    int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                        // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only);  // Rotate, then OR selected.
}

// Rotate src, then XOR selected range from rotated src into dst.
// Set condition code only if so requested. Otherwise it is unpredictable.
// See performance note in macroAssembler_s390.hpp for important information.
void MacroAssembler::rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos,
                                     int nRotate, bool test_only) {
  guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
  nRotate &= 0x003f;                                        // For risbg, pretend it's an unsigned value.
  z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only);  // Rotate, then XOR selected.
}

void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
  if (inc.is_register()) {
    z_agr(r1, inc.as_register());
  } else { // constant
    intptr_t imm = inc.as_constant();
    add2reg(r1, imm);
  }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
// The optimization tries to avoid the mghi instruction, since it uses the FPU for
// calculation and is thus rather slow.
//
// There is no handling for special cases, e.g. cval==0 or cval==1.
//
// Returns len of generated code block.
unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
  int block_start = offset();

  bool sign_flip = cval < 0;
  cval = sign_flip ? -cval : cval;

  BLOCK_COMMENT("Reg64*Con16 {");

  int bit1 = cval & -cval;
  if (bit1 == cval) {
    z_sllg(rval, rval, exact_log2(bit1));
    if (sign_flip) { z_lcgr(rval, rval); }
  } else {
    int bit2 = (cval-bit1) & -(cval-bit1);
    if ((bit1+bit2) == cval) {
      z_sllg(work, rval, exact_log2(bit1));
      z_sllg(rval, rval, exact_log2(bit2));
      z_agr(rval, work);
      if (sign_flip) { z_lcgr(rval, rval); }
    } else {
      if (sign_flip) { z_mghi(rval, -cval); }
      else           { z_mghi(rval,  cval); }
    }
  }
  BLOCK_COMMENT("} Reg64*Con16");

  int block_end = offset();
  return block_end - block_start;
}

// Generic operation r1 := r2 + imm.
//
// Should produce the best code for each supported CPU version.
// r2 == noreg yields r1 := r1 + imm
// imm == 0 emits either no instruction or r1 := r2 !
// NOTES: 1) Don't use this function where fixed sized
//           instruction sequences are required!!!
//        2) Don't use this function if condition code
//           setting is required!
//        3) Despite being declared as int64_t, the parameter imm
//           must be a simm_32 value (= signed 32-bit integer).
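//
// Example (illustrative): add2reg(Z_R3, 8, Z_R9) computes Z_R3 := Z_R9 + 8,
// which with PreferLAoverADD typically shrinks to a single LA instruction.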
void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
  assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");

  if (r2 == noreg) { r2 = r1; }

  // Handle special case imm == 0.
  if (imm == 0) {
    lgr_if_needed(r1, r2);
    // Nothing else to do.
    return;
  }

  if (!PreferLAoverADD || (r2 == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();

    // Can we encode imm in 16 bits signed?
    if (Immediate::is_simm16(imm)) {
      if (r1 == r2) {
        z_aghi(r1, imm);
        return;
      }
      if (distinctOpnds) {
        z_aghik(r1, r2, imm);
        return;
      }
      z_lgr(r1, r2);
      z_aghi(r1, imm);
      return;
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(imm)) {
      z_la(r1, imm, r2);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(imm)) {
      // Always use LAY instruction, so we don't need the tmp register.
      z_lay(r1, imm, r2);
      return;
    }
  }

  // Can handle it (all possible values) with long immediates.
  lgr_if_needed(r1, r2);
  z_agfi(r1, imm);
}

// Generic operation r := b + x + d
//
// Addition of several operands with address generation semantics - sort of:
//  - no restriction on the registers. Any register will do for any operand.
//  - x == noreg: operand will be disregarded.
//  - b == noreg: will use (contents of) result reg as operand (r := r + d).
//  - x == Z_R0:  just disregard.
//  - b == Z_R0:  use as operand. This is not address generation semantics!!!
//
// The same restrictions as on add2reg() are valid!!!
void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
  assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");

  if (x == noreg) { x = Z_R0; }
  if (b == noreg) { b = r; }

  // Handle special case x == R0.
  if (x == Z_R0) {
    // Can simply add the immediate value to the base register.
    add2reg(r, d, b);
    return;
  }

  if (!PreferLAoverADD || (b == Z_R0)) {
    bool distinctOpnds = VM_Version::has_DistinctOpnds();
    // Handle special case d == 0.
    if (d == 0) {
      if (b == x)        { z_sllg(r, b, 1); return; }
      if (r == x)        { z_agr(r, b);     return; }
      if (r == b)        { z_agr(r, x);     return; }
      if (distinctOpnds) { z_agrk(r, x, b); return; }
      z_lgr(r, b);
      z_agr(r, x);
    } else {
      if (x == b)             { z_sllg(r, x, 1); }
      else if (r == x)        { z_agr(r, b); }
      else if (r == b)        { z_agr(r, x); }
      else if (distinctOpnds) { z_agrk(r, x, b); }
      else {
        z_lgr(r, b);
        z_agr(r, x);
      }
      add2reg(r, d);
    }
  } else {
    // Can we encode imm in 12 bits unsigned?
    if (Displacement::is_shortDisp(d)) {
      z_la(r, d, x, b);
      return;
    }
    // Can we encode imm in 20 bits signed?
    if (Displacement::is_validDisp(d)) {
      z_lay(r, d, x, b);
      return;
    }
    z_la(r, 0, x, b);
    add2reg(r, d);
  }
}

// Generic emitter (32bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
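// Expected expansion: a single ASI if the increment fits a simm8 and the
// hardware has memory-with-immediate ALU ops; otherwise a load / add2reg /
// store sequence through tmp.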
void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_asi(a, imm);
  } else {
    z_lgf(tmp, a);
    add2reg(tmp, imm);
    z_st(tmp, a);
  }
}

// Generic emitter (64bit) for direct memory increment.
// For optimal code, do not specify Z_R0 as temp register.
void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
  if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
    z_agsi(a, imm);
  } else {
    z_lg(tmp, a);
    add2reg(tmp, imm);
    z_stg(tmp, a);
  }
}

void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
    case  8: z_lg(dst, src); break;
    case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
    case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
    case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
    default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
  switch (size_in_bytes) {
    case  8: z_stg(src, dst); break;
    case  4: z_st(src, dst); break;
    case  2: z_sth(src, dst); break;
    case  1: z_stc(src, dst); break;
    default: ShouldNotReachHere();
  }
}

// Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
// a high-order summand in register tmp.
//
// return value: < 0:  No split required, si20 actually has property uimm12.
//               >= 0: Split performed. Use return value as uimm12 displacement and
//                     tmp as index register.
int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
  assert(Immediate::is_simm20(si20_offset), "sanity");
  int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
  int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
  assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
         !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
  assert((lg_off+ll_off) == si20_offset, "offset splitup error");

  Register work = accumulate ? Z_R0 : tmp;

  if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
    z_lghi(work, ll_off>>12);   // Implicit sign extension.
    z_slag(work, work, 12);
  } else {                      // Len of code = 0..10.
    if (ll_off == 0) { return -1; }
    // ll_off has 8 significant bits (at most) plus sign.
    if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
      z_llilh(work, ll_off >> 16);
      if (ll_off < 0) {                  // Sign-extension required.
        z_lgfr(work, work);
      }
    } else {
      if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
        z_llill(work, ll_off);
      } else {                           // Non-zero bits in both halfbytes.
        z_lghi(work, ll_off>>12);        // Implicit sign extension.
        z_slag(work, work, 12);
      }
    }
  }
  if (accumulate) { z_algr(tmp, work); } // len of code += 4
  return lg_off;
}

void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ley(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
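    // With fixed_codelen and accumulate, split_largeoffset below always emits
    // 14 bytes (LGHI 4 + SLAG 6 + ALGR 4), independent of the value of si20.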
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_le(t, si20, work);
    } else {
      if (accumulate) {
        z_le(t, disp12, work);
      } else {
        z_le(t, disp12, work, a);
      }
    }
  }
}

void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
  if (Displacement::is_validDisp(si20)) {
    z_ldy(t, si20, a);
  } else {
    // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
    // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
    // pool loads).
    bool accumulate    = true;
    bool fixed_codelen = true;
    Register work;

    if (fixed_codelen) {
      z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
    } else {
      accumulate = (a == tmp);
    }
    work = tmp;

    int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
    if (disp12 < 0) {
      z_ld(t, si20, work);
    } else {
      if (accumulate) {
        z_ld(t, disp12, work);
      } else {
        z_ld(t, disp12, work, a);
      }
    }
  }
}

// PCrelative TOC access.
// Returns distance (in bytes) from current position to start of consts section.
// Returns 0 (zero) if no consts section exists or if it has size zero.
long MacroAssembler::toc_distance() {
  CodeSection* cs = code()->consts();
  return (long)((cs != NULL) ? cs->start() - pc() : 0);
}

// Implementation on x86/sparc assumes that constant and instruction section are
// adjacent, but this doesn't hold. Two special situations may occur, that we must
// be able to handle:
//   1. const section may be located apart from the inst section.
//   2. const section may be empty.
//      In both cases, we use the const section's start address to compute the "TOC";
//      this seems to occur only temporarily; in the final step we always seem to end up
//      with the pc-relative variant.
//
// PC-relative offset could be +/-2**32 -> use long for disp.
// Furthermore: makes no sense to have special code for
// adjacent const and inst sections.
void MacroAssembler::load_toc(Register Rtoc) {
  // Simply use distance from start of const section (should be patched in the end).
  long disp = toc_distance();

  RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
  relocate(rspec);
  z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
}

// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr).
void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = dataLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
  assert(total_distance != 0, "sanity");

  // Some extra safety net.
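  // LGRL encodes a 32-bit, halfword-scaled relative offset, hence the range check.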
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}


// PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of load instr).
// Loaded addr has to be relocated when added to constant pool.
void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
  address          pc             = this->pc();
  ptrdiff_t        total_distance = addrLocation - pc;
  RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);

  assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");

  // Some extra safety net.
  if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
  }

  (this)->relocate(rspec, relocInfo::pcrel_addr_format);
  z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
}

// Generic operation: load a value from memory and test.
// CondCode indicates the sign (<0, ==0, >0) of the loaded value.
void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
  z_lb(dst, a);
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
  int64_t disp = a.disp20();
  if (Displacement::is_shortDisp(disp)) {
    z_lh(dst, a);
  } else if (Displacement::is_longDisp(disp)) {
    z_lhy(dst, a);
  } else {
    guarantee(false, "displacement out of range");
  }
  z_ltr(dst, dst);
}

void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
  z_lt(dst, a);
}

void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
  z_ltgf(dst, a);
}

void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
  z_ltg(dst, a);
}

// Test a bit in memory.
void MacroAssembler::testbit(const Address &a, unsigned int bit) {
  assert(a.index() == noreg, "no index reg allowed in testbit");
  if (bit <= 7) {
    z_tm(a.disp() + 3, a.base(), 1 << bit);
  } else if (bit <= 15) {
    z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
  } else if (bit <= 23) {
    z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
  } else if (bit <= 31) {
    z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
  } else {
    ShouldNotReachHere();
  }
}

// Test a bit in a register. Result is reflected in CC.
void MacroAssembler::testbit(Register r, unsigned int bitPos) {
  if (bitPos < 16) {
    z_tmll(r, 1U<<bitPos);
  } else if (bitPos < 32) {
    z_tmlh(r, 1U<<(bitPos-16));
  } else if (bitPos < 48) {
    z_tmhl(r, 1U<<(bitPos-32));
  } else if (bitPos < 64) {
    z_tmhh(r, 1U<<(bitPos-48));
  } else {
    ShouldNotReachHere();
  }
}

// Clear a register, i.e. load const zero into reg.
// Return len (in bytes) of generated instruction(s).
// whole_reg: Clear 64 bits if true, 32 bits otherwise.
// set_cc:    Use instruction that sets the condition code, if true.
int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
  unsigned int start_off = offset();
  if (whole_reg) {
    set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
  } else { // Only 32bit register.
    set_cc ? z_xr(r, r) : z_lhi(r, 0);
  }
  return offset() - start_off;
}

#ifdef ASSERT
int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
  switch (pattern_len) {
    case 1:
      pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
      // Fall through: replicate to pattern_len 2, then widen further below.
    case 2:
      pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
      // Fall through.
    case 4:
      pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
      // Fall through.
    case 8:
      return load_const_optimized_rtn_len(r, pattern, true);
      break;
    default:
      guarantee(false, "preset_reg: bad len");
  }
  return 0;
}
#endif

// addr: Address descriptor of memory to clear. Index register will not be used!
// size: Number of bytes to clear.
//  !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
//  !!! Use store_const() instead                  !!!
void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
  guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");

  if (size == 1) {
    z_mvi(addr, 0);
    return;
  }

  switch (size) {
    case 2: z_mvhhi(addr, 0);
      return;
    case 4: z_mvhi(addr, 0);
      return;
    case 8: z_mvghi(addr, 0);
      return;
    default: ; // Fallthru to xc.
  }

  z_xc(addr, size, addr);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0) z_nop();
}

// Special version for non-relocatable code if required alignment
// is larger than CodeEntryAlignment.
void MacroAssembler::align_address(int modulus) {
  while ((uintptr_t)pc() % modulus != 0) z_nop();
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         Register temp_reg,
                                         int64_t extra_slot_offset) {
  // On Z, we can have index and disp in an Address. So don't call argument_offset,
  // which issues an unnecessary add instruction.
  int stackElementSize = Interpreter::stackElementSize;
  int64_t offset = extra_slot_offset * stackElementSize;
  const Register argbase = Z_esp;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
    return Address(argbase, offset);
  }
  // else
  assert(temp_reg != noreg, "must specify");
  assert(temp_reg != Z_ARG1, "base and index are conflicting");
  z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
  return Address(argbase, temp_reg, offset);
}


//===================================================================
//===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
//===================================================================
//===            P A T C H A B L E   C O N S T A N T S           ===
//===================================================================


//---------------------------------------------------
//  Load (patchable) constant into register
//---------------------------------------------------


// Load absolute address (and try to optimize).
//   Note: This method is usable only for position-fixed code,
//         referring to a position-fixed target location.
//         If not so, relocations and patching must be used.
void MacroAssembler::load_absolute_address(Register d, address addr) {
  assert(addr != NULL, "should not happen");
  BLOCK_COMMENT("load_absolute_address:");
  if (addr == NULL) {
    z_larl(d, pc()); // Dummy emit for size calc.
    return;
  }

  if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
    z_larl(d, addr);
    return;
  }

  load_const_optimized(d, (long)addr);
}

// Load a 64bit constant.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const(Register t, long x) {
  Assembler::z_iihf(t, (int)(x >> 32));
  Assembler::z_iilf(t, (int)(x & 0xffffffff));
}

// Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
// Patchable code sequence, but not atomically patchable.
// Make sure to keep code size constant -> no value-dependent optimizations.
// Do not kill condition code.
void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
  if (sign_extend) { Assembler::z_lgfi(t, x); }
  else             { Assembler::z_llilf(t, x); }
}

// Load narrow oop constant, no decompression.
void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
  assert(UseCompressedOops, "must be on to call this method");
  load_const_32to64(t, a, false /*sign_extend*/);
}

// Load narrow klass constant, compression required.
void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(k);
  load_const_32to64(t, encoded_k, false /*sign_extend*/);
}

//------------------------------------------------------
//  Compare (patchable) constant with register.
//------------------------------------------------------

// Compare narrow oop in reg with narrow oop constant, no decompression.
void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
  assert(UseCompressedOops, "must be on to call this method");

  Assembler::z_clfi(oop1, oop2);
}

// Compare narrow klass in reg with narrow klass constant, compression required.
void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
  assert(UseCompressedClassPointers, "must be on to call this method");
  narrowKlass encoded_k = Klass::encode_klass(klass2);

  Assembler::z_clfi(klass1, encoded_k);
}

//----------------------------------------------------------
//  Check which kind of load_constant we have here.
//----------------------------------------------------------

// Detection of CPU version dependent load_const sequence.
// The detection is valid only for code sequences generated by load_const,
// not load_const_optimized.
bool MacroAssembler::is_load_const(address a) {
  unsigned long inst1, inst2;
  unsigned int  len1,  len2;

  len1 = get_instruction(a, &inst1);
  len2 = get_instruction(a + len1, &inst2);

  return is_z_iihf(inst1) && is_z_iilf(inst2);
}

// Detection of CPU version dependent load_const_32to64 sequence.
// Mostly used for narrow oops and narrow Klass pointers.
// The detection is valid only for code sequences generated by load_const_32to64.
bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1;
  unsigned int  len1;

  len1 = get_instruction(pos, &inst1);
  return is_z_llilf(inst1);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate32(address pos) {
  return is_equal(pos, CLFI_ZOPC, RIL_MASK);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
  return is_compare_immediate32(pos);
}

// Detection of compare_immediate_narrow sequence.
// The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
  return is_compare_immediate32(pos);
}

//-----------------------------------
//  patch the load_constant
//-----------------------------------

// CPU-version dependent patching of load_const.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const(a), "not a load of a constant");
  set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
  set_imm32((address)(a + 6), (int)(x & 0xffffffff));
}

// Patching the value of CPU version dependent load_const_32to64 sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
  assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");

  set_imm32(pos, np);
  return 6;
}

// Patching the value of CPU version dependent compare_immediate_narrow sequence.
// The passed ptr MUST be in compressed format!
int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
  assert(is_compare_immediate32(pos), "not a compressed ptr compare");

  set_imm32(pos, np);
  return 6;
}

// Patching the immediate value of CPU version dependent load_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_load_const_32to64(pos, no);
}

// Patching the immediate value of CPU version dependent load_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_load_const_32to64(pos, nk);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
// The passed ptr must NOT be in compressed format!
int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
  assert(UseCompressedOops, "Can only patch compressed oops");

  narrowOop no = oopDesc::encode_heap_oop(o);
  return patch_compare_immediate_32(pos, no);
}

// Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
// The passed ptr must NOT be in compressed format!
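// Like the other patchers above, returns the length (6 bytes) of the
// RIL-format instruction whose immediate field was patched.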
int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
  assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");

  narrowKlass nk = Klass::encode_klass(k);
  return patch_compare_immediate_32(pos, nk);
}

//------------------------------------------------------------------------
//  Extract the constant from a load_constant instruction stream.
//------------------------------------------------------------------------

// Get constant from a load_const sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const(a), "not a load of a constant");
  unsigned long x;
  x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
  x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
  return (long) x;
}

//--------------------------------------
//  Store a constant in memory.
//--------------------------------------

// General emitter to move a constant to memory.
// The store is atomic.
//  o Address must be given in RS format (no index register)
//  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
//  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
//  o Memory slot must be at least as wide as constant, will assert otherwise.
//  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
int MacroAssembler::store_const(const Address &dest, long imm,
                                unsigned int lm, unsigned int lc,
                                Register scratch) {
  int64_t  disp = dest.disp();
  Register base = dest.base();
  assert(!dest.has_index(), "not supported");
  assert((lm == 1) || (lm == 2) || (lm == 4) || (lm == 8), "memory length not supported");
  assert((lc == 1) || (lc == 2) || (lc == 4) || (lc == 8), "constant length not supported");
  assert(lm >= lc, "memory slot too small");
  assert(lc == 8 || Immediate::is_simm(imm, lc*8), "const out of range");
  assert(Displacement::is_validDisp(disp), "displacement out of range");

  bool is_shortDisp = Displacement::is_shortDisp(disp);
  int store_offset = -1;

  // For target len == 1 it's easy.
  if (lm == 1) {
    store_offset = offset();
    if (is_shortDisp) {
      z_mvi(disp, base, imm);
      return store_offset;
    } else {
      z_mviy(disp, base, imm);
      return store_offset;
    }
  }

  // All the "good stuff" takes an unsigned displacement.
  if (is_shortDisp) {
    // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.

    store_offset = offset();
    switch (lm) {
      case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
        z_mvhhi(disp, base, imm);
        return store_offset;
      case 4:
        if (Immediate::is_simm16(imm)) {
          z_mvhi(disp, base, imm);
          return store_offset;
        }
        break;
      case 8:
        if (Immediate::is_simm16(imm)) {
          z_mvghi(disp, base, imm);
          return store_offset;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Can't optimize, so load value and store it.
  guarantee(scratch != noreg, "need a scratch register here!");
  if (imm != 0) {
    load_const_optimized(scratch, imm);  // Preserves CC anyway.
  } else {
    // Leave CC alone!!
    (void) clear_reg(scratch, true, false); // Indicate unused result.
  }

  store_offset = offset();
  if (is_shortDisp) {
    switch (lm) {
      case 2:
        z_sth(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_st(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  } else {
    switch (lm) {
      case 2:
        z_sthy(scratch, disp, Z_R0, base);
        return store_offset;
      case 4:
        z_sty(scratch, disp, Z_R0, base);
        return store_offset;
      case 8:
        z_stg(scratch, disp, Z_R0, base);
        return store_offset;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return -1; // should not reach here
}

//===================================================================
//===     N O T   P A T C H A B L E   C O N S T A N T S          ===
//===================================================================

// Load constant x into register t with a fast instruction sequence
// depending on the bits in x. Preserves CC under all circumstances.
int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
  if (x == 0) {
    int len;
    if (emit) {
      len = clear_reg(t, true, false);
    } else {
      len = 4;
    }
    return len;
  }

  if (Immediate::is_simm16(x)) {
    if (emit) { z_lghi(t, x); }
    return 4;
  }

  // 64 bit value: | part1 | part2 | part3 | part4 |
  // At least one part is not zero!
  int part1 = ((x >> 32) & 0xffff0000) >> 16;
  int part2 = (x >> 32) & 0x0000ffff;
  int part3 = (x & 0xffff0000) >> 16;
  int part4 = (x & 0x0000ffff);

  // Lower word only (unsigned).
  if ((part1 == 0) && (part2 == 0)) {
    if (part3 == 0) {
      if (emit) z_llill(t, part4);
      return 4;
    }
    if (part4 == 0) {
      if (emit) z_llilh(t, part3);
      return 4;
    }
    if (emit) z_llilf(t, (int)(x & 0xffffffff));
    return 6;
  }

  // Upper word only.
  if ((part3 == 0) && (part4 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      return 4;
    }
    if (part2 == 0) {
      if (emit) z_llihh(t, part1);
      return 4;
    }
    if (emit) z_llihf(t, (int)(x >> 32));
    return 6;
  }

  // Lower word only (signed).
  if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
    if (emit) z_lgfi(t, (int)(x & 0xffffffff));
    return 6;
  }

  int len = 0;

  if ((part1 == 0) || (part2 == 0)) {
    if (part1 == 0) {
      if (emit) z_llihl(t, part2);
      len += 4;
    } else {
      if (emit) z_llihh(t, part1);
      len += 4;
    }
  } else {
    if (emit) z_llihf(t, (int)(x >> 32));
    len += 6;
  }

  if ((part3 == 0) || (part4 == 0)) {
    if (part3 == 0) {
      if (emit) z_iill(t, part4);
      len += 4;
    } else {
      if (emit) z_iilh(t, part3);
      len += 4;
    }
  } else {
    if (emit) z_iilf(t, (int)(x & 0xffffffff));
    len += 6;
  }
  return len;
}

//=====================================================================
//===   H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
//=====================================================================

// Note: In the worst case, one of the scratch registers is destroyed!!!
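// Typical use (illustrative):
//   compare32_and_branch(Z_R2, RegisterOrConstant((intptr_t)0), bcondEqual, done);
// emits a signed 32-bit compare of Z_R2 against zero and branches to 'done' if equal.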
void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
}

// Note: In the worst case, one of the scratch registers is destroyed!!!
void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
  // Right operand is constant.
  if (x2.is_constant()) {
    jlong value = x2.as_constant();
    compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
    return;
  }

  // Right operand is in register.
  compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
}

// Generate an optimal branch to the branch target.
// Optimal means that a relative branch (brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Used registers:
//   Z_R1 - work reg. Holds branch target address.
//          Used in fallback case only.
//
// This version of branch_optimized is good for cases where the target address is known
// and constant, i.e. is never changed (no relocation, no patching).
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
  address branch_origin = pc();

  if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    z_brc(cond, branch_addr);
  } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
    z_brcl(cond, branch_addr);
  } else {
    load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
    z_bcr(cond, Z_R1);
  }
}

// This version of branch_optimized is good for cases where the target address
// is potentially not yet known at the time the code is emitted.
//
// One very common case is a branch to an unbound label which is handled here.
// This version of branch_optimized is good for cases where the target address
// is potentially not yet known at the time the code is emitted.
//
// One very common case is a branch to an unbound label, which is handled here.
// The caller might know (or hope) that the branch distance is short enough
// to be encoded in a 16bit relative address. In this case, the caller will pass a
// NearLabel branch_target.
// Care must be taken with unbound labels. Each call to target(label) creates
// an entry in the patch queue for that label to patch all references of the label
// once it gets bound. Those recorded patch locations must be patchable. Otherwise,
// an assertion fires at patch time.
void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    branch_optimized(cond, branch_addr);
  } else {
    z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
  }
}

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  address  branch_addr,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  address branch_origin = pc();
  if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
    switch (casenum) {
      case 0: z_crj( r1, r2, cond, branch_addr); break;
      case 1: z_clrj(r1, r2, cond, branch_addr); break;
      case 2: z_cgrj(r1, r2, cond, branch_addr); break;
      case 3: z_clgrj(r1, r2, cond, branch_addr); break;
      default: ShouldNotReachHere(); break;
    }
  } else {
    switch (casenum) {
      case 0: z_cr( r1, r2); break;
      case 1: z_clr(r1, r2); break;
      case 2: z_cgr(r1, r2); break;
      case 3: z_clgr(r1, r2); break;
      default: ShouldNotReachHere(); break;
    }
    branch_optimized(cond, branch_addr);
  }
}

// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgij, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand (in register)
//   x2 - right compare operand (immediate)
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  jlong    x2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  address      branch_origin = pc();
  bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
  bool         is_RelAddr16  = (branch_target.is_bound() &&
                                RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
  unsigned int casenum       = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
    switch (casenum) {
      case 0: z_cij( r1, x2, cond, branch_target); break;
      case 1: z_clij(r1, x2, cond, branch_target); break;
      case 2: z_cgij(r1, x2, cond, branch_target); break;
      case 3: z_clgij(r1, x2, cond, branch_target); break;
      default: ShouldNotReachHere(); break;
    }
    return;
  }

  if (x2 == 0) {
    switch (casenum) {
      case 0: z_ltr(r1, r1); break;
      case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      case 2: z_ltgr(r1, r1); break;
      case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
      default: ShouldNotReachHere(); break;
    }
  } else {
    if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
      switch (casenum) {
        case 0: z_chi(r1, x2); break;
        case 1: z_chi(r1, x2); break; // positive immediate < 2**15
        case 2: z_cghi(r1, x2); break;
        case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
        default: break;
      }
    } else if ((has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2))) {
      switch (casenum) {
        case 0: z_cfi( r1, x2); break;
        case 1: z_clfi(r1, x2); break;
        case 2: z_cgfi(r1, x2); break;
        case 3: z_clgfi(r1, x2); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      // No instruction with immediate operand possible, so load into register.
      Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
      load_const_optimized(scratch, x2);
      switch (casenum) {
        case 0: z_cr( r1, scratch); break;
        case 1: z_clr(r1, scratch); break;
        case 2: z_cgr(r1, scratch); break;
        case 3: z_clgr(r1, scratch); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
  branch_optimized(cond, branch_target);
}
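
// --- Illustrative sketch (not part of the original file) --------------------
// The emitter above walks a ladder of immediate ranges: an 8-bit immediate
// (plus a short, known branch distance) allows a fused compare-and-branch
// (CIJ family), a 16-bit immediate a halfword compare (CHI/CGHI), a 32-bit
// immediate a fullword compare (CFI/CLFI family), and anything larger must
// be materialized in a scratch register first. A compact model of that
// classification (hypothetical names):
#ifdef MACROASM_DOC_SKETCHES
enum SketchCmpForm { kFusedImm8, kCmpImm16, kCmpImm32, kCmpViaScratch };
static SketchCmpForm sketch_classify_cmp_imm(long x2, bool has_sign, bool short_branch) {
  bool imm8 = has_sign ? (-128 <= x2 && x2 <= 127) : (0 <= x2 && x2 <= 255);
  if (short_branch && imm8) return kFusedImm8;                   // CIJ/CLIJ/CGIJ/CLGIJ
  if (has_sign ? (-32768 <= x2 && x2 <= 32767) : (0 <= x2 && x2 < 32768))
    return kCmpImm16;                                            // CHI/CGHI
  bool imm32 = has_sign ? (x2 == (long)(int)x2) : (x2 == (long)(unsigned int)x2);
  if (imm32) return kCmpImm32;                                   // CFI/CLFI/CGFI/CLGFI
  return kCmpViaScratch;                                         // load_const + CR family
}
#endif
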
// Generate an optimal compare and branch to the branch target.
// Optimal means that a relative branch (clgrj, brc or brcl) is used if the
// branch distance is short enough. Loading the target address into a
// register and branching via reg is used as fallback only.
//
// Input:
//   r1 - left compare operand
//   r2 - right compare operand
void MacroAssembler::compare_and_branch_optimized(Register r1,
                                                  Register r2,
                                                  Assembler::branch_condition cond,
                                                  Label&   branch_target,
                                                  bool     len64,
                                                  bool     has_sign) {
  unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);

  if (branch_target.is_bound()) {
    address branch_addr = target(branch_target);
    compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
  } else {
    switch (casenum) {
      case 0: z_cr( r1, r2); break;
      case 1: z_clr(r1, r2); break;
      case 2: z_cgr(r1, r2); break;
      case 3: z_clgr(r1, r2); break;
      default: ShouldNotReachHere(); break;
    }
    branch_optimized(cond, branch_target);
  }
}

//===========================================================================
//===   END   H I G H E R   L E V E L   B R A N C H   E M I T T E R S    ===
//===========================================================================

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

// NOTE: destroys r.
void MacroAssembler::c2bool(Register r, Register t) {
  z_lcr(t, r);   // t = -r
  z_or(r, t);    // r = -r OR r
  z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
}
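
// --- Illustrative sketch (not part of the original file) --------------------
// c2bool() above normalizes an int to 0/1 without a branch: the sign bit of
// (r | -r) is set exactly when r != 0, and the logical shift moves that bit
// into position 0. The same trick in portable C++ (unsigned arithmetic to
// avoid signed-overflow pitfalls; names hypothetical):
#ifdef MACROASM_DOC_SKETCHES
static unsigned int sketch_c2bool(int r) {
  unsigned int u = (unsigned int)r;
  return (u | (0u - u)) >> 31;  // LCR + OR + SRL: 0 if r was 0, 1 otherwise
}
#endif
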
RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  BLOCK_COMMENT("delayed_value {");
  // Load indirectly to solve generation ordering problem.
  load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
  z_lg(tmp, 0, tmp);                                        // tmp = *tmp;

#ifdef ASSERT
  NearLabel L;
  compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
  z_illtrap();
  bind(L);
#endif

  if (offset != 0) {
    z_agfi(tmp, offset);                                    // tmp = tmp + offset;
  }

  BLOCK_COMMENT("} delayed_value");
  return RegisterOrConstant(tmp);
}

// Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
// and return the resulting instruction.
// Dest_pos and inst_pos are 32 bit only. These parms can only designate
// relative positions.
// Use correct argument types. Do not pre-calculate distance.
unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
  int c = 0;
  unsigned long patched_inst = 0;
  if (is_call_pcrelative_short(inst) ||
      is_branch_pcrelative_short(inst) ||
      is_branchoncount_pcrelative_short(inst) ||
      is_branchonindex32_pcrelative_short(inst)) {
    c = 1;
    int m = fmask(15, 0);    // simm16(-1, 16, 32);
    int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
    patched_inst = (inst & ~m) | v;
  } else if (is_compareandbranch_pcrelative_short(inst)) {
    c = 2;
    long m = fmask(31, 16);  // simm16(-1, 16, 48);
    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_branchonindex64_pcrelative_short(inst)) {
    c = 3;
    long m = fmask(31, 16);  // simm16(-1, 16, 48);
    long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
    c = 4;
    long m = fmask(31, 0);   // simm32(-1, 16, 48);
    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
    c = 5;
    long m = fmask(31, 0);   // simm32(-1, 16, 48);
    long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
    patched_inst = (inst & ~m) | v;
  } else {
    print_dbg_msg(tty, inst, "not a relative branch", 0);
    dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
    ShouldNotReachHere();
  }

  long new_off = get_pcrel_offset(patched_inst);
  if (new_off != (dest_pos - inst_pos)) {
    tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
    print_dbg_msg(tty, inst, "<- original instruction: branch patching error", 0);
    print_dbg_msg(tty, patched_inst, "<- patched instruction: branch patching error", 0);
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#endif
    ShouldNotReachHere();
  }
  return patched_inst;
}

// Only called when binding labels (share/vm/asm/assembler.cpp).
// Pass arguments as intended. Do not pre-calculate distance.
void MacroAssembler::pd_patch_instruction(address branch, address target) {
  unsigned long stub_inst;
  int inst_len = get_instruction(branch, &stub_inst);

  set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
}
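
// --- Illustrative sketch (not part of the original file) --------------------
// Every case in patched_branch() follows the same pattern: build a mask for
// the instruction's displacement field, encode the new halfword offset, and
// splice it in with (inst & ~mask) | field. Reduced to its core for the
// 32-bit offset case, assuming (as get_instruction suggests) that the 6-byte
// instruction is held right-aligned in a 64-bit word with the 32-bit
// immediate in the low bits; hypothetical helper:
#ifdef MACROASM_DOC_SKETCHES
static unsigned long sketch_patch_off32(unsigned long inst, long dest, long origin) {
  long hw = (dest - origin) / 2;        // pc-relative offset, in halfwords
  unsigned long mask = 0xffffffffUL;    // imm32 occupies the low 32 bits here
  return (inst & ~mask) | ((unsigned long)hw & mask);
}
#endif
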
// Extract relative address (aka offset).
// inv_simm16 works for 4-byte instructions only.
// Compare and branch instructions are 6-byte and have a 16bit offset "in the middle".
long MacroAssembler::get_pcrel_offset(unsigned long inst) {

  if (MacroAssembler::is_pcrelative_short(inst)) {
    if (((inst & 0xFFFFffff00000000UL) == 0) && ((inst & 0x00000000FFFF0000UL) != 0)) {
      return RelAddr::inv_pcrel_off16(inv_simm16(inst));
    } else {
      return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
    }
  }

  if (MacroAssembler::is_pcrelative_long(inst)) {
    return RelAddr::inv_pcrel_off32(inv_simm32(inst));
  }

  print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
#ifdef LUCY_DBG
  VM_Version::z_SIGSEGV();
#else
  ShouldNotReachHere();
#endif
  return -1;
}

long MacroAssembler::get_pcrel_offset(address pc) {
  unsigned long inst;
  unsigned int len = get_instruction(pc, &inst);

#ifdef ASSERT
  long offset;
  if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
    offset = get_pcrel_offset(inst);
  } else {
    offset = -1;
  }

  if (offset == -1) {
    dump_code_range(tty, pc, 32, "not a pcrelative instruction");
#ifdef LUCY_DBG
    VM_Version::z_SIGSEGV();
#else
    ShouldNotReachHere();
#endif
  }
  return offset;
#else
  return get_pcrel_offset(inst);
#endif // ASSERT
}

// Get target address from pc-relative instructions.
address MacroAssembler::get_target_addr_pcrel(address pc) {
  assert(is_pcrelative_long(pc), "not a pcrelative instruction");
  return pc + get_pcrel_offset(pc);
}

// Patch pc-relative load address.
void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
  unsigned long inst;
  // Offset is +/- 2**32 -> use long.
  ptrdiff_t distance = con - pc;

  get_instruction(pc, &inst);

  if (is_pcrelative_short(inst)) {
    *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc); // Instructions are at least 2-byte aligned, no test required.

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
      dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
    }
    return;
  }

  if (is_pcrelative_long(inst)) {
    *(int *)(pc+2) = RelAddr::pcrel_off32(con, pc);

    // Some extra safety net.
    if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
      print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
      dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
    }
    return;
  }

  guarantee(false, "not a pcrelative instruction to patch!");
}
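
// --- Illustrative sketch (not part of the original file) --------------------
// All pc-relative fields handled above store the distance in halfwords, not
// bytes: instructions are at least 2-byte aligned, so the low bit of a byte
// distance is always zero, and dropping it doubles the reach. Encoding and
// decoding, loosely modeled on RelAddr::pcrel_off16 / inv_pcrel_off16
// (sketch only, no range checking):
#ifdef MACROASM_DOC_SKETCHES
static short sketch_pcrel_off16(const unsigned char* target, const unsigned char* pc) {
  long hw = (target - pc) / 2;  // byte distance is even by alignment
  return (short)hw;             // caller must have verified the simm16 range
}
static long sketch_inv_pcrel_off16(short field) {
  return (long)field * 2;       // back to a byte distance
}
#endif
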
// "Current PC" here means the address just behind the basr instruction.
address MacroAssembler::get_PC(Register result) {
  z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
  return pc();
}

// Get current PC + offset.
// Offset given in bytes, must be even!
// "Current PC" here means the address of the larl instruction plus the given offset.
address MacroAssembler::get_PC(Register result, int64_t offset) {
  address here = pc();
  z_larl(result, offset/2); // Save target instruction address in result.
  return here + offset;
}

// Resize_frame with SP(new) = SP(old) - [offset].
void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp) {
  assert_different_registers(offset, fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }

  z_sgr(Z_SP, offset);
  z_stg(fp, _z_abi(callers_sp), Z_SP);
}

// Resize_frame with SP(new) = [addr].
void MacroAssembler::resize_frame_absolute(Register addr, Register fp, bool load_fp) {
  assert_different_registers(addr, fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }

  if (addr != Z_R0) {
    // Minimize stalls by not using Z_SP immediately after update.
    z_stg(fp, _z_abi(callers_sp), addr);
    z_lgr(Z_SP, addr);
  } else {
    z_lgr(Z_SP, addr);
    z_stg(fp, _z_abi(callers_sp), Z_SP);
  }
}

// Resize_frame with SP(new) = SP(old) + offset.
void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
  assert_different_registers(fp, Z_SP);
  if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }

  if (Displacement::is_validDisp((int)_z_abi(callers_sp) + offset.constant_or_zero())) {
    // Minimize stalls by first using, then updating Z_SP.
    // Do that only if we have a small positive offset or if ExtImm are available.
    z_stg(fp, Address(Z_SP, offset, _z_abi(callers_sp)));
    add64(Z_SP, offset);
  } else {
    add64(Z_SP, offset);
    z_stg(fp, _z_abi(callers_sp), Z_SP);
  }
}

void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
#ifdef ASSERT
  assert_different_registers(bytes, old_sp, Z_SP);
  if (!copy_sp) {
    z_cgr(old_sp, Z_SP);
    asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
  }
#endif
  if (copy_sp) { z_lgr(old_sp, Z_SP); }
  if (bytes_with_inverted_sign) {
    z_stg(old_sp, 0, bytes, Z_SP);
    add2reg_with_index(Z_SP, 0, bytes, Z_SP);
  } else {
    z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
    z_stg(old_sp, 0, Z_SP);
  }
}

unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
  long offset = Assembler::align(bytes, frame::alignment_in_bytes);

  if (Displacement::is_validDisp(-offset)) {
    // Minimize stalls by first using, then updating Z_SP.
    // Do that only if we have ExtImm available.
    z_stg(Z_SP, -offset, Z_SP);
    add2reg(Z_SP, -offset);
  } else {
    if (scratch != Z_R0 && scratch != Z_R1) {
      z_stg(Z_SP, -offset, Z_SP);
      add2reg(Z_SP, -offset);
    } else { // scratch == Z_R0 || scratch == Z_R1
      z_lgr(scratch, Z_SP);
      add2reg(Z_SP, -offset);
      z_stg(scratch, 0, Z_SP);
    }
  }
  return offset;
}

// Push a frame of size `bytes' plus abi160 on top.
unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
  BLOCK_COMMENT("push_frame_abi160 {");
  unsigned int res = push_frame(bytes + frame::z_abi_160_size);
  BLOCK_COMMENT("} push_frame_abi160");
  return res;
}
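
// --- Illustrative sketch (not part of the original file) --------------------
// Every resize/push variant above maintains the same invariant: the word at
// offset _z_abi(callers_sp) of the new top frame points to the caller's SP
// (a back chain). The variants only differ in how they order the SP update
// and the store, to minimize address-generation stalls. The invariant itself
// in plain C++ (layout and names hypothetical):
#ifdef MACROASM_DOC_SKETCHES
struct SketchAbiFrame { SketchAbiFrame* callers_sp; /* + more ABI slots */ };
static SketchAbiFrame* sketch_push_frame(SketchAbiFrame* sp, unsigned long bytes) {
  SketchAbiFrame* new_sp = (SketchAbiFrame*)((char*)sp - bytes); // stack grows down
  new_sp->callers_sp = sp;                                       // back chain
  return new_sp;
}
static SketchAbiFrame* sketch_pop_frame(SketchAbiFrame* sp) {
  return sp->callers_sp;                                         // reload caller's SP
}
#endif
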
// Pop current C frame.
void MacroAssembler::pop_frame() {
  BLOCK_COMMENT("pop_frame:");
  Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
}

void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
  if (allow_relocation) {
    call_c(entry_point);
  } else {
    call_c_static(entry_point);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  bool allow_relocation = true;
  call_VM_leaf_base(entry_point, allow_relocation);
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     allow_relocation,
                                  bool     check_exceptions) { // Defaults to true.
  // Allow_relocation indicates, if true, that the generated code shall
  // be fit for code relocation or referenced data relocation. In other
  // words: all addresses must be considered variable. PC-relative addressing
  // is not possible then.
  // On the other hand, if (allow_relocation == false), addresses and offsets
  // may be considered stable, enabling us to take advantage of some PC-relative
  // addressing tweaks. These might improve performance and reduce code size.

  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = Z_SP; // Load Z_SP as SP.
  }

  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  if (allow_relocation) {
    return_pc = call_c(entry_point);
  } else {
    return_pc = call_c_static(entry_point);
  }

  reset_last_Java_frame(allow_relocation);

  // C++ interp handles this in the interpreter.
  check_and_handle_popframe(Z_thread);
  check_and_handle_earlyret(Z_thread);

  // Check for pending exceptions.
  if (check_exceptions) {
    // Check for pending exceptions (java_thread is set upon return).
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception. However, after
    // relocation that branch might no longer reach its target, so we jump
    // around a stub call instead, which can always reach.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.
    call_stub(StubRoutines::forward_exception_entry());
    bind(ok);
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc; // Wipe out other (error handling) calls.
}

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     check_exceptions) { // Defaults to true.
  bool allow_relocation = true;
  call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
}

// VM calls without explicit last_java_sp.

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
}
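
// --- Illustrative note (not part of the original file) ----------------------
// The call_VM wrappers below shuffle Java-visible arguments into the C
// argument registers Z_ARG2..Z_ARG4 (Z_ARG1 carries the thread). The asserts
// guard against the classic shuffle hazard: once arg_1 has been copied into
// Z_ARG2, a later source must not itself live in Z_ARG2, or its value has
// already been clobbered. Schematically:
#ifdef MACROASM_DOC_SKETCHES
//   lgr_if_needed(Z_ARG2, arg_1);  // if arg_2 == Z_ARG2, arg_2 now holds arg_1!
//   lgr_if_needed(Z_ARG3, arg_2);  // hence assert(arg_2 != Z_ARG2) beforehand
#endif
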
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                             Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

// VM static calls without explicit last_java_sp.

void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
}

void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                                    Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM_static(oop_result, entry_point, check_exceptions);
}

// VM calls with explicit last_java_sp.

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
                             Register arg_2, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
                             Register arg_2, Register arg_3, bool check_exceptions) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg_1);
  assert(arg_2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg_2);
  assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg_3);
  call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
}

// VM leaf calls.

void MacroAssembler::call_VM_leaf(address entry_point) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_leaf_base(entry_point, true);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
  call_VM_leaf(entry_point);
}

// Static VM leaf calls.
// Really static VM leaf calls are never patched.

void MacroAssembler::call_VM_leaf_static(address entry_point) {
  // Call takes possible detour via InterpreterMacroAssembler.
  call_VM_leaf_base(entry_point, false);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  call_VM_leaf_static(entry_point);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  call_VM_leaf_static(entry_point);
}

void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
  assert(arg_2 != Z_ARG1, "smashed argument");
  if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
  assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
  if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
  call_VM_leaf_static(entry_point);
}

// Don't use detour via call_c(reg).
address MacroAssembler::call_c(address function_entry) {
  load_const(Z_R1, function_entry);
  return call(Z_R1);
}

// Variant for really static (non-relocatable) calls which are never patched.
address MacroAssembler::call_c_static(address function_entry) {
  load_absolute_address(Z_R1, function_entry);
#if 0 // def ASSERT
  // Verify that call site did not move.
  load_const_optimized(Z_R0, function_entry);
  z_cgr(Z_R1, Z_R0);
  z_brc(bcondEqual, 3);
  z_illtrap(0xba);
#endif
  return call(Z_R1);
}

address MacroAssembler::call_c_opt(address function_entry) {
  bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
  _last_calls_return_pc = success ? pc() : NULL;
  return _last_calls_return_pc;
}

// Identify a call_far_patchable instruction: LARL + LG + BASR
//
// nop                  ; optionally, if required for alignment
// lgrl rx,A(TOC entry) ; PC-relative access into constant pool
// basr Z_R14,rx        ; end of this instruction must be aligned to a word boundary
//
// Code pattern will eventually get patched into variant2 (see below for detection code).
//
bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
  address iaddr = instruction_addr;

  // Check for the actual load instruction.
  if (!is_load_const_from_toc(iaddr)) { return false; }
  iaddr += load_const_from_toc_size();

  // Check for the call (BASR) instruction, finally.
  assert(iaddr - instruction_addr + call_byregister_size() == call_far_patchable_size(), "size mismatch");
  return is_call_byregister(iaddr);
}

// Identify a call_far_patchable instruction: BRASL
//
// Code pattern that suits atomic patching:
// nop                   ; Optionally, if required for alignment.
// nop ...               ; Multiple filler nops to compensate for size difference (variant0 is longer).
// nop                   ; For code pattern detection: Prepend each BRASL with a nop.
// brasl Z_R14,<reladdr> ; End of code must be 4-byte aligned!
bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
  const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());

  // Check for correct number of leading nops.
  address iaddr;
  for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
    if (!is_z_nop(iaddr)) { return false; }
  }
  assert(iaddr == call_addr, "sanity");

  // --> Check for call instruction.
  if (is_call_far_pcrelative(call_addr)) {
    assert(call_addr - instruction_addr + call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
    return true;
  }

  return false;
}

// Emit a NOT mt-safely patchable 64 bit absolute call.
// If toc_offset == -2, then the destination of the call (= target) is emitted
// to the constant pool and a runtime_call relocation is added
// to the code buffer.
// If toc_offset != -2, target must already be in the constant pool at
// _ctableStart+toc_offset (a caller can retrieve toc_offset
// from the runtime_call relocation).
// Special handling of emitting to scratch buffer when there is no constant pool.
// Slightly changed code pattern. We emit an additional nop if we would
// not end emitting at a word aligned address. This is to ensure
// an atomically patchable displacement in brasl instructions.
//
// A call_far_patchable comes in different flavors:
//  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
//  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
//  - BRASL                  (relative address of call target coded in instruction)
// All flavors occupy the same amount of space. Length differences are compensated
// by leading nops, such that the instruction sequence always ends at the same
// byte offset. This is required to keep the return offset constant.
// Furthermore, the return address (the end of the instruction sequence) is forced
// to be on a 4-byte boundary. This is required for atomic patching, should we ever
// need to patch the call target of the BRASL flavor.
// RETURN value: false, if no constant pool entry could be allocated, true otherwise.
bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
  // Get current pc and ensure word alignment for end of instr sequence.
  const address start_pc = pc();
  const intptr_t start_off = offset();
  assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
  const ptrdiff_t dist = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
  const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
  const bool emit_relative_call  = !emit_target_to_pool &&
                                   RelAddr::is_in_range_of_RelAddr32(dist) &&
                                   ReoptimizeCallSequences &&
                                   !code_section()->scratch_emit();

  if (emit_relative_call) {
    // Add padding to get the same size as below.
    const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
    unsigned int current_padding;
    for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
    assert(current_padding == padding, "sanity");

    // relative call: len = 2(nop) + 6(brasl)
    // CodeBlob resize cannot occur in this case because
    // this call is emitted into pre-existing space.
    z_nop(); // Prepend each BRASL with a nop.
    z_brasl(Z_R14, target);
  } else {
    // absolute call: Get address from TOC.
    // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
    if (emit_target_to_pool) {
      // When emitting the call for the first time, we do not need to use
      // the pc-relative version. It will be patched anyway, when the code
      // buffer is copied.
      // Relocation is not needed when !ReoptimizeCallSequences.
      relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
      AddressLiteral dest(target, rt);
      // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills
      // inst_mark(). Reset if possible.
      bool reset_mark = (inst_mark() == pc());
      tocOffset = store_oop_in_toc(dest);
      if (reset_mark) { set_inst_mark(); }
      if (tocOffset == -1) {
        return false; // Couldn't create constant pool entry.
      }
    }
    assert(offset() == start_off, "emit no code before this point!");

    address tocPos = pc() + tocOffset;
    if (emit_target_to_pool) {
      tocPos = code()->consts()->start() + tocOffset;
    }
    load_long_pcrelative(Z_R14, tocPos);
    z_basr(Z_R14, Z_R14);
  }

#ifdef ASSERT
  // Assert that we can identify the emitted call.
  assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
  assert(offset() == start_off + call_far_patchable_size(), "wrong size");

  if (emit_target_to_pool) {
    assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
           "wrong encoding of dest address");
  }
#endif
  return true; // success
}

// Identify a call_far_patchable instruction.
// For more detailed information see header comment of call_far_patchable.
bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
  return is_call_far_patchable_variant2_at(instruction_addr) ||  // short version: BRASL
         is_call_far_patchable_variant0_at(instruction_addr);    // long version:  LARL + LG + BASR
}
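
// --- Illustrative sketch (not part of the original file) --------------------
// call_far_patchable() keeps every flavor the same length by prepending
// 2-byte nops until the short (BRASL) form matches the long (TOC) form, so
// the return offset, and the 4-byte alignment of the return address, never
// depend on which flavor was emitted or later patched in. The padding
// computation, as a hypothetical stand-alone helper:
#ifdef MACROASM_DOC_SKETCHES
static int sketch_call_padding_nops(int patchable_size,   // long form, e.g. load-from-TOC + BASR
                                    int pcrelative_size,  // short form, nop + BRASL
                                    int nop_size) {
  int padding = patchable_size - pcrelative_size;  // filled with leading nops
  return padding / nop_size;                       // must divide evenly (asserted above)
}
#endif
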
// Does the call_far_patchable instruction use a pc-relative encoding
// of the call destination?
bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
  // Variant 2 is pc-relative.
  return is_call_far_patchable_variant2_at(instruction_addr);
}

bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
  // Prepend each BRASL with a nop.
  return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size()); // Match at position after one nop required.
}

// Set destination address of a call_far_patchable instruction.
void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
  ResourceMark rm;

  // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
  int code_size = MacroAssembler::call_far_patchable_size();
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.call_far_patchable(dest, tocOffset);
  ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
}

// Get dest address of a call_far_patchable instruction.
address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
  // Dynamic TOC: absolute address in constant pool.
  // Check variant2 first, it is more frequent.

  // Relative address encoded in call instruction.
  if (is_call_far_patchable_variant2_at(instruction_addr)) {
    return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.

  // Absolute address in constant pool.
  } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
    address iaddr = instruction_addr;

    long    tocOffset = get_load_const_from_toc_offset(iaddr);
    address tocLoc    = iaddr + tocOffset;
    return *(address *)(tocLoc);
  } else {
    fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
    fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
            *(unsigned long*)instruction_addr,
            *(unsigned long*)(instruction_addr+8),
            call_far_patchable_size());
    Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
    ShouldNotReachHere();
    return NULL;
  }
}

void MacroAssembler::align_call_far_patchable(address pc) {
  if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
}

void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}

// Read from the polling page.
// Use TM or TMY instruction, depending on read offset.
//
// offset = 0: Use TM, safepoint polling.
// offset < 0: Use TMY, profiling safepoint polling.
void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
  if (Immediate::is_uimm12(offset)) {
    z_tm(offset, polling_page_address, mask_safepoint);
  } else {
    z_tmy(offset, polling_page_address, mask_profiling);
  }
}
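
// --- Illustrative sketch (not part of the original file) --------------------
// load_from_polling_page() chooses between TM and TMY purely by displacement
// range: TM takes an unsigned 12-bit displacement (0..4095), TMY a signed
// 20-bit one, which covers the negative offsets used for profiling polls.
// The two range predicates, spelled out (hypothetical helpers):
#ifdef MACROASM_DOC_SKETCHES
static bool sketch_fits_uimm12(long d) { return 0 <= d && d <= 4095; }                 // TM
static bool sketch_fits_simm20(long d) { return -(1L << 19) <= d && d < (1L << 19); }  // TMY
#endif
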
// Check whether z_instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 2) { return false; } // It's none of the allowed instructions.

  if (ilen == 4) {
    if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. Fail.

    int ms = inv_mask(z_instruction, 8, 32);   // mask
    int ra = inv_reg(z_instruction, 16, 32);   // base register
    int ds = inv_uimm12(z_instruction);        // displacement

    if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
      return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
    }

  } else { /* if (ilen == 6) */

    assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");

    if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. Fail.

    int ms = inv_mask(z_instruction, 8, 48);   // mask
    int ra = inv_reg(z_instruction, 16, 48);   // base register
    int ds = inv_simm20(z_instruction);        // displacement
  }

  return true;
}

// Extract poll address from instruction and ucontext.
address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
  assert(ucontext != NULL, "must have ucontext");
  ucontext_t* uc = (ucontext_t*) ucontext;
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 4 && is_z_tm(z_instruction)) {
    int ra = inv_reg(z_instruction, 16, 32);  // base register
    int ds = inv_uimm12(z_instruction);       // displacement
    address addr = (address)uc->uc_mcontext.gregs[ra];
    return addr + ds;
  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
    int ra = inv_reg(z_instruction, 16, 48);  // base register
    int ds = inv_simm20(z_instruction);       // displacement
    address addr = (address)uc->uc_mcontext.gregs[ra];
    return addr + ds;
  }

  ShouldNotReachHere();
  return NULL;
}

// Extract poll register from instruction.
uint MacroAssembler::get_poll_register(address instr_loc) {
  unsigned long z_instruction;
  unsigned int  ilen = get_instruction(instr_loc, &z_instruction);

  if (ilen == 4 && is_z_tm(z_instruction)) {
    return (uint)inv_reg(z_instruction, 16, 32);  // base register
  } else if (ilen == 6 && is_z_tmy(z_instruction)) {
    return (uint)inv_reg(z_instruction, 16, 48);  // base register
  }

  ShouldNotReachHere();
  return 0;
}

bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
  ShouldNotCallThis();
  return false;
}

// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  assert_different_registers(tmp1, tmp2);
  z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
  load_const_optimized(tmp1, (long) os::get_memory_serialize_page());

  int mask = os::get_serialize_page_mask();
  if (Immediate::is_uimm16(mask)) {
    z_nill(tmp2, mask);
    z_llghr(tmp2, tmp2);
  } else {
    z_nilf(tmp2, mask);
    z_llgfr(tmp2, tmp2);
  }

  z_release();
  z_st(Z_R0, 0, tmp2, tmp1);
}

// Don't rely on register locking, always use Z_R1 as scratch register instead.
void MacroAssembler::bang_stack_with_offset(int offset) {
  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");
  if (Displacement::is_validDisp(-offset)) {
    z_tmy(-offset, Z_SP, mask_stackbang);
  } else {
    add2reg(Z_R1, -offset, Z_SP); // Do not destroy Z_SP!!!
    z_tm(0, Z_R1, mask_stackbang); // Just banging.
  }
}

void MacroAssembler::reserved_stack_check(Register return_pc) {
  // Test if reserved zone needs to be enabled.
  Label no_reserved_zone_enabling;
  BLOCK_COMMENT("reserved_stack_check {");

  z_cg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
  z_brnh(no_reserved_zone_enabling);

  // Enable reserved zone again, throw stack overflow exception.
  lgr_if_needed(Z_R14, return_pc);
  save_return_pc();
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
  pop_frame();
  restore_return_pc();

  load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
  // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
  z_br(Z_R1);

  should_not_reach_here();

  bind(no_reserved_zone_enabling);
  BLOCK_COMMENT("} reserved_stack_check");
}

// Defines obj, preserves var_size_in_bytes, okay for t1 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int      con_size_in_bytes,
                                   Register t1,
                                   Label&   slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t1;
  Register thread = Z_thread;

  z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    z_lay(end, Address(obj, con_size_in_bytes));
  } else {
    z_lay(end, Address(obj, var_size_in_bytes));
  }
  z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
  branch_optimized(bcondHigh, slow_case);

  // Update the tlab top pointer.
  z_stg(end, Address(thread, JavaThread::tlab_top_offset()));

  // Recover var_size_in_bytes if necessary.
  if (var_size_in_bytes == end) {
    z_sgr(var_size_in_bytes, obj);
  }
}
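
// --- Illustrative sketch (not part of the original file) --------------------
// tlab_allocate() above is a bump-pointer allocation: compute the would-be
// new top, compare against the TLAB end, and either commit the new top or
// take the slow path. The equivalent logic in C++ (hypothetical names):
#ifdef MACROASM_DOC_SKETCHES
static char* sketch_tlab_allocate(char*& tlab_top, char* tlab_end, unsigned long size) {
  char* obj = tlab_top;                // z_lg  obj, tlab_top_offset
  char* end = obj + size;              // z_lay end, size(obj)
  if (end > tlab_end) return NULL;     // z_cg + branch bcondHigh -> slow_case
  tlab_top = end;                      // z_stg end, tlab_top_offset
  return obj;
}
#endif
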
// Emitter for interface method lookup.
//   input:  recv_klass, intf_klass, itable_index
//   output: method_result
//   kills:  itable_index, temp1_reg, Z_R0, Z_R1
// TODO: temp2_reg is unused. We may use this emitter also in the itable stubs.
//       If the register is still not needed then, remove it.
void MacroAssembler::lookup_interface_method(Register           recv_klass,
                                             Register           intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register           method_result,
                                             Register           temp1_reg,
                                             Register           temp2_reg,
                                             Label&             no_such_interface) {

  const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
  const Register itable_entry_addr = Z_R1_scratch;
  const Register itable_interface = Z_R0_scratch;

  BLOCK_COMMENT("lookup_interface_method {");

  // Load start of itable entries into itable_entry_addr.
  z_llgf(vtable_len, Address(recv_klass, InstanceKlass::vtable_length_offset()));
  z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));

  // Loop over all itable entries until desired interfaceOop(Rinterface) found.
  const int vtable_base_offset = in_bytes(InstanceKlass::vtable_start_offset());

  add2reg_with_index(itable_entry_addr,
                     vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
                     recv_klass, vtable_len);

  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
  Label     search;

  bind(search);

  // Handle IncompatibleClassChangeError.
  // If the entry is NULL then we've reached the end of the table
  // without finding the expected interface, so throw an exception.
  load_and_test_long(itable_interface, Address(itable_entry_addr));
  z_bre(no_such_interface);

  add2reg(itable_entry_addr, itable_offset_search_inc);
  z_cgr(itable_interface, intf_klass);
  z_brne(search);

  // Entry found. itable_entry_addr now points one entry past the match;
  // get the offset of the vtable for the interface.

  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
                                    itableOffsetEntry::interface_offset_in_bytes()) -
                                   itable_offset_search_inc;

  // Compute itableMethodEntry and get method and entry point.
  // We use addressing with index and displacement, since the formula
  // for computing the entry's offset has a fixed and a dynamic part,
  // the latter depending on the matched interface entry and on the case
  // that the itable index has been passed as a register, not a constant value.
  int method_offset = itableMethodEntry::method_offset_in_bytes();
                           // Fixed part (displacement), common operand.
  Register itable_offset;  // Dynamic part (index register).

  if (itable_index.is_register()) {
    // Compute the method's offset in that register, for the formula, see the
    // else-clause below.
    itable_offset = itable_index.as_register();

    z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
    z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
  } else {
    itable_offset = Z_R1_scratch;
    // Displacement increases.
    method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();

    // Load index from itable.
    z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
  }

  // Finally load the method's oop.
  z_lg(method_result, method_offset, itable_offset, recv_klass);
  BLOCK_COMMENT("} lookup_interface_method");
}
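
// --- Illustrative sketch (not part of the original file) --------------------
// The emitter above performs a linear scan over itableOffsetEntry records:
// stop on a NULL interface (IncompatibleClassChangeError path) or on a
// match, then use that entry's offset to index the interface's method
// table. The scan it encodes, slightly simplified, over a hypothetical
// layout:
#ifdef MACROASM_DOC_SKETCHES
struct SketchItableOffsetEntry { void* interface_klass; int offset; };
static int sketch_find_itable_offset(const SketchItableOffsetEntry* e, const void* intf) {
  for (; e->interface_klass != NULL; e++) {   // NULL entry terminates the table
    if (e->interface_klass == intf) return e->offset;
  }
  return -1;                                  // -> no_such_interface
}
#endif
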
// Lookup for virtual method invocation.
void MacroAssembler::lookup_virtual_method(Register           recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register           method_result) {
  assert_different_registers(recv_klass, vtable_index.register_or_noreg());
  assert(vtableEntry::size() * wordSize == wordSize,
         "else adjust the scaling in the code below");

  BLOCK_COMMENT("lookup_virtual_method {");

  const int base = in_bytes(Klass::vtable_start_offset());

  if (vtable_index.is_constant()) {
    // Load with base + disp.
    Address vtable_entry_addr(recv_klass,
                              vtable_index.as_constant() * wordSize +
                              base +
                              vtableEntry::method_offset_in_bytes());

    z_lg(method_result, vtable_entry_addr);
  } else {
    // Shift index properly and load with base + index + disp.
    Register vindex = vtable_index.as_register();
    Address  vtable_entry_addr(recv_klass, vindex,
                               base + vtableEntry::method_offset_in_bytes());

    z_sllg(vindex, vindex, exact_log2(wordSize));
    z_lg(method_result, vtable_entry_addr);
  }
  BLOCK_COMMENT("} lookup_virtual_method");
}

// Factor out code to call ic_miss_handler.
// Generate code to call the inline cache miss handler.
//
// In most cases, this code will be generated out-of-line.
// The method parameters are intended to provide some variability.
//   ICM          - Label which has to be bound to the start of useful code (past any traps).
//   trapMarker   - Marking byte for the generated illtrap instructions (if any).
//                  Any value except 0x00 is supported.
//                  = 0x00 - do not generate illtrap instructions.
//                           Use nops to fill unused space.
//   requiredSize - required size of the generated code. If the actually
//                  generated code is smaller, use padding instructions to fill up.
//                  = 0 - no size requirement, no padding.
//   scratch      - scratch register to hold branch target address.
//
// The method returns the code offset of the bound label.
unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
  intptr_t startOffset = offset();

  // Prevent entry at content_begin().
  if (trapMarker != 0) {
    z_illtrap(trapMarker);
  }

  // Load address of inline cache miss code into scratch register
  // and branch to cache miss handler.
  BLOCK_COMMENT("IC miss handler {");
  BIND(ICM);
  unsigned int   labelOffset = offset();
  AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());

  load_const_optimized(scratch, icmiss);
  z_br(scratch);

  // Fill unused space.
  if (requiredSize > 0) {
    while ((offset() - startOffset) < requiredSize) {
      if (trapMarker == 0) {
        z_nop();
      } else {
        z_illtrap(trapMarker);
      }
    }
  }
  BLOCK_COMMENT("} IC miss handler");
  return labelOffset;
}

void MacroAssembler::nmethod_UEP(Label& ic_miss) {
  Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
  int      klass_offset = oopDesc::klass_offset_in_bytes();
  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
    if (VM_Version::has_CompareBranch()) {
      z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
    } else {
      z_ltgr(Z_ARG1, Z_ARG1);
      z_bre(ic_miss);
    }
  }
  // Compare cached class against klass from receiver.
  compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
  z_brne(ic_miss);
}
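
// --- Illustrative sketch (not part of the original file) --------------------
// lookup_virtual_method() above is a single indexed load: the method sits at
// klass + vtable_start + index * wordSize (+ method slot offset). The
// constant case folds everything into the displacement; the register case
// scales the index with a shift. As a one-liner over a hypothetical layout
// (vtable_base folds in the method slot offset):
#ifdef MACROASM_DOC_SKETCHES
static void* sketch_vtable_lookup(char* recv_klass, long vtable_base, long index) {
  return *(void**)(recv_klass + vtable_base + index * sizeof(void*)); // z_lg with index
}
#endif
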
void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
                                                   Register   super_klass,
                                                   Register   temp1_reg,
                                                   Label*     L_success,
                                                   Label*     L_failure,
                                                   Label*     L_slow_path,
                                                   RegisterOrConstant super_check_offset) {

  const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  const int sco_offset = in_bytes(Klass::super_check_offset_offset());

  bool must_load_sco  = (super_check_offset.constant_or_zero() == -1);
  bool need_slow_path = (must_load_sco ||
                         super_check_offset.constant_or_zero() == sc_offset);

  // Input registers must not overlap.
  assert_different_registers(sub_klass, super_klass, temp1_reg);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp1_reg != noreg, "supply either a temp or a register offset");
  }

  const Register Rsuper_check_offset = temp1_reg;

  NearLabel L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1 ||
         (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
         "at most one NULL in the batch, usually");

  BLOCK_COMMENT("check_klass_subtype_fast_path {");
  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);

  // Check the supertype display, which is uint.
  if (must_load_sco) {
    z_llgf(Rsuper_check_offset, sco_offset, super_klass);
    super_check_offset = RegisterOrConstant(Rsuper_check_offset);
  }
  Address super_check_addr(sub_klass, super_check_offset, 0);
  z_cg(super_klass, super_check_addr); // compare w/ displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                  \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                      \
  else { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/

  if (super_check_offset.is_register()) {
    branch_optimized(Assembler::bcondEqual, *L_success);
    z_cfi(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      branch_optimized(Assembler::bcondEqual, *L_slow_path);
    } else {
      branch_optimized(Assembler::bcondNotEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      branch_optimized(Assembler::bcondEqual, *L_success);
    } else {
      branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      branch_optimized(Assembler::bcondEqual, *L_success);
    } else {
      branch_optimized(Assembler::bcondNotEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);
#undef local_brc
#undef final_jmp
  BLOCK_COMMENT("} check_klass_subtype_fast_path");
  // fallthru (to slow path)
}
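
// --- Illustrative sketch (not part of the original file) --------------------
// The fast path above boils down to three outcomes: equal klass pointers
// succeed immediately; a hit at the super_check_offset display slot
// succeeds; a miss fails fast unless the offset designates the secondary
// super cache slot, in which case only the slow path can decide.
// Schematically, with hypothetical accessors:
#ifdef MACROASM_DOC_SKETCHES
enum SketchSubtype { kSuccess, kFailure, kSlowPath };
static SketchSubtype sketch_subtype_fast_path(char* sub, char* super,
                                              int sco, int sc_offset) {
  if (sub == super) return kSuccess;                    // trivial self check
  if (*(char**)(sub + sco) == super) return kSuccess;   // display/cache hit
  return (sco == sc_offset) ? kSlowPath : kFailure;     // cache slot? -> slow path
}
#endif
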
void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
                                                   Register Rsuperklass,
                                                   Register Rarray_ptr,  // tmp
                                                   Register Rlength,     // tmp
                                                   Label* L_success,
                                                   Label* L_failure) {
  // Input registers must not overlap.
  // Also check for R1 which is explicitly used here.
  assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
  NearLabel L_fallthrough, L_loop;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  const int ss_offset = in_bytes(Klass::secondary_supers_offset());
  const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());

  const int length_offset = Array<Klass*>::length_offset_in_bytes();
  const int base_offset   = Array<Klass*>::base_offset_in_bytes();

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                  \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                      \
  else branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/

  NearLabel loop_iterate, loop_count, match;

  BLOCK_COMMENT("check_klass_subtype_slow_path {");
  z_lg(Rarray_ptr, ss_offset, Rsubklass);

  load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
  branch_optimized(Assembler::bcondZero, *L_failure);

  // Oops in table are NO MORE compressed.
  z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
  z_bre(match);                               // Shortcut for array length = 1.

  // No match yet, so we must walk the array's elements.
  z_lngfr(Rlength, Rlength);
  z_sllg(Rlength, Rlength, LogBytesPerWord);  // -#bytes of cache array
  z_llill(Z_R1, BytesPerWord);                // Set increment/end index.
  add2reg(Rlength, 2 * BytesPerWord);         // start index  = -(n-2)*BytesPerWord
  z_slgr(Rarray_ptr, Rlength);                // start addr: +=  (n-2)*BytesPerWord
  z_bru(loop_count);

  BIND(loop_iterate);
  z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
  z_bre(match);
  BIND(loop_count);
  z_brxlg(Rlength, Z_R1, loop_iterate);

  // Rsuperklass not found among secondary super classes -> failure.
  branch_optimized(Assembler::bcondAlways, *L_failure);

  // Got a hit. Return success (zero result). Set cache.
  // Cache load doesn't happen here. For speed it is directly emitted by the compiler.

  BIND(match);

  z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.

  final_jmp(*L_success);

  // Exit to the surrounding code.
  BIND(L_fallthrough);
#undef local_brc
#undef final_jmp
  BLOCK_COMMENT("} check_klass_subtype_slow_path");
}
3076 void MacroAssembler::check_klass_subtype(Register sub_klass,
3077                                          Register super_klass,
3078                                          Register temp1_reg,
3079                                          Register temp2_reg,
3080                                          Label& L_success) {
3081   NearLabel failure;
3082   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3083   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3084                                 &L_success, &failure, NULL);
3085   check_klass_subtype_slow_path(sub_klass, super_klass,
3086                                 temp1_reg, temp2_reg, &L_success, NULL);
3087   BIND(failure);
3088   BLOCK_COMMENT("} check_klass_subtype");
3089 }
3090
3091 // Increment a counter at counter_address when the eq condition code is
3092 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3093 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3094   Label l;
3095   z_brne(l);
3096   load_const(tmp1_reg, counter_address);
3097   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3098   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3099   bind(l);
3100 }
3101
3102 // Semantics are dependent on the slow_case label:
3103 // If the slow_case label is not NULL, failure to biased-lock the object
3104 // transfers control to the location of the slow_case label. If the
3105 // object could be biased-locked, control is transferred to the done label.
3106 // The condition code is unpredictable.
3107 //
3108 // If the slow_case label is NULL, failure to biased-lock the object results
3109 // in a transfer of control to the done label with a condition code of not_equal.
3110 // If the biased-lock could be successfully obtained, control is transferred to
3111 // the done label with a condition code of equal.
3112 // It is mandatory to react to the condition code at the done label.
3113 //
3114 void MacroAssembler::biased_locking_enter(Register obj_reg,
3115                                           Register mark_reg,
3116                                           Register temp_reg,
3117                                           Register temp2_reg, // May be Z_R0!
3118                                           Label  &done,
3119                                           Label  *slow_case) {
3120   assert(UseBiasedLocking, "why call this otherwise?");
3121   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3122
3123   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3124
3125   BLOCK_COMMENT("biased_locking_enter {");
3126
3127   // Biased locking
3128   // See whether the lock is currently biased toward our thread and
3129   // whether the epoch is still valid.
3130   // Note that the runtime guarantees sufficient alignment of JavaThread
3131   // pointers to allow age to be placed into low bits.
3132   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3133          "biased locking makes assumptions about bit layout");
3134   z_lr(temp_reg, mark_reg);
3135   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3136   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3137   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3138
3139   load_prototype_header(temp_reg, obj_reg);
3140   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3141
3142   z_ogr(temp_reg, Z_thread);
3143   z_xgr(temp_reg, mark_reg);
3144   z_ngr(temp_reg, temp2_reg);
3145   if (PrintBiasedLockingStatistics) {
3146     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3147     // Restore mark_reg.
3148     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3149   }
3150   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
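  // What the preceding or/xor/and computed (a sketch, assuming the usual
  // markOop bit layout): temp_reg = ((prototype_header | thread) ^ mark) & ~age_mask.
  //   temp_reg == 0                    -> biased to us, valid epoch: done (above).
  //   temp_reg & biased_lock_mask != 0 -> klass no longer biasable:  revoke bias.
  //   temp_reg & epoch_mask != 0       -> epoch expired:             try rebias.
  //   otherwise                        -> owner bits differ:         try CAS below.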
3151
3152   Label try_revoke_bias;
3153   Label try_rebias;
3154   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3155
3156   //----------------------------------------------------------------------------
3157   // At this point we know that the header has the bias pattern and
3158   // that we are not the bias owner in the current epoch. We need to
3159   // figure out more details about the state of the header in order to
3160   // know what operations can be legally performed on the object's
3161   // header.
3162
3163   // If the low three bits in the xor result aren't clear, that means
3164   // the prototype header is no longer biased and we have to revoke
3165   // the bias on this object.
3166   z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3167   z_brnaz(try_revoke_bias);
3168
3169   // Biasing is still enabled for this data type. See whether the
3170   // epoch of the current bias is still valid, meaning that the epoch
3171   // bits of the mark word are equal to the epoch bits of the
3172   // prototype header. (Note that the prototype header's epoch bits
3173   // only change at a safepoint.) If not, attempt to rebias the object
3174   // toward the current thread. Note that we must be absolutely sure
3175   // that the current epoch is invalid in order to do this because
3176   // otherwise the manipulations it performs on the mark word are
3177   // illegal.
3178   z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3179   z_brnaz(try_rebias);
3180
3181   //----------------------------------------------------------------------------
3182   // The epoch of the current bias is still valid but we know nothing
3183   // about the owner; it might be set or it might be clear. Try to
3184   // acquire the bias of the object using an atomic operation. If this
3185   // fails we will go into the runtime to revoke the object's bias.
3186   // Note that we first construct the presumed unbiased header so we
3187   // don't accidentally blow away another thread's valid bias.
3188   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3189          markOopDesc::epoch_mask_in_place);
3190   z_lgr(temp_reg, Z_thread);
3191   z_llgfr(mark_reg, mark_reg);
3192   z_ogr(temp_reg, mark_reg);
3193
3194   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3195
3196   z_csg(mark_reg, temp_reg, 0, obj_reg);
3197
3198   // If the biasing toward our thread failed, this means that
3199   // another thread succeeded in biasing it toward itself and we
3200   // need to revoke that bias. The revocation will occur in the
3201   // interpreter runtime in the slow case.
3202
3203   if (PrintBiasedLockingStatistics) {
3204     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3205                          temp_reg, temp2_reg);
3206   }
3207   if (slow_case != NULL) {
3208     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3209   }
3210   branch_optimized(Assembler::bcondAlways, done); // Biased lock status given in condition code.
3211
3212   //----------------------------------------------------------------------------
3213   bind(try_rebias);
3214   // At this point we know the epoch has expired, meaning that the
3215   // current "bias owner", if any, is actually invalid. Under these
3216   // circumstances _only_, we are allowed to use the current header's
3217   // value as the comparison value when doing the cas to acquire the
3218   // bias in the current epoch.
In other words, we allow transfer of 3219 // the bias from one thread to another directly in this situation. 3220 3221 z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); 3222 load_prototype_header(temp_reg, obj_reg); 3223 z_llgfr(mark_reg, mark_reg); 3224 3225 z_ogr(temp_reg, Z_thread); 3226 3227 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3228 3229 z_csg(mark_reg, temp_reg, 0, obj_reg); 3230 3231 // If the biasing toward our thread failed, this means that 3232 // another thread succeeded in biasing it toward itself and we 3233 // need to revoke that bias. The revocation will occur in the 3234 // interpreter runtime in the slow case. 3235 3236 if (PrintBiasedLockingStatistics) { 3237 increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg); 3238 } 3239 if (slow_case != NULL) { 3240 branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way. 3241 } 3242 z_bru(done); // Biased lock status given in condition code. 3243 3244 //---------------------------------------------------------------------------- 3245 bind(try_revoke_bias); 3246 // The prototype mark in the klass doesn't have the bias bit set any 3247 // more, indicating that objects of this data type are not supposed 3248 // to be biased any more. We are going to try to reset the mark of 3249 // this object to the prototype value and fall through to the 3250 // CAS-based locking scheme. Note that if our CAS fails, it means 3251 // that another thread raced us for the privilege of revoking the 3252 // bias of this particular object, so it's okay to continue in the 3253 // normal locking code. 3254 load_prototype_header(temp_reg, obj_reg); 3255 3256 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); 3257 3258 z_csg(mark_reg, temp_reg, 0, obj_reg); 3259 3260 // Fall through to the normal CAS-based lock, because no matter what 3261 // the result of the above CAS, some thread must have succeeded in 3262 // removing the bias bit from the object's header. 3263 if (PrintBiasedLockingStatistics) { 3264 // z_cgr(mark_reg, temp2_reg); 3265 increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg); 3266 } 3267 3268 bind(cas_label); 3269 BLOCK_COMMENT("} biased_locking_enter"); 3270 } 3271 3272 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) { 3273 // Check for biased locking unlock case, which is a no-op 3274 // Note: we do not have to check the thread ID for two reasons. 3275 // First, the interpreter checks for IllegalMonitorStateException at 3276 // a higher level. Second, if the bias was revoked while we held the 3277 // lock, the object could not be rebiased toward another thread, so 3278 // the bias bit would be clear. 
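// In short (a sketch): if ((mark & biased_lock_mask) == biased_lock_pattern)
// goto done; a biased lock needs no unlocking work, so the caller's
// displaced-header handling is skipped entirely.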
3279 BLOCK_COMMENT("biased_locking_exit {"); 3280 3281 z_lg(temp_reg, 0, mark_addr); 3282 z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place); 3283 3284 z_chi(temp_reg, markOopDesc::biased_lock_pattern); 3285 z_bre(done); 3286 BLOCK_COMMENT("} biased_locking_exit"); 3287 } 3288 3289 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) { 3290 Register displacedHeader = temp1; 3291 Register currentHeader = temp1; 3292 Register temp = temp2; 3293 NearLabel done, object_has_monitor; 3294 3295 BLOCK_COMMENT("compiler_fast_lock_object {"); 3296 3297 // Load markOop from oop into mark. 3298 z_lg(displacedHeader, 0, oop); 3299 3300 if (try_bias) { 3301 biased_locking_enter(oop, displacedHeader, temp, Z_R0, done); 3302 } 3303 3304 // Handle existing monitor. 3305 if ((EmitSync & 0x01) == 0) { 3306 // The object has an existing monitor iff (mark & monitor_value) != 0. 3307 guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word"); 3308 z_lr(temp, displacedHeader); 3309 z_nill(temp, markOopDesc::monitor_value); 3310 z_brne(object_has_monitor); 3311 } 3312 3313 // Set mark to markOop | markOopDesc::unlocked_value. 3314 z_oill(displacedHeader, markOopDesc::unlocked_value); 3315 3316 // Load Compare Value application register. 3317 3318 // Initialize the box (must happen before we update the object mark). 3319 z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box); 3320 3321 // Memory Fence (in cmpxchgd) 3322 // Compare object markOop with mark and if equal exchange scratch1 with object markOop. 3323 3324 // If the compare-and-swap succeeded, then we found an unlocked object and we 3325 // have now locked it. 3326 z_csg(displacedHeader, box, 0, oop); 3327 assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture. 3328 z_bre(done); 3329 3330 // We did not see an unlocked object so try the fast recursive case. 3331 3332 z_sgr(currentHeader, Z_SP); 3333 load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place)); 3334 3335 z_ngr(currentHeader, temp); 3336 // z_brne(done); 3337 // z_release(); 3338 z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box); 3339 3340 z_bru(done); 3341 3342 if ((EmitSync & 0x01) == 0) { 3343 Register zero = temp; 3344 Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value. 3345 bind(object_has_monitor); 3346 // The object's monitor m is unlocked iff m->owner == NULL, 3347 // otherwise m->owner may contain a thread or a stack address. 3348 // 3349 // Try to CAS m->owner from NULL to current thread. 3350 z_lghi(zero, 0); 3351 // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. 3352 z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged); 3353 // Store a non-null value into the box. 3354 z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box); 3355 #ifdef ASSERT 3356 z_brne(done); 3357 // We've acquired the monitor, check some invariants. 3358 // Invariant 1: _recursions should be 0. 3359 asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged, 3360 "monitor->_recursions should be 0", -1); 3361 z_ltgr(zero, zero); // Set CR=EQ. 3362 #endif 3363 } 3364 bind(done); 3365 3366 BLOCK_COMMENT("} compiler_fast_lock_object"); 3367 // If locking was successful, CR should indicate 'EQ'. 
3368 // The compiler or the native wrapper generates a branch to the runtime call
3369 // _complete_monitor_locking_Java.
3370 }
3371
3372 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3373   Register displacedHeader = temp1;
3374   Register currentHeader = temp2;
3375   Register temp = temp1;
3376   Register monitor = temp2;
3377
3378   Label done, object_has_monitor;
3379
3380   BLOCK_COMMENT("compiler_fast_unlock_object {");
3381
3382   if (try_bias) {
3383     biased_locking_exit(oop, currentHeader, done);
3384   }
3385
3386   // Find the lock address and load the displaced header from the stack.
3387   // If the displaced header is zero, we have a recursive unlock.
3388   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3389   z_bre(done);
3390
3391   // Handle existing monitor.
3392   if ((EmitSync & 0x02) == 0) {
3393     // The object has an existing monitor iff (mark & monitor_value) != 0.
3394     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3395     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3396     z_nill(currentHeader, markOopDesc::monitor_value);
3397     z_brne(object_has_monitor);
3398   }
3399
3400   // Check if it is still a lightweight lock; this is true if we see
3401   // the stack address of the basicLock in the markOop of the object.
3402   // Copy box to currentHeader so that csg does not kill it.
3403   z_lgr(currentHeader, box);
3404   z_csg(currentHeader, displacedHeader, 0, oop);
3405   z_bru(done); // Csg sets CR as desired.
3406
3407   // Handle existing monitor.
3408   if ((EmitSync & 0x02) == 0) {
3409     bind(object_has_monitor);
3410     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop); // CurrentHeader is tagged with monitor_value set.
3411     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3412     z_brne(done);
3413     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3414     z_brne(done);
3415     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3416     z_brne(done);
3417     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3418     z_brne(done);
3419     z_release();
3420     z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3421   }
3422
3423   bind(done);
3424
3425   BLOCK_COMMENT("} compiler_fast_unlock_object");
3426   // flag == EQ indicates success
3427   // flag == NE indicates failure
3428 }
3429
3430 // Write to card table for modification at store_addr - register is destroyed afterwards.
3431 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
3432   CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
3433   assert(bs->kind() == BarrierSet::CardTableForRS ||
3434          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
3435   assert_different_registers(store_addr, tmp);
3436   z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
3437   load_absolute_address(tmp, (address)bs->byte_map_base);
3438   z_agr(store_addr, tmp);
3439   z_mvi(0, store_addr, 0); // Store byte 0.
3440 }
3441
3442 #if INCLUDE_ALL_GCS
3443
3444 //------------------------------------------------------
3445 // General G1 pre-barrier generator.
3446 // Purpose: record the previous value if it is not null.
3447 // All non-tmps are preserved.
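// Emitted logic, roughly (a sketch of the SATB filtering, not literal code):
//   if (thread->satb_mark_queue.active) {
//     pre_val = (Robj != noreg) ? *(Robj + offset) : Rpre_val;
//     if (pre_val != NULL) {
//       if (index != 0) { index -= wordSize; buf[index] = pre_val; }
//       else            { call g1_wb_pre(pre_val, thread); }
//     }
//   }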
3448 //------------------------------------------------------
3449 void MacroAssembler::g1_write_barrier_pre(Register           Robj,
3450                                           RegisterOrConstant offset,
3451                                           Register           Rpre_val,      // Ideally, this is a non-volatile register.
3452                                           Register           Rval,          // Will be preserved.
3453                                           Register           Rtmp1,         // If Rpre_val is volatile, either Rtmp1
3454                                           Register           Rtmp2,         // or Rtmp2 has to be non-volatile.
3455                                           bool               pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3456                                           ) {
3457   Label callRuntime, filtered;
3458   const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3459   const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3460   const int index_offset  = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3461   assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> may be Z_R0!
3462
3463   BLOCK_COMMENT("g1_write_barrier_pre {");
3464
3465   // Is marking active?
3466   // Note: value is loaded for test purposes only. No further use here.
3467   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3468     load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3469   } else {
3470     guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3471     load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3472   }
3473   z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
3474
3475   // Do we need to load the previous value into Rpre_val?
3476   if (Robj != noreg) {
3477     // Load the previous value...
3478     Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3479     if (UseCompressedOops) {
3480       z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3481     } else {
3482       z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3483     }
3484   }
3485   assert(Rpre_val != noreg, "must have a real register");
3486
3487   // Is the previous value NULL?
3488   // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
3489   // Register contents are preserved across the runtime call if the caller requests it.
3490   z_ltgr(Rpre_val, Rpre_val);
3491   z_bre(filtered); // previous value is NULL, so we don't need to record it.
3492
3493   // Decode the oop now. We know it's not NULL.
3494   if (Robj != noreg && UseCompressedOops) {
3495     oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3496   }
3497
3498   // OK, it's not filtered, so we'll need to call enqueue.
3499
3500   // We can store the original value in the thread's buffer
3501   // only if index > 0. Otherwise, the runtime must handle it.
3502   // (The index field is typed as size_t.)
3503   Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3504
3505   z_lg(Rbuffer, buffer_offset, Z_thread);
3506
3507   load_and_test_long(Rindex, Address(Z_thread, index_offset));
3508   z_bre(callRuntime); // If index == 0, goto runtime.
3509
3510   add2reg(Rindex, -wordSize); // Decrement index.
3511   z_stg(Rindex, index_offset, Z_thread);
3512
3513   // Record the previous value.
3514   z_stg(Rpre_val, 0, Rbuffer, Rindex);
3515   z_bru(filtered); // We are done.
3516
3517   Rbuffer = noreg; // end of life
3518   Rindex  = noreg; // end of life
3519
3520   bind(callRuntime);
3521
3522   // Save Rpre_val (result) over runtime call.
3523   // Requires Rtmp1, Rtmp2, or Rpre_val to be non-volatile.
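  // Note: the spill code below parks volatile registers in the current top
  // frame at encoding*BytesPerWord off Z_SP (an ad-hoc convention of this
  // emitter), then pushes a frame so the callee cannot clobber those slots.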
3524 Register Rpre_save = Rpre_val; 3525 if (pre_val_needed && Rpre_val->is_volatile()) { 3526 guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!"); 3527 Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2; 3528 } 3529 lgr_if_needed(Rpre_save, Rpre_val); 3530 3531 // Preserve inputs by spilling them into the top frame. 3532 if (Robj != noreg && Robj->is_volatile()) { 3533 z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3534 } 3535 if (offset.is_register() && offset.as_register()->is_volatile()) { 3536 Register Roff = offset.as_register(); 3537 z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3538 } 3539 if (Rval != noreg && Rval->is_volatile()) { 3540 z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3541 } 3542 3543 // Push frame to protect top frame with return pc and spilled register values. 3544 save_return_pc(); 3545 push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs. 3546 3547 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, Z_thread); 3548 3549 pop_frame(); 3550 restore_return_pc(); 3551 3552 // Restore spilled values. 3553 if (Robj != noreg && Robj->is_volatile()) { 3554 z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP); 3555 } 3556 if (offset.is_register() && offset.as_register()->is_volatile()) { 3557 Register Roff = offset.as_register(); 3558 z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP); 3559 } 3560 if (Rval != noreg && Rval->is_volatile()) { 3561 z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP); 3562 } 3563 3564 // Restore Rpre_val (result) after runtime call. 3565 lgr_if_needed(Rpre_val, Rpre_save); 3566 3567 bind(filtered); 3568 BLOCK_COMMENT("} g1_write_barrier_pre"); 3569 } 3570 3571 // General G1 post-barrier generator. 3572 // Purpose: Store cross-region card. 3573 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, 3574 Register Rnew_val, 3575 Register Rtmp1, 3576 Register Rtmp2, 3577 Register Rtmp3) { 3578 Label callRuntime, filtered; 3579 3580 assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3. 3581 3582 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set(); 3583 assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier"); 3584 3585 BLOCK_COMMENT("g1_write_barrier_post {"); 3586 3587 // Does store cross heap regions? 3588 // It does if the two addresses specify different grain addresses. 3589 if (G1RSBarrierRegionFilter) { 3590 if (VM_Version::has_DistinctOpnds()) { 3591 z_xgrk(Rtmp1, Rstore_addr, Rnew_val); 3592 } else { 3593 z_lgr(Rtmp1, Rstore_addr); 3594 z_xgr(Rtmp1, Rnew_val); 3595 } 3596 z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); 3597 z_bre(filtered); 3598 } 3599 3600 // Crosses regions, storing NULL? 3601 #ifdef ASSERT 3602 z_ltgr(Rnew_val, Rnew_val); 3603 asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete: 3604 z_bre(filtered); // Safety net: don't break if we have a NULL oop. 3605 #endif 3606 Rnew_val = noreg; // end of lifetime 3607 3608 // Storing region crossing non-NULL, is card already dirty? 3609 assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code"); 3610 assert_different_registers(Rtmp1, Rtmp2, Rtmp3); 3611 // Make sure not to use Z_R0 for any of these registers. 3612 Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3; 3613 Register Rbase = (Rtmp2 != Z_R0_scratch) ? 
Rtmp2 : Rtmp3;
3614
3615   // Calculate address of card.
3616   load_const_optimized(Rbase, (address)bs->byte_map_base);         // Card table base.
3617   z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);  // Index into card table.
3618   add2reg_with_index(Rcard_addr, 0, Rcard_addr, Rbase);            // Explicit calculation needed for cli.
3619   Rbase = noreg; // end of lifetime
3620
3621   // Filter young.
3622   assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
3623   z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
3624   z_bre(filtered);
3625
3626   // Check the card value. If dirty, we're done.
3627   // This also avoids false sharing of the (already dirty) card.
3628   z_sync(); // Required to support concurrent cleaning.
3629   assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
3630   z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
3631   z_bre(filtered);
3632
3633   // Storing a region crossing, non-NULL oop, card is clean.
3634   // Dirty card and log.
3635   z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
3636
3637   Register Rcard_addr_x = Rcard_addr;
3638   Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3639   Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3640   const int qidx_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3641   const int qbuf_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3642   if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3643     Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
3644   }
3645   lgr_if_needed(Rcard_addr_x, Rcard_addr);
3646
3647   load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3648   z_bre(callRuntime); // If index == 0, jump to runtime.
3649
3650   z_lg(Rqueue_buf, qbuf_off, Z_thread);
3651
3652   add2reg(Rqueue_index, -wordSize); // Decrement index.
3653   z_stg(Rqueue_index, qidx_off, Z_thread);
3654
3655   z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3656   z_bru(filtered);
3657
3658   bind(callRuntime);
3659
3660   // TODO: do we need a frame? Introduced to be on the safe side.
3661   bool needs_frame = true;
3662
3663   // The VM call needs a frame to access (write) volatile registers.
3664   if (needs_frame) {
3665     save_return_pc();
3666     push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3667   }
3668
3669   // Save the live input values.
3670   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr_x, Z_thread);
3671
3672   if (needs_frame) {
3673     pop_frame();
3674     restore_return_pc();
3675   }
3676
3677   bind(filtered);
3678
3679   BLOCK_COMMENT("} g1_write_barrier_post");
3680 }
3681 #endif // INCLUDE_ALL_GCS
3682
3683 // Last_Java_sp must comply with the rules in frame_s390.hpp.
3684 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3685   BLOCK_COMMENT("set_last_Java_frame {");
3686
3687   // Always set last_Java_pc and flags first because once last_Java_sp
3688   // is visible, has_last_Java_frame is true and users will look at the
3689   // rest of the fields. (Note: flags should always be zero before we
3690   // get here, so they don't need to be set.)
3691
3692   // Verify that last_Java_pc was zeroed on return to Java.
3693   if (allow_relocation) {
3694     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3695                             Z_thread,
3696                             "last_Java_pc not zeroed before leaving Java",
3697                             0x200);
3698   } else {
3699     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3700                                    Z_thread,
3701                                    "last_Java_pc not zeroed before leaving Java",
3702                                    0x200);
3703   }
3704
3705   // When returning from calling out from Java mode the frame anchor's
3706   // last_Java_pc will always be set to NULL. It is set here so that,
3707   // if we are doing a call to native (not VM) code, we capture the
3708   // known pc and don't have to rely on the native call having a
3709   // standard frame linkage where we can find the pc.
3710   if (last_Java_pc!=noreg) {
3711     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3712   }
3713
3714   // This membar release is not required on z/Architecture, since the sequence of stores
3715   // is maintained. Nevertheless, we leave it in to document the required ordering.
3716   // The implementation of z_release() should be empty.
3717   // z_release();
3718
3719   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3720   BLOCK_COMMENT("} set_last_Java_frame");
3721 }
3722
3723 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3724   BLOCK_COMMENT("reset_last_Java_frame {");
3725
3726   if (allow_relocation) {
3727     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3728                                Z_thread,
3729                                "SP was not set, still zero",
3730                                0x202);
3731   } else {
3732     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3733                                       Z_thread,
3734                                       "SP was not set, still zero",
3735                                       0x202);
3736   }
3737
3738   // _last_Java_sp = 0
3739   // Clearing storage must be atomic here, so don't use clear_mem()!
3740   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3741
3742   // _last_Java_pc = 0
3743   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3744
3745   BLOCK_COMMENT("} reset_last_Java_frame");
3746   return;
3747 }
3748
3749 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3750   assert_different_registers(sp, tmp1);
3751
3752   // We cannot trust that code generated by the C++ compiler saves R14
3753   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3754   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3755   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3756   // it into the frame anchor.
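  // Net effect (a sketch): anchor.last_Java_pc = PC captured below, then
  // anchor.last_Java_sp = sp; the pc is published before the sp, so a reader
  // that sees last_Java_sp set also sees a valid pc.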
3757   get_PC(tmp1);
3758   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3759 }
3760
3761 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3762   z_release();
3763
3764   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3765   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3766   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3767 }
3768
3769 void MacroAssembler::get_vm_result(Register oop_result) {
3770   verify_thread();
3771
3772   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3773   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3774
3775   verify_oop(oop_result);
3776 }
3777
3778 void MacroAssembler::get_vm_result_2(Register result) {
3779   verify_thread();
3780
3781   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3782   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3783 }
3784
3785 // We require that C code which does not return a value in vm_result will
3786 // leave it undisturbed.
3787 void MacroAssembler::set_vm_result(Register oop_result) {
3788   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3789 }
3790
3791 // Explicit null checks (used for method handle code).
3792 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3793   if (!ImplicitNullChecks) {
3794     NearLabel ok;
3795
3796     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3797
3798     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3799     address exception_entry = Interpreter::throw_NullPointerException_entry();
3800     load_absolute_address(reg, exception_entry);
3801     z_br(reg);
3802
3803     bind(ok);
3804   } else {
3805     if (needs_explicit_null_check((intptr_t)offset)) {
3806       // Provoke OS NULL exception if reg = NULL by
3807       // accessing M[reg] w/o changing any registers.
3808       z_lg(tmp, 0, reg);
3809     }
3810     // else
3811     //   Nothing to do, (later) access of M[reg + offset]
3812     //   will provoke OS NULL exception if reg = NULL.
3813   }
3814 }
3815
3816 //-------------------------------------
3817 //  Compressed Klass Pointers
3818 //-------------------------------------
3819
3820 // Klass oop manipulations if compressed.
3821 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3822   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3823   address  base    = Universe::narrow_klass_base();
3824   int      shift   = Universe::narrow_klass_shift();
3825   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3826
3827   BLOCK_COMMENT("cKlass encoder {");
3828
3829 #ifdef ASSERT
3830   Label ok;
3831   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3832   z_brc(Assembler::bcondAllZero, ok);
3833   // The plain disassembler does not recognize illtrap. It instead displays
3834   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3835   // the proper beginning of the next instruction.
3836 z_illtrap(0xee); 3837 z_illtrap(0xee); 3838 bind(ok); 3839 #endif 3840 3841 if (base != NULL) { 3842 unsigned int base_h = ((unsigned long)base)>>32; 3843 unsigned int base_l = (unsigned int)((unsigned long)base); 3844 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3845 lgr_if_needed(dst, current); 3846 z_aih(dst, -((int)base_h)); // Base has no set bits in lower half. 3847 } else if ((base_h == 0) && (base_l != 0)) { 3848 lgr_if_needed(dst, current); 3849 z_agfi(dst, -(int)base_l); 3850 } else { 3851 load_const(Z_R0, base); 3852 lgr_if_needed(dst, current); 3853 z_sgr(dst, Z_R0); 3854 } 3855 current = dst; 3856 } 3857 if (shift != 0) { 3858 assert (LogKlassAlignmentInBytes == shift, "decode alg wrong"); 3859 z_srlg(dst, current, shift); 3860 current = dst; 3861 } 3862 lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0). 3863 3864 BLOCK_COMMENT("} cKlass encoder"); 3865 } 3866 3867 // This function calculates the size of the code generated by 3868 // decode_klass_not_null(register dst, Register src) 3869 // when (Universe::heap() != NULL). Hence, if the instructions 3870 // it generates change, then this method needs to be updated. 3871 int MacroAssembler::instr_size_for_decode_klass_not_null() { 3872 address base = Universe::narrow_klass_base(); 3873 int shift_size = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */ 3874 int addbase_size = 0; 3875 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3876 3877 if (base != NULL) { 3878 unsigned int base_h = ((unsigned long)base)>>32; 3879 unsigned int base_l = (unsigned int)((unsigned long)base); 3880 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3881 addbase_size += 6; /* aih */ 3882 } else if ((base_h == 0) && (base_l != 0)) { 3883 addbase_size += 6; /* algfi */ 3884 } else { 3885 addbase_size += load_const_size(); 3886 addbase_size += 4; /* algr */ 3887 } 3888 } 3889 #ifdef ASSERT 3890 addbase_size += 10; 3891 addbase_size += 2; // Extra sigill. 3892 #endif 3893 return addbase_size + shift_size; 3894 } 3895 3896 // !!! If the instructions that get generated here change 3897 // then function instr_size_for_decode_klass_not_null() 3898 // needs to get updated. 3899 // This variant of decode_klass_not_null() must generate predictable code! 3900 // The code must only depend on globally known parameters. 3901 void MacroAssembler::decode_klass_not_null(Register dst) { 3902 address base = Universe::narrow_klass_base(); 3903 int shift = Universe::narrow_klass_shift(); 3904 int beg_off = offset(); 3905 assert(UseCompressedClassPointers, "only for compressed klass ptrs"); 3906 3907 BLOCK_COMMENT("cKlass decoder (const size) {"); 3908 3909 if (shift != 0) { // Shift required? 3910 z_sllg(dst, dst, shift); 3911 } 3912 if (base != NULL) { 3913 unsigned int base_h = ((unsigned long)base)>>32; 3914 unsigned int base_l = (unsigned int)((unsigned long)base); 3915 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 3916 z_aih(dst, base_h); // Base has no set bits in lower half. 3917 } else if ((base_h == 0) && (base_l != 0)) { 3918 z_algfi(dst, base_l); // Base has no set bits in upper half. 3919 } else { 3920 load_const(Z_R0, base); // Base has set bits everywhere. 3921 z_algr(dst, Z_R0); 3922 } 3923 } 3924 3925 #ifdef ASSERT 3926 Label ok; 3927 z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment. 3928 z_brc(Assembler::bcondAllZero, ok); 3929 // The plain disassembler does not recognize illtrap. 
It instead displays
3930   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3931   // the proper beginning of the next instruction.
3932   z_illtrap(0xd1);
3933   z_illtrap(0xd1);
3934   bind(ok);
3935 #endif
3936   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3937
3938   BLOCK_COMMENT("} cKlass decoder (const size)");
3939 }
3940
3941 // This variant of decode_klass_not_null() is for cases where
3942 //  1) the size of the generated instructions may vary
3943 //  2) the result is (potentially) stored in a register different from the source.
3944 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3945   address base  = Universe::narrow_klass_base();
3946   int     shift = Universe::narrow_klass_shift();
3947   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3948
3949   BLOCK_COMMENT("cKlass decoder {");
3950
3951   if (src == noreg) src = dst;
3952
3953   if (shift != 0) { // Shift or at least move required?
3954     z_sllg(dst, src, shift);
3955   } else {
3956     lgr_if_needed(dst, src);
3957   }
3958
3959   if (base != NULL) {
3960     unsigned int base_h = ((unsigned long)base)>>32;
3961     unsigned int base_l = (unsigned int)((unsigned long)base);
3962     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3963       z_aih(dst, base_h);     // Base has no set bits in lower half.
3964     } else if ((base_h == 0) && (base_l != 0)) {
3965       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3966     } else {
3967       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3968       z_algr(dst, Z_R0);
3969     }
3970   }
3971
3972 #ifdef ASSERT
3973   Label ok;
3974   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3975   z_brc(Assembler::bcondAllZero, ok);
3976   // The plain disassembler does not recognize illtrap. It instead displays
3977   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3978   // the proper beginning of the next instruction.
3979   z_illtrap(0xd2);
3980   z_illtrap(0xd2);
3981   bind(ok);
3982 #endif
3983   BLOCK_COMMENT("} cKlass decoder");
3984 }
3985
3986 void MacroAssembler::load_klass(Register klass, Address mem) {
3987   if (UseCompressedClassPointers) {
3988     z_llgf(klass, mem);
3989     // Attention: no null check here!
3990     decode_klass_not_null(klass);
3991   } else {
3992     z_lg(klass, mem);
3993   }
3994 }
3995
3996 void MacroAssembler::load_klass(Register klass, Register src_oop) {
3997   if (UseCompressedClassPointers) {
3998     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3999     // Attention: no null check here!
4000 decode_klass_not_null(klass); 4001 } else { 4002 z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop); 4003 } 4004 } 4005 4006 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) { 4007 assert_different_registers(Rheader, Rsrc_oop); 4008 load_klass(Rheader, Rsrc_oop); 4009 z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset())); 4010 } 4011 4012 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) { 4013 if (UseCompressedClassPointers) { 4014 assert_different_registers(dst_oop, klass, Z_R0); 4015 if (ck == noreg) ck = klass; 4016 encode_klass_not_null(ck, klass); 4017 z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 4018 } else { 4019 z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes())); 4020 } 4021 } 4022 4023 void MacroAssembler::store_klass_gap(Register s, Register d) { 4024 if (UseCompressedClassPointers) { 4025 assert(s != d, "not enough registers"); 4026 z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes())); 4027 } 4028 } 4029 4030 // Compare klass ptr in memory against klass ptr in register. 4031 // 4032 // Rop1 - klass in register, always uncompressed. 4033 // disp - Offset of klass in memory, compressed/uncompressed, depending on runtime flag. 4034 // Rbase - Base address of cKlass in memory. 4035 // maybeNULL - True if Rop1 possibly is a NULL. 4036 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) { 4037 4038 BLOCK_COMMENT("compare klass ptr {"); 4039 4040 if (UseCompressedClassPointers) { 4041 const int shift = Universe::narrow_klass_shift(); 4042 address base = Universe::narrow_klass_base(); 4043 4044 assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift"); 4045 assert_different_registers(Rop1, Z_R0); 4046 assert_different_registers(Rop1, Rbase, Z_R1); 4047 4048 // First encode register oop and then compare with cOop in memory. 4049 // This sequence saves an unnecessary cOop load and decode. 4050 if (base == NULL) { 4051 if (shift == 0) { 4052 z_cl(Rop1, disp, Rbase); // Unscaled 4053 } else { 4054 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4055 z_cl(Z_R0, disp, Rbase); 4056 } 4057 } else { // HeapBased 4058 #ifdef ASSERT 4059 bool used_R0 = true; 4060 bool used_R1 = true; 4061 #endif 4062 Register current = Rop1; 4063 Label done; 4064 4065 if (maybeNULL) { // NULL ptr must be preserved! 4066 z_ltgr(Z_R0, current); 4067 z_bre(done); 4068 current = Z_R0; 4069 } 4070 4071 unsigned int base_h = ((unsigned long)base)>>32; 4072 unsigned int base_l = (unsigned int)((unsigned long)base); 4073 if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) { 4074 lgr_if_needed(Z_R0, current); 4075 z_aih(Z_R0, -((int)base_h)); // Base has no set bits in lower half. 4076 } else if ((base_h == 0) && (base_l != 0)) { 4077 lgr_if_needed(Z_R0, current); 4078 z_agfi(Z_R0, -(int)base_l); 4079 } else { 4080 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4081 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement. 
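        // Note: get_oop_base_complement() left the negated (power-of-2 adjusted)
        // base in Z_R1 and returned the compensating pow2_offset, so the add above
        // effectively computes Z_R0 = Rop1 - base.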
4082       }
4083
4084       if (shift != 0) {
4085         z_srlg(Z_R0, Z_R0, shift);
4086       }
4087       bind(done);
4088       z_cl(Z_R0, disp, Rbase);
4089 #ifdef ASSERT
4090       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4091       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4092 #endif
4093     }
4094   } else {
4095     z_clg(Rop1, disp, Z_R0, Rbase);
4096   }
4097   BLOCK_COMMENT("} compare klass ptr");
4098 }
4099
4100 //---------------------------
4101 //  Compressed oops
4102 //---------------------------
4103
4104 void MacroAssembler::encode_heap_oop(Register oop) {
4105   oop_encoder(oop, oop, true /*maybe null*/);
4106 }
4107
4108 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
4109   oop_encoder(oop, oop, false /*not null*/);
4110 }
4111
4112 // Called with something derived from the oop base. e.g. oop_base>>3.
4113 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
4114   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
4115   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
4116   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
4117   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
4118   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
4119                                + (oop_base_lh == 0 ? 0:1)
4120                                + (oop_base_hl == 0 ? 0:1)
4121                                + (oop_base_hh == 0 ? 0:1);
4122
4123   assert(oop_base != 0, "This is for HeapBased cOops only");
4124
4125   if (n_notzero_parts != 1) { // Check if oop_base is just a few pages shy of a power of 2.
4126     uint64_t pow2_offset = 0x10000 - oop_base_ll;
4127     if (pow2_offset < 0x8000) { // This might not be necessary.
4128       uint64_t oop_base2 = oop_base + pow2_offset;
4129
4130       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
4131       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
4132       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
4133       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
4134       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
4135                         (oop_base_lh == 0 ? 0:1) +
4136                         (oop_base_hl == 0 ? 0:1) +
4137                         (oop_base_hh == 0 ? 0:1);
4138       if (n_notzero_parts == 1) {
4139         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
4140         return -pow2_offset;
4141       }
4142     }
4143   }
4144   return 0;
4145 }
4146
4147 // If base address is offset from a straight power of two by just a few pages,
4148 // return this offset to the caller for a possible later composite add.
4149 // TODO/FIX: will only work correctly for 4k pages.
4150 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
4151   int pow2_offset = get_oop_base_pow2_offset(oop_base);
4152
4153   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
4154
4155   return pow2_offset;
4156 }
4157
4158 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
4159   int offset = get_oop_base(Rbase, oop_base);
4160   z_lcgr(Rbase, Rbase);
4161   return -offset;
4162 }
4163
4164 // Compare compressed oop in memory against oop in register.
4165 // Rop1 - Oop in register.
4166 // disp - Offset of cOop in memory.
4167 // Rbase - Base address of cOop in memory.
4168 // maybeNULL - True if Rop1 possibly is a NULL.
4169 // A NULL Rop1 is compared without encoding; compressed NULL is also 0.
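// Strategy (a sketch): instead of loading and decoding the cOop from memory,
// encode Rop1 (subtract base, shift right) into Z_R0 and compare the 32-bit
// result against the cOop in memory; a NULL Rop1 bypasses the encoding.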
4170 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) { 4171 Register Rbase = mem.baseOrR0(); 4172 Register Rindex = mem.indexOrR0(); 4173 int64_t disp = mem.disp(); 4174 4175 const int shift = Universe::narrow_oop_shift(); 4176 address base = Universe::narrow_oop_base(); 4177 4178 assert(UseCompressedOops, "must be on to call this method"); 4179 assert(Universe::heap() != NULL, "java heap must be initialized to call this method"); 4180 assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift"); 4181 assert_different_registers(Rop1, Z_R0); 4182 assert_different_registers(Rop1, Rbase, Z_R1); 4183 assert_different_registers(Rop1, Rindex, Z_R1); 4184 4185 BLOCK_COMMENT("compare heap oop {"); 4186 4187 // First encode register oop and then compare with cOop in memory. 4188 // This sequence saves an unnecessary cOop load and decode. 4189 if (base == NULL) { 4190 if (shift == 0) { 4191 z_cl(Rop1, disp, Rindex, Rbase); // Unscaled 4192 } else { 4193 z_srlg(Z_R0, Rop1, shift); // ZeroBased 4194 z_cl(Z_R0, disp, Rindex, Rbase); 4195 } 4196 } else { // HeapBased 4197 #ifdef ASSERT 4198 bool used_R0 = true; 4199 bool used_R1 = true; 4200 #endif 4201 Label done; 4202 int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base)); 4203 4204 if (maybeNULL) { // NULL ptr must be preserved! 4205 z_ltgr(Z_R0, Rop1); 4206 z_bre(done); 4207 } 4208 4209 add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); 4210 z_srlg(Z_R0, Z_R0, shift); 4211 4212 bind(done); 4213 z_cl(Z_R0, disp, Rindex, Rbase); 4214 #ifdef ASSERT 4215 if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2); 4216 if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2); 4217 #endif 4218 } 4219 BLOCK_COMMENT("} compare heap oop"); 4220 } 4221 4222 // Load heap oop and decompress, if necessary. 4223 void MacroAssembler::load_heap_oop(Register dest, const Address &a) { 4224 if (UseCompressedOops) { 4225 z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0()); 4226 oop_decoder(dest, dest, true); 4227 } else { 4228 z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0()); 4229 } 4230 } 4231 4232 // Load heap oop and decompress, if necessary. 4233 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) { 4234 if (UseCompressedOops) { 4235 z_llgf(dest, disp, base); 4236 oop_decoder(dest, dest, true); 4237 } else { 4238 z_lg(dest, disp, base); 4239 } 4240 } 4241 4242 // Load heap oop and decompress, if necessary. 4243 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) { 4244 if (UseCompressedOops) { 4245 z_llgf(dest, disp, base); 4246 oop_decoder(dest, dest, false); 4247 } else { 4248 z_lg(dest, disp, base); 4249 } 4250 } 4251 4252 // Compress, if necessary, and store oop to heap. 4253 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) { 4254 Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0; 4255 if (UseCompressedOops) { 4256 assert_different_registers(Roop, offset.register_or_noreg(), base); 4257 encode_heap_oop(Roop); 4258 z_st(Roop, offset.constant_or_zero(), Ridx, base); 4259 } else { 4260 z_stg(Roop, offset.constant_or_zero(), Ridx, base); 4261 } 4262 } 4263 4264 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL. 4265 void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) { 4266 Register Ridx = offset.is_register() ? 
offset.register_or_noreg() : Z_R0;
4267   if (UseCompressedOops) {
4268     assert_different_registers(Roop, offset.register_or_noreg(), base);
4269     encode_heap_oop_not_null(Roop);
4270     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4271   } else {
4272     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4273   }
4274 }
4275
4276 // Store NULL oop to heap.
4277 void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) {
4278   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4279   if (UseCompressedOops) {
4280     z_st(zero, offset.constant_or_zero(), Ridx, base);
4281   } else {
4282     z_stg(zero, offset.constant_or_zero(), Ridx, base);
4283   }
4284 }
4285
4286 //-------------------------------------------------
4287 // Encode compressed oop. Generally usable encoder.
4288 //-------------------------------------------------
4289 // Rsrc - contains regular oop on entry. It remains unchanged (unless Rdst == Rsrc).
4290 // Rdst - contains compressed oop on exit.
4291 // Rdst and Rsrc may indicate the same register, in which case Rsrc is destroyed.
4292 //
4293 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4294 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4295 //
4296 // only32bitValid is set if later code only uses the lower 32 bits. In this
4297 // case we need not fix the upper 32 bits.
4298 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4299                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4300
4301   const address oop_base  = Universe::narrow_oop_base();
4302   const int     oop_shift = Universe::narrow_oop_shift();
4303   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4304
4305   assert(UseCompressedOops, "must be on to call this method");
4306   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4307   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4308
4309   if (disjoint || (oop_base == NULL)) {
4310     BLOCK_COMMENT("cOop encoder zeroBase {");
4311     if (oop_shift == 0) {
4312       if (oop_base != NULL && !only32bitValid) {
4313         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4314       } else {
4315         lgr_if_needed(Rdst, Rsrc);
4316       }
4317     } else {
4318       z_srlg(Rdst, Rsrc, oop_shift);
4319       if (oop_base != NULL && !only32bitValid) {
4320         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4321       }
4322     }
4323     BLOCK_COMMENT("} cOop encoder zeroBase");
4324     return;
4325   }
4326
4327   bool used_R0 = false;
4328   bool used_R1 = false;
4329
4330   BLOCK_COMMENT("cOop encoder general {");
4331   assert_different_registers(Rdst, Z_R1);
4332   assert_different_registers(Rsrc, Rbase);
4333   if (maybeNULL) {
4334     Label done;
4335     // We reorder shifting and subtracting, so that we can compare
4336     // and shift in parallel:
4337     //
4338     // cycle 0: potential LoadN, base = <const>
4339     // cycle 1: base = !base     dst = src >> 3,    cmp cr = (src != 0)
4340     // cycle 2: if (cr) br,      dst = dst + base + offset
4341
4342     // Get oop_base components.
4343 if (pow2_offset == -1) { 4344 if (Rdst == Rbase) { 4345 if (Rdst == Z_R1 || Rsrc == Z_R1) { 4346 Rbase = Z_R0; 4347 used_R0 = true; 4348 } else { 4349 Rdst = Z_R1; 4350 used_R1 = true; 4351 } 4352 } 4353 if (Rbase == Z_R1) { 4354 used_R1 = true; 4355 } 4356 pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift); 4357 } 4358 assert_different_registers(Rdst, Rbase); 4359 4360 // Check for NULL oop (must be left alone) and shift. 4361 if (oop_shift != 0) { // Shift out alignment bits 4362 if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set. 4363 z_srag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4364 } else { 4365 z_srlg(Rdst, Rsrc, oop_shift); 4366 z_ltgr(Rsrc, Rsrc); // This is the recommended way of testing for zero. 4367 // This probably is faster, as it does not write a register. No! 4368 // z_cghi(Rsrc, 0); 4369 } 4370 } else { 4371 z_ltgr(Rdst, Rsrc); // Move NULL to result register. 4372 } 4373 z_bre(done); 4374 4375 // Subtract oop_base components. 4376 if ((Rdst == Z_R0) || (Rbase == Z_R0)) { 4377 z_algr(Rdst, Rbase); 4378 if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); } 4379 } else { 4380 add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst); 4381 } 4382 if (!only32bitValid) { 4383 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4384 } 4385 bind(done); 4386 4387 } else { // not null 4388 // Get oop_base components. 4389 if (pow2_offset == -1) { 4390 pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base); 4391 } 4392 4393 // Subtract oop_base components and shift. 4394 if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) { 4395 // Don't use lay instruction. 4396 if (Rdst == Rsrc) { 4397 z_algr(Rdst, Rbase); 4398 } else { 4399 lgr_if_needed(Rdst, Rbase); 4400 z_algr(Rdst, Rsrc); 4401 } 4402 if (pow2_offset != 0) add2reg(Rdst, pow2_offset); 4403 } else { 4404 add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc); 4405 } 4406 if (oop_shift != 0) { // Shift out alignment bits. 4407 z_srlg(Rdst, Rdst, oop_shift); 4408 } 4409 if (!only32bitValid) { 4410 z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again. 4411 } 4412 } 4413 #ifdef ASSERT 4414 if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); } 4415 if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); } 4416 #endif 4417 BLOCK_COMMENT("} cOop encoder general"); 4418 } 4419 4420 //------------------------------------------------- 4421 // decode compressed oop. Generally usable decoder. 4422 //------------------------------------------------- 4423 // Rsrc - contains compressed oop on entry. 4424 // Rdst - contains regular oop on exit. 4425 // Rdst and Rsrc may indicate same register. 4426 // Rdst must not be the same register as Rbase, if Rbase was preloaded (before call). 4427 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch. 4428 // Rbase - register to use for the base 4429 // pow2_offset - offset of base to nice value. If -1, base must be loaded. 4430 // For performance, it is good to 4431 // - avoid Z_R0 for any of the argument registers. 4432 // - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance. 4433 // - avoid Z_R1 for Rdst if Rdst == Rbase. 
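// Decode formula (a sketch): oop = (narrow_oop << shift) + base, with NULL
// preserved when maybeNULL. If base and the shifted offset share no bits
// (disjoint base), the base can be ORed in, avoiding a base register load.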
4434 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) { 4435 4436 const address oop_base = Universe::narrow_oop_base(); 4437 const int oop_shift = Universe::narrow_oop_shift(); 4438 const bool disjoint = Universe::narrow_oop_base_disjoint(); 4439 4440 assert(UseCompressedOops, "must be on to call this method"); 4441 assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder"); 4442 assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), 4443 "cOop encoder detected bad shift"); 4444 4445 // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary. 4446 4447 if (oop_base != NULL) { 4448 unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff; 4449 unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff; 4450 unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff; 4451 if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) { 4452 BLOCK_COMMENT("cOop decoder disjointBase {"); 4453 // We do not need to load the base. Instead, we can install the upper bits 4454 // with an OR instead of an ADD. 4455 Label done; 4456 4457 // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set. 4458 if (maybeNULL) { // NULL ptr must be preserved! 4459 z_slag(Rdst, Rsrc, oop_shift); // Arithmetic shift sets the condition code. 4460 z_bre(done); 4461 } else { 4462 z_sllg(Rdst, Rsrc, oop_shift); // Logical shift leaves condition code alone. 4463 } 4464 if ((oop_base_hl != 0) && (oop_base_hh != 0)) { 4465 z_oihf(Rdst, oop_base_hf); 4466 } else if (oop_base_hl != 0) { 4467 z_oihl(Rdst, oop_base_hl); 4468 } else { 4469 assert(oop_base_hh != 0, "not heapbased mode"); 4470 z_oihh(Rdst, oop_base_hh); 4471 } 4472 bind(done); 4473 BLOCK_COMMENT("} cOop decoder disjointBase"); 4474 } else { 4475 BLOCK_COMMENT("cOop decoder general {"); 4476 // There are three decode steps: 4477 // scale oop offset (shift left) 4478 // get base (in reg) and pow2_offset (constant) 4479 // add base, pow2_offset, and oop offset 4480 // The following register overlap situations may exist: 4481 // Rdst == Rsrc, Rbase any other 4482 // not a problem. Scaling in-place leaves Rbase undisturbed. 4483 // Loading Rbase does not impact the scaled offset. 4484 // Rdst == Rbase, Rsrc any other 4485 // scaling would destroy a possibly preloaded Rbase. Loading Rbase 4486 // would destroy the scaled offset. 4487 // Remedy: use Rdst_tmp if Rbase has been preloaded. 4488 // use Rbase_tmp if base has to be loaded. 4489 // Rsrc == Rbase, Rdst any other 4490 // Only possible without preloaded Rbase. 4491 // Loading Rbase does not destroy compressed oop because it was scaled into Rdst before. 4492 // Rsrc == Rbase, Rdst == Rbase 4493 // Only possible without preloaded Rbase. 4494 // Loading Rbase would destroy compressed oop. Scaling in-place is ok. 4495 // Remedy: use Rbase_tmp. 4496 // 4497 Label done; 4498 Register Rdst_tmp = Rdst; 4499 Register Rbase_tmp = Rbase; 4500 bool used_R0 = false; 4501 bool used_R1 = false; 4502 bool base_preloaded = pow2_offset >= 0; 4503 guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller"); 4504 assert(oop_shift != 0, "room for optimization"); 4505 4506 // Check if we need to use scratch registers. 
4507     if (Rdst == Rbase) {
4508       assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4509       if (Rdst != Rsrc) {
4510         if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4511         else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4512       } else {
4513         Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4514       }
4515     }
4516     if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4517
4518     // Scale oop and check for NULL.
4519     // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4520     if (maybeNULL) { // NULL ptr must be preserved!
4521       z_slag(Rdst_tmp, Rsrc, oop_shift); // Arithmetic shift sets the condition code.
4522       z_bre(done);
4523     } else {
4524       z_sllg(Rdst_tmp, Rsrc, oop_shift); // Logical shift leaves condition code alone.
4525     }
4526
4527     // Get oop_base components.
4528     if (!base_preloaded) {
4529       pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4530     }
4531
4532     // Add up all components.
4533     if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4534       z_algr(Rdst_tmp, Rbase_tmp);
4535       if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4536     } else {
4537       add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4538     }
4539
4540     bind(done);
4541     lgr_if_needed(Rdst, Rdst_tmp);
4542 #ifdef ASSERT
4543     if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4544     if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4545 #endif
4546     BLOCK_COMMENT("} cOop decoder general");
4547   }
4548 } else {
4549   BLOCK_COMMENT("cOop decoder zeroBase {");
4550   if (oop_shift == 0) {
4551     lgr_if_needed(Rdst, Rsrc);
4552   } else {
4553     z_sllg(Rdst, Rsrc, oop_shift);
4554   }
4555   BLOCK_COMMENT("} cOop decoder zeroBase");
4556 }
4557 }
4558
4559 void MacroAssembler::load_mirror(Register mirror, Register method) {
4560   mem2reg_opt(mirror, Address(method, Method::const_offset()));
4561   mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
4562   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4563   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4564 }
4565
4566 //---------------------------------------------------------------
4567 //--- Operations on arrays.
4568 //---------------------------------------------------------------
4569
4570 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4571 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4572 // work registers anyway.
4573 // Actually, only r0, r1, and r5 are killed.
4574 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
4575   // src_addr is the even register of the MVCLE source register pair.
4576   // src_len  is the odd  register of the MVCLE source register pair.
4577
4578   int block_start = offset();
4579   Register tmp_reg  = src_len; // Holds target instr addr for EX.
4580   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4581   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4582
4583   Label doXC, doMVCLE, done;
4584
4585   BLOCK_COMMENT("Clear_Array {");
4586
4587   // Check for zero len and convert to long.
4588   z_ltgfr(src_len, cnt_arg); // Sign-extend cnt to 64 bit; sets CC for the zero check.
4589   z_bre(done);               // Nothing to do if len == 0.
4590
4591   // Prefetch data to be cleared.
4592   if (VM_Version::has_Prefetch()) {
4593     z_pfd(0x02, 0,   Z_R0, base_pointer_arg);
4594     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4595   }
4596
4597   z_sllg(dst_len, src_len, 3); // #bytes to clear.
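  // Dispatch note: an EXecuted XC can clear at most 256 bytes in one go, so
  // lengths <= 32 doublewords take the doXC path below, while longer requests
  // use MVCLE, which fills the destination with the padding byte (0) because
  // its source length is set to zero.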
4598 z_cghi(src_len, 32); // Check for len <= 256 bytes (<=32 DW). 4599 z_brnh(doXC); // If so, use executed XC to clear. 4600 4601 // MVCLE: initialize long arrays (general case). 4602 bind(doMVCLE); 4603 z_lgr(dst_addr, base_pointer_arg); 4604 clear_reg(src_len, true, false); // Src len of MVCLE is zero. 4605 4606 MacroAssembler::move_long_ext(dst_addr, src_addr, 0); 4607 z_bru(done); 4608 4609 // XC: initialize short arrays. 4610 Label XC_template; // Instr template, never exec directly! 4611 bind(XC_template); 4612 z_xc(0,0,base_pointer_arg,0,base_pointer_arg); 4613 4614 bind(doXC); 4615 add2reg(dst_len, -1); // Get #bytes-1 for EXECUTE. 4616 if (VM_Version::has_ExecuteExtensions()) { 4617 z_exrl(dst_len, XC_template); // Execute XC with var. len. 4618 } else { 4619 z_larl(tmp_reg, XC_template); 4620 z_ex(dst_len,0,Z_R0,tmp_reg); // Execute XC with var. len. 4621 } 4622 // z_bru(done); // fallthru 4623 4624 bind(done); 4625 4626 BLOCK_COMMENT("} Clear_Array"); 4627 4628 int block_end = offset(); 4629 return block_end - block_start; 4630 } 4631 4632 // Compiler ensures base is doubleword aligned and cnt is count of doublewords. 4633 // Emitter does not KILL any arguments nor work registers. 4634 // Emitter generates up to 16 XC instructions, depending on the array length. 4635 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) { 4636 int block_start = offset(); 4637 int off; 4638 int lineSize_Bytes = AllocatePrefetchStepSize; 4639 int lineSize_DW = AllocatePrefetchStepSize>>LogBytesPerWord; 4640 bool doPrefetch = VM_Version::has_Prefetch(); 4641 int XC_maxlen = 256; 4642 int numXCInstr = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0; 4643 4644 BLOCK_COMMENT("Clear_Array_Const {"); 4645 assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only"); 4646 4647 // Do less prefetching for very short arrays. 4648 if (numXCInstr > 0) { 4649 // Prefetch only some cache lines, then begin clearing. 4650 if (doPrefetch) { 4651 if (cnt*BytesPerWord <= lineSize_Bytes/4) { // If less than 1/4 of a cache line to clear, 4652 z_pfd(0x02, 0, Z_R0, base); // prefetch just the first cache line. 4653 } else { 4654 assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines"); 4655 for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) { 4656 z_pfd(0x02, off*lineSize_Bytes, Z_R0, base); 4657 } 4658 } 4659 } 4660 4661 for (off=0; off<(numXCInstr-1); off++) { 4662 z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base); 4663 4664 // Prefetch some cache lines in advance. 4665 if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) { 4666 z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base); 4667 } 4668 } 4669 if (off*XC_maxlen < cnt*BytesPerWord) { 4670 z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base); 4671 } 4672 } 4673 BLOCK_COMMENT("} Clear_Array_Const"); 4674 4675 int block_end = offset(); 4676 return block_end - block_start; 4677 } 4678 4679 // Compiler ensures base is doubleword aligned and cnt is #doublewords. 4680 // Emitter does not KILL cnt and base arguments, since they need to be copied to 4681 // work registers anyway. 4682 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed. 4683 // 4684 // For very large arrays, exploit MVCLE H/W support. 4685 // MVCLE instruction automatically exploits H/W-optimized page mover. 4686 // - Bytes up to next page boundary are cleared with a series of XC to self. 
4687 // - All full pages are cleared with the page mover H/W assist.
4688 // - Remaining bytes are again cleared by a series of XC to self.
4689 //
4690 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
4691   // src_addr is the even register of the MVCLE source register pair.
4692   // src_len  is the odd  register of the MVCLE source register pair.
4693
4694   int block_start = offset();
4695   Register dst_len  = Z_R1; // Holds dst len  for MVCLE.
4696   Register dst_addr = Z_R0; // Holds dst addr for MVCLE.
4697
4698   BLOCK_COMMENT("Clear_Array_Const_Big {");
4699
4700   // Get len to clear.
4701   load_const_optimized(dst_len, (long)cnt*8L); // in Bytes = #DW*8
4702
4703   // Prepare other args to MVCLE.
4704   z_lgr(dst_addr, base_pointer_arg);
4705   // Indicate unused result.
4706   (void) clear_reg(src_len, true, false); // Src len of MVCLE is zero.
4707
4708   // Clear.
4709   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4710   BLOCK_COMMENT("} Clear_Array_Const_Big");
4711
4712   int block_end = offset();
4713   return block_end - block_start;
4714 }
4715
4716 // Allocator.
4717 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4718                                                            Register cnt_reg,
4719                                                            Register tmp1_reg, Register tmp2_reg) {
4720   // tmp1_reg is the odd  register of the MVCLE source register pair.
4721   // tmp2_reg is the even register of the MVCLE source register pair.
4722
4723   int block_start = offset();
4724   Label doMVC, doMVCLE, done, MVC_template;
4725
4726   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4727
4728   // Check for zero len and convert to long.
4729   z_ltgfr(cnt_reg, cnt_reg); // Sign-extend cnt to 64 bit; sets CC for the zero check.
4730   z_bre(done);               // Nothing to do if len == 0.
4731
4732   z_sllg(Z_R1, cnt_reg, 3); // Dst len in bytes. Calc early to have the result ready.
4733
4734   z_cghi(cnt_reg, 32); // Check for len <= 256 bytes (<=32 DW).
4735   z_brnh(doMVC);       // If so, use executed MVC to copy.
4736
4737   bind(doMVCLE); // A lot of data (more than 256 bytes).
4738   // Prep dest reg pair.
4739   z_lgr(Z_R0, dst_reg); // dst addr
4740   // Dst len already in Z_R1.
4741   // Prep src reg pair.
4742   z_lgr(tmp2_reg, src_reg); // src addr
4743   z_lgr(tmp1_reg, Z_R1);    // Src len same as dst len.
4744
4745   // Do the copy.
4746   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4747   z_bru(done);                         // All done.
4748
4749   bind(MVC_template); // Just some data (not more than 256 bytes).
4750   z_mvc(0, 0, dst_reg, 0, src_reg);
4751
4752   bind(doMVC);
4753
4754   if (VM_Version::has_ExecuteExtensions()) {
4755     add2reg(Z_R1, -1);
4756   } else {
4757     add2reg(tmp1_reg, -1, Z_R1);
4758     z_larl(Z_R1, MVC_template);
4759   }
4760
4761   if (VM_Version::has_Prefetch()) {
4762     z_pfd(1, 0, Z_R0, src_reg);
4763     z_pfd(2, 0, Z_R0, dst_reg);
4764     // z_pfd(1, 256, Z_R0, src_reg); // Assume very short copy.
4765     // z_pfd(2, 256, Z_R0, dst_reg);
4766   }
4767
4768   if (VM_Version::has_ExecuteExtensions()) {
4769     z_exrl(Z_R1, MVC_template);
4770   } else {
4771     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4772   }
4773
4774   bind(done);
4775
4776   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4777
4778   int block_end = offset();
4779   return block_end - block_start;
4780 }
4781
4782 //------------------------------------------------------
4783 // Special String Intrinsics. Implementation
4784 //------------------------------------------------------
4785
4786 // Intrinsics for CompactStrings
4787
4788 // Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result.
4789 // The result is the number of characters copied before the first incompatible character was found.
4790 // If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise.
4791 //
4792 // Note: In case of failure, this does not behave exactly like the package-private java implementation StringUTF16.compress:
4793 // - A different number of characters may have been written to the (dead) destination array (if tmp2 is not provided).
4794 // - It returns a number <cnt instead of 0. (The result gets compared with cnt.)
4795 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register odd_reg,
4796                                              Register even_reg, Register tmp, Register tmp2) {
4797   int block_start = offset();
4798   Label Lloop1, Lloop2, Lslow, Ldone;
4799   const Register addr2 = dst, ind1 = result, mask = tmp;
4800   const bool precise = (tmp2 != noreg);
4801
4802   BLOCK_COMMENT("string_compress {");
4803
4804   z_sll(odd_reg, 1);   // Number of bytes to read. (Must be a positive simm32.)
4805   clear_reg(ind1);     // Index to read.
4806   z_llilf(mask, 0xFF00FF00);
4807   z_ahi(odd_reg, -16); // Last possible index for fast loop.
4808   z_brl(Lslow);
4809
4810   // ind1: index, even_reg: index increment, odd_reg: index limit
4811   z_iihf(mask, 0xFF00FF00);
4812   z_lhi(even_reg, 16);
4813
4814   bind(Lloop1); // 8 characters per iteration.
4815   z_lg(Z_R0, Address(src, ind1));
4816   z_lg(Z_R1, Address(src, ind1, 8));
4817   if (precise) {
4818     if (VM_Version::has_DistinctOpnds()) {
4819       z_ogrk(tmp2, Z_R0, Z_R1);
4820     } else {
4821       z_lgr(tmp2, Z_R0);
4822       z_ogr(tmp2, Z_R1);
4823     }
4824     z_ngr(tmp2, mask);
4825     z_brne(Lslow); // Failed fast case, retry slowly.
4826   }
4827   z_stcmh(Z_R0, 5, 0, addr2);
4828   z_stcm(Z_R0, 5, 2, addr2);
4829   if (!precise) { z_ogr(Z_R0, Z_R1); }
4830   z_stcmh(Z_R1, 5, 4, addr2);
4831   z_stcm(Z_R1, 5, 6, addr2);
4832   if (!precise) {
4833     z_ngr(Z_R0, mask);
4834     z_brne(Ldone); // Failed (more than needed was written).
4835   }
4836   z_aghi(addr2, 8);
4837   z_brxle(ind1, even_reg, Lloop1);
4838
4839   bind(Lslow);
4840   // Compute index limit and skip if negative.
4841   z_ahi(odd_reg, 16-2); // Last possible index for slow loop.
4842   z_lhi(even_reg, 2);
4843   z_cr(ind1, odd_reg);
4844   z_brh(Ldone);
4845
4846   bind(Lloop2); // 1 character per iteration.
4847   z_llh(Z_R0, Address(src, ind1));
4848   z_tmll(Z_R0, 0xFF00);
4849   z_brnaz(Ldone); // Failed slow case: Return number of written characters.
4850   z_stc(Z_R0, Address(addr2));
4851   z_aghi(addr2, 1);
4852   z_brxle(ind1, even_reg, Lloop2);
4853
4854   bind(Ldone); // result = ind1 = 2*cnt
4855   z_srl(ind1, 1);
4856
4857   BLOCK_COMMENT("} string_compress");
4858
4859   return offset() - block_start;
4860 }
4861
4862 // Inflate byte[] to char[], using the hardware TROT instruction.
4863 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
4864   int block_start = offset();
4865
4866   BLOCK_COMMENT("string_inflate_trot {");
4867
4868   Register stop_char = Z_R0;
4869   Register table     = Z_R1;
4870   Register src_addr  = tmp;
4871
4872   assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
4873   assert(dst->encoding()%2 == 0, "must be even reg");
4874   assert(cnt->encoding()%2 == 1, "must be odd reg");
4875   assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
4876
4877   StubRoutines::zarch::generate_load_trot_table_addr(this, table); // kills Z_R0 (if ASSERT)
4878   clear_reg(stop_char); // Stop character. Not used here, but initialized to have a defined value.
4879   lgr_if_needed(src_addr, src);
4880   z_llgfr(cnt, cnt); // # src characters, must be a positive simm32.
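  // TROT ("translate one to two") widens each source byte into a two-byte
  // character via the translate table in Z_R1 (built by
  // generate_load_trot_table_addr); dst/cnt form the even/odd pair holding
  // the destination address and the remaining source length, while Z_R0
  // would supply the stop character, which is unused here.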
4881
4882   translate_ot(dst, src_addr, /* mask = */ 0x0001);
4883
4884   BLOCK_COMMENT("} string_inflate_trot");
4885
4886   return offset() - block_start;
4887 }
4888
4889 // Inflate byte[] to char[]. odd_reg contains cnt. Kills src.
4890 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg,
4891                                             Register even_reg, Register tmp) {
4892   int block_start = offset();
4893
4894   BLOCK_COMMENT("string_inflate {");
4895
4896   Label Lloop1, Lloop2, Lslow, Ldone;
4897   const Register addr1 = src, ind2 = tmp;
4898
4899   z_sll(odd_reg, 1);   // Number of bytes to write. (Must be a positive simm32.)
4900   clear_reg(ind2);     // Index to write.
4901   z_ahi(odd_reg, -16); // Last possible index for fast loop.
4902   z_brl(Lslow);
4903
4904   // ind2: index, even_reg: index increment, odd_reg: index limit
4905   clear_reg(Z_R0);
4906   clear_reg(Z_R1);
4907   z_lhi(even_reg, 16);
4908
4909   bind(Lloop1); // 8 characters per iteration.
4910   z_icmh(Z_R0, 5, 0, addr1);
4911   z_icmh(Z_R1, 5, 4, addr1);
4912   z_icm(Z_R0, 5, 2, addr1);
4913   z_icm(Z_R1, 5, 6, addr1);
4914   z_aghi(addr1, 8);
4915   z_stg(Z_R0, Address(dst, ind2));
4916   z_stg(Z_R1, Address(dst, ind2, 8));
4917   z_brxle(ind2, even_reg, Lloop1);
4918
4919   bind(Lslow);
4920   // Compute index limit and skip if negative.
4921   z_ahi(odd_reg, 16-2); // Last possible index for slow loop.
4922   z_lhi(even_reg, 2);
4923   z_cr(ind2, odd_reg);
4924   z_brh(Ldone);
4925
4926   bind(Lloop2); // 1 character per iteration.
4927   z_llc(Z_R0, Address(addr1));
4928   z_sth(Z_R0, Address(dst, ind2));
4929   z_aghi(addr1, 1);
4930   z_brxle(ind2, even_reg, Lloop2);
4931
4932   bind(Ldone);
4933
4934   BLOCK_COMMENT("} string_inflate");
4935
4936   return offset() - block_start;
4937 }
4938
4939 // Kills src.
4940 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
4941                                            Register odd_reg, Register even_reg, Register tmp) {
4942   int block_start = offset();
4943   Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
4944   const Register addr = src, mask = tmp;
4945
4946   BLOCK_COMMENT("has_negatives {");
4947
4948   z_llgfr(Z_R1, cnt); // Number of bytes to read. (Must be a positive simm32.)
4949   z_llilf(mask, 0x80808080);
4950   z_lhi(result, 1);   // Assume true.
4951   // Last possible addr for fast loop.
4952   z_lay(odd_reg, -16, Z_R1, src);
4953   z_chi(cnt, 16);
4954   z_brl(Lslow);
4955
4956   // addr: current address, even_reg: address increment, odd_reg: address limit
4957   z_iihf(mask, 0x80808080);
4958   z_lghi(even_reg, 16);
4959
4960   bind(Lloop1); // 16 bytes per iteration.
4961   z_lg(Z_R0, Address(addr));
4962   z_lg(Z_R1, Address(addr, 8));
4963   z_ogr(Z_R0, Z_R1);
4964   z_ngr(Z_R0, mask);
4965   z_brne(Ldone); // If found return 1.
4966   z_brxlg(addr, even_reg, Lloop1);
4967
4968   bind(Lslow);
4969   z_aghi(odd_reg, 16-1); // Last possible addr for slow loop.
4970   z_lghi(even_reg, 1);
4971   z_cgr(addr, odd_reg);
4972   z_brh(Lnotfound);
4973
4974   bind(Lloop2); // 1 byte per iteration.
4975   z_cli(Address(addr), 0x80);
4976   z_brnl(Ldone); // If found return 1.
4977   z_brxlg(addr, even_reg, Lloop2);
4978
4979   bind(Lnotfound);
4980   z_lhi(result, 0);
4981
4982   bind(Ldone);
4983
4984   BLOCK_COMMENT("} has_negatives");
4985
4986   return offset() - block_start;
4987 }
4988
4989 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
4990 unsigned int MacroAssembler::string_compare(Register str1, Register str2,
4991                                             Register cnt1, Register cnt2,
4992                                             Register odd_reg, Register even_reg, Register result, int ae) {
4993   int block_start = offset();
4994
4995   assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
4996   assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
4997
4998   // If strings are equal up to min length, return the length difference.
4999   const Register diff = result, // Pre-set result with length difference.
5000                  min = cnt1,   // min number of bytes
5001                  tmp = cnt2;
5002
5003   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
5004   // we interchange str1 and str2 in the UL case and negate the result.
5005   // This way, str1 is always latin1 encoded, except for the UU case.
5006   // In addition, a value must be zero-extended when used in a 64-bit register (sign extension would yield the same result here).
5007   const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
5008
5009   BLOCK_COMMENT("string_compare {");
5010
5011   if (used_as_LU) {
5012     z_srl(cnt2, 1);
5013   }
5014
5015   // See if the lengths are different, and calculate min in cnt1.
5016   // Save diff in case we need it for a tie-breaker.
5017
5018   // diff = cnt1 - cnt2
5019   if (VM_Version::has_DistinctOpnds()) {
5020     z_srk(diff, cnt1, cnt2);
5021   } else {
5022     z_lr(diff, cnt1);
5023     z_sr(diff, cnt2);
5024   }
5025   if (str1 != str2) {
5026     if (VM_Version::has_LoadStoreConditional()) {
5027       z_locr(min, cnt2, Assembler::bcondHigh);
5028     } else {
5029       Label Lskip;
5030       z_brl(Lskip);    // min ok if cnt1 < cnt2
5031       z_lr(min, cnt2); // min = cnt2
5032       bind(Lskip);
5033     }
5034   }
5035
5036   if (ae == StrIntrinsicNode::UU) {
5037     z_sra(diff, 1);
5038   }
5039   if (str1 != str2) {
5040     Label Ldone;
5041     if (used_as_LU) {
5042       // Loop which searches the first difference character by character.
5043       Label Lloop;
5044       const Register ind1 = Z_R1,
5045                      ind2 = min;
5046       int stride1 = 1, stride2 = 2; // See comment above.
5047
5048       // ind1: index, even_reg: index increment, odd_reg: index limit
5049       z_llilf(ind1, (unsigned int)(-stride1));
5050       z_lhi(even_reg, stride1);
5051       add2reg(odd_reg, -stride1, min);
5052       clear_reg(ind2); // kills min
5053
5054       bind(Lloop);
5055       z_brxh(ind1, even_reg, Ldone);
5056       z_llc(tmp, Address(str1, ind1));
5057       z_llh(Z_R0, Address(str2, ind2));
5058       z_ahi(ind2, stride2);
5059       z_sr(tmp, Z_R0);
5060       z_bre(Lloop);
5061
5062       z_lr(result, tmp);
5063
5064     } else {
5065       // Use clcle in fast loop (only for same encoding).
5066       z_lgr(Z_R0, str1);
5067       z_lgr(even_reg, str2);
5068       z_llgfr(Z_R1, min);
5069       z_llgfr(odd_reg, min);
5070
5071       if (ae == StrIntrinsicNode::LL) {
5072         compare_long_ext(Z_R0, even_reg, 0);
5073       } else {
5074         compare_long_uni(Z_R0, even_reg, 0);
5075       }
5076       z_bre(Ldone);
5077       z_lgr(Z_R1, Z_R0);
5078       if (ae == StrIntrinsicNode::LL) {
5079         z_llc(Z_R0, Address(even_reg));
5080         z_llc(result, Address(Z_R1));
5081       } else {
5082         z_llh(Z_R0, Address(even_reg));
5083         z_llh(result, Address(Z_R1));
5084       }
5085       z_sr(result, Z_R0);
5086     }
5087
5088     // Otherwise, return the difference between the first mismatched chars.
5089 bind(Ldone); 5090 } 5091 5092 if (ae == StrIntrinsicNode::UL) { 5093 z_lcr(result, result); // Negate result (see note above). 5094 } 5095 5096 BLOCK_COMMENT("} string_compare"); 5097 5098 return offset() - block_start; 5099 } 5100 5101 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit, 5102 Register odd_reg, Register even_reg, Register result, bool is_byte) { 5103 int block_start = offset(); 5104 5105 BLOCK_COMMENT("array_equals {"); 5106 5107 assert_different_registers(ary1, limit, odd_reg, even_reg); 5108 assert_different_registers(ary2, limit, odd_reg, even_reg); 5109 5110 Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template; 5111 int base_offset = 0; 5112 5113 if (ary1 != ary2) { 5114 if (is_array_equ) { 5115 base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR); 5116 5117 // Return true if the same array. 5118 compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true); 5119 5120 // Return false if one of them is NULL. 5121 compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5122 compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false); 5123 5124 // Load the lengths of arrays. 5125 z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes())); 5126 5127 // Return false if the two arrays are not equal length. 5128 z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes())); 5129 z_brne(Ldone_false); 5130 5131 // string len in bytes (right operand) 5132 if (!is_byte) { 5133 z_chi(odd_reg, 128); 5134 z_sll(odd_reg, 1); // preserves flags 5135 z_brh(Lclcle); 5136 } else { 5137 compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5138 } 5139 } else { 5140 z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value. 5141 compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle); 5142 } 5143 5144 5145 // Use clc instruction for up to 256 bytes. 5146 { 5147 Register str1_reg = ary1, 5148 str2_reg = ary2; 5149 if (is_array_equ) { 5150 str1_reg = Z_R1; 5151 str2_reg = even_reg; 5152 add2reg(str1_reg, base_offset, ary1); // string addr (left operand) 5153 add2reg(str2_reg, base_offset, ary2); // string addr (right operand) 5154 } 5155 z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0. 5156 z_brl(Ldone_true); 5157 // Note: We could jump to the template if equal. 5158 5159 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5160 z_exrl(odd_reg, CLC_template); 5161 z_bre(Ldone_true); 5162 // fall through 5163 5164 bind(Ldone_false); 5165 clear_reg(result); 5166 z_bru(Ldone); 5167 5168 bind(CLC_template); 5169 z_clc(0, 0, str1_reg, 0, str2_reg); 5170 } 5171 5172 // Use clcle instruction. 5173 { 5174 bind(Lclcle); 5175 add2reg(even_reg, base_offset, ary2); // string addr (right operand) 5176 add2reg(Z_R0, base_offset, ary1); // string addr (left operand) 5177 5178 z_lgr(Z_R1, odd_reg); // string len in bytes (left operand) 5179 if (is_byte) { 5180 compare_long_ext(Z_R0, even_reg, 0); 5181 } else { 5182 compare_long_uni(Z_R0, even_reg, 0); 5183 } 5184 z_lghi(result, 0); // Preserve flags. 5185 z_brne(Ldone); 5186 } 5187 } 5188 // fall through 5189 5190 bind(Ldone_true); 5191 z_lghi(result, 1); // All characters are equal. 
5192 bind(Ldone); 5193 5194 BLOCK_COMMENT("} array_equals"); 5195 5196 return offset() - block_start; 5197 } 5198 5199 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result 5200 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt, 5201 Register needle, Register needlecnt, int needlecntval, 5202 Register odd_reg, Register even_reg, int ae) { 5203 int block_start = offset(); 5204 5205 // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite! 5206 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 5207 const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2; 5208 const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1; 5209 Label L_needle1, L_Found, L_NotFound; 5210 5211 BLOCK_COMMENT("string_indexof {"); 5212 5213 if (needle == haystack) { 5214 z_lhi(result, 0); 5215 } else { 5216 5217 // Load first character of needle (R0 used by search_string instructions). 5218 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } 5219 5220 // Compute last haystack addr to use if no match gets found. 5221 if (needlecnt != noreg) { // variable needlecnt 5222 z_ahi(needlecnt, -1); // Remaining characters after first one. 5223 z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare. 5224 if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes. 5225 } else { // constant needlecnt 5226 assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate"); 5227 // Compute index succeeding last element to compare. 5228 if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); } 5229 } 5230 5231 z_llgfr(haycnt, haycnt); // Clear high half. 5232 z_lgr(result, haystack); // Final result will be computed from needle start pointer. 5233 if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes. 5234 z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)). 5235 5236 if (h_csize != n_csize) { 5237 assert(ae == StrIntrinsicNode::UL, "Invalid encoding"); 5238 5239 if (needlecnt != noreg || needlecntval != 1) { 5240 if (needlecnt != noreg) { 5241 compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1); 5242 } 5243 5244 // Main Loop: UL version (now we have at least 2 characters). 5245 Label L_OuterLoop, L_InnerLoop, L_Skip; 5246 bind(L_OuterLoop); // Search for 1st 2 characters. 5247 z_lgr(Z_R1, haycnt); 5248 MacroAssembler::search_string_uni(Z_R1, result); 5249 z_brc(Assembler::bcondNotFound, L_NotFound); 5250 z_lgr(result, Z_R1); 5251 5252 z_lghi(Z_R1, n_csize); 5253 z_lghi(even_reg, h_csize); 5254 bind(L_InnerLoop); 5255 z_llgc(odd_reg, Address(needle, Z_R1)); 5256 z_ch(odd_reg, Address(result, even_reg)); 5257 z_brne(L_Skip); 5258 if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); } 5259 z_brnl(L_Found); 5260 z_aghi(Z_R1, n_csize); 5261 z_aghi(even_reg, h_csize); 5262 z_bru(L_InnerLoop); 5263 5264 bind(L_Skip); 5265 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5266 z_bru(L_OuterLoop); 5267 } 5268 5269 } else { 5270 const intptr_t needle_bytes = (n_csize == 2) ? 
((needlecntval - 1) << 1) : (needlecntval - 1); 5271 Label L_clcle; 5272 5273 if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) { 5274 if (needlecnt != noreg) { 5275 compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle); 5276 z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC) 5277 z_brl(L_needle1); 5278 } 5279 5280 // Main Loop: clc version (now we have at least 2 characters). 5281 Label L_OuterLoop, CLC_template; 5282 bind(L_OuterLoop); // Search for 1st 2 characters. 5283 z_lgr(Z_R1, haycnt); 5284 if (h_csize == 1) { 5285 MacroAssembler::search_string(Z_R1, result); 5286 } else { 5287 MacroAssembler::search_string_uni(Z_R1, result); 5288 } 5289 z_brc(Assembler::bcondNotFound, L_NotFound); 5290 z_lgr(result, Z_R1); 5291 5292 if (needlecnt != noreg) { 5293 assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware"); 5294 z_exrl(needlecnt, CLC_template); 5295 } else { 5296 z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle); 5297 } 5298 z_bre(L_Found); 5299 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5300 z_bru(L_OuterLoop); 5301 5302 if (needlecnt != noreg) { 5303 bind(CLC_template); 5304 z_clc(h_csize, 0, Z_R1, n_csize, needle); 5305 } 5306 } 5307 5308 if (needlecnt != noreg || needle_bytes > 256) { 5309 bind(L_clcle); 5310 5311 // Main Loop: clcle version (now we have at least 256 bytes). 5312 Label L_OuterLoop, CLC_template; 5313 bind(L_OuterLoop); // Search for 1st 2 characters. 5314 z_lgr(Z_R1, haycnt); 5315 if (h_csize == 1) { 5316 MacroAssembler::search_string(Z_R1, result); 5317 } else { 5318 MacroAssembler::search_string_uni(Z_R1, result); 5319 } 5320 z_brc(Assembler::bcondNotFound, L_NotFound); 5321 5322 add2reg(Z_R0, n_csize, needle); 5323 add2reg(even_reg, h_csize, Z_R1); 5324 z_lgr(result, Z_R1); 5325 if (needlecnt != noreg) { 5326 z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand) 5327 z_llgfr(odd_reg, needlecnt); 5328 } else { 5329 load_const_optimized(Z_R1, needle_bytes); 5330 if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); } 5331 } 5332 if (h_csize == 1) { 5333 compare_long_ext(Z_R0, even_reg, 0); 5334 } else { 5335 compare_long_uni(Z_R0, even_reg, 0); 5336 } 5337 z_bre(L_Found); 5338 5339 if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload. 5340 z_aghi(result, h_csize); // This is the new address we want to use for comparing. 5341 z_bru(L_OuterLoop); 5342 } 5343 } 5344 5345 if (needlecnt != noreg || needlecntval == 1) { 5346 bind(L_needle1); 5347 5348 // Single needle character version. 5349 if (h_csize == 1) { 5350 MacroAssembler::search_string(haycnt, result); 5351 } else { 5352 MacroAssembler::search_string_uni(haycnt, result); 5353 } 5354 z_lgr(result, haycnt); 5355 z_brc(Assembler::bcondFound, L_Found); 5356 } 5357 5358 bind(L_NotFound); 5359 add2reg(result, -1, haystack); // Return -1. 5360 5361 bind(L_Found); // Return index (or -1 in fallthrough case). 
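    // Convert the match address back into a character index; a sketch of the
    // two instructions that follow:
    //   result = (match_addr - haystack) >> log2(bytes per character)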
5362 z_sgr(result, haystack); 5363 if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); } 5364 } 5365 BLOCK_COMMENT("} string_indexof"); 5366 5367 return offset() - block_start; 5368 } 5369 5370 // early clobber: result 5371 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt, 5372 Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) { 5373 int block_start = offset(); 5374 5375 BLOCK_COMMENT("string_indexof_char {"); 5376 5377 if (needle == haystack) { 5378 z_lhi(result, 0); 5379 } else { 5380 5381 Label Ldone; 5382 5383 z_llgfr(odd_reg, haycnt); // Preset loop ctr/searchrange end. 5384 if (needle == noreg) { 5385 load_const_optimized(Z_R0, (unsigned long)needleChar); 5386 } else { 5387 if (is_byte) { 5388 z_llgcr(Z_R0, needle); // First (and only) needle char. 5389 } else { 5390 z_llghr(Z_R0, needle); // First (and only) needle char. 5391 } 5392 } 5393 5394 if (!is_byte) { 5395 z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU. 5396 } 5397 5398 z_lgr(even_reg, haystack); // haystack addr 5399 z_agr(odd_reg, haystack); // First char after range end. 5400 z_lghi(result, -1); 5401 5402 if (is_byte) { 5403 MacroAssembler::search_string(odd_reg, even_reg); 5404 } else { 5405 MacroAssembler::search_string_uni(odd_reg, even_reg); 5406 } 5407 z_brc(Assembler::bcondNotFound, Ldone); 5408 if (is_byte) { 5409 if (VM_Version::has_DistinctOpnds()) { 5410 z_sgrk(result, odd_reg, haystack); 5411 } else { 5412 z_sgr(odd_reg, haystack); 5413 z_lgr(result, odd_reg); 5414 } 5415 } else { 5416 z_slgr(odd_reg, haystack); 5417 z_srlg(result, odd_reg, exact_log2(sizeof(jchar))); 5418 } 5419 5420 bind(Ldone); 5421 } 5422 BLOCK_COMMENT("} string_indexof_char"); 5423 5424 return offset() - block_start; 5425 } 5426 5427 5428 //------------------------------------------------- 5429 // Constants (scalar and oop) in constant pool 5430 //------------------------------------------------- 5431 5432 // Add a non-relocated constant to the CP. 5433 int MacroAssembler::store_const_in_toc(AddressLiteral& val) { 5434 long value = val.value(); 5435 address tocPos = long_constant(value); 5436 5437 if (tocPos != NULL) { 5438 int tocOffset = (int)(tocPos - code()->consts()->start()); 5439 return tocOffset; 5440 } 5441 // Address_constant returned NULL, so no constant entry has been created. 5442 // In that case, we return a "fatal" offset, just in case that subsequently 5443 // generated access code is executed. 5444 return -1; 5445 } 5446 5447 // Returns the TOC offset where the address is stored. 5448 // Add a relocated constant to the CP. 5449 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) { 5450 // Use RelocationHolder::none for the constant pool entry. 5451 // Otherwise we will end up with a failing NativeCall::verify(x), 5452 // where x is the address of the constant pool entry. 5453 address tocPos = address_constant((address)oop.value(), RelocationHolder::none); 5454 5455 if (tocPos != NULL) { 5456 int tocOffset = (int)(tocPos - code()->consts()->start()); 5457 RelocationHolder rsp = oop.rspec(); 5458 Relocation *rel = rsp.reloc(); 5459 5460 // Store toc_offset in relocation, used by call_far_patchable. 5461 if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) { 5462 ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset); 5463 } 5464 // Relocate at the load's pc. 
5465     relocate(rsp);
5466
5467     return tocOffset;
5468   }
5469   // Address_constant returned NULL, so no constant entry has been created.
5470   // In that case, we return a "fatal" offset, just in case that subsequently
5471   // generated access code is executed.
5472   return -1;
5473 }
5474
5475 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5476   int tocOffset = store_const_in_toc(a);
5477   if (tocOffset == -1) return false;
5478   address tocPos = tocOffset + code()->consts()->start();
5479   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5480
5481   load_long_pcrelative(dst, tocPos);
5482   return true;
5483 }
5484
5485 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5486   int tocOffset = store_oop_in_toc(a);
5487   if (tocOffset == -1) return false;
5488   address tocPos = tocOffset + code()->consts()->start();
5489   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5490
5491   load_addr_pcrelative(dst, tocPos);
5492   return true;
5493 }
5494
5495 // If the instruction sequence at the given pc is a load_const_from_toc
5496 // sequence, return the value currently stored at the referenced position
5497 // in the TOC.
5498 intptr_t MacroAssembler::get_const_from_toc(address pc) {
5499
5500   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5501
5502   long offset = get_load_const_from_toc_offset(pc);
5503   address dataLoc = NULL;
5504   if (is_load_const_from_toc_pcrelative(pc)) {
5505     dataLoc = pc + offset;
5506   } else {
5507     CodeBlob* cb = CodeCache::find_blob_unsafe(pc); // Else we get assertion if nmethod is zombie.
5508     assert(cb && cb->is_nmethod(), "sanity");
5509     nmethod* nm = (nmethod*)cb;
5510     dataLoc = nm->ctable_begin() + offset;
5511   }
5512   return *(intptr_t *)dataLoc;
5513 }
5514
5515 // If the instruction sequence at the given pc is a load_const_from_toc
5516 // sequence, copy the passed-in new_data value into the referenced
5517 // position in the TOC.
5518 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
5519   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5520
5521   long offset = MacroAssembler::get_load_const_from_toc_offset(pc);
5522   address dataLoc = NULL;
5523   if (is_load_const_from_toc_pcrelative(pc)) {
5524     dataLoc = pc + offset;
5525   } else {
5526     nmethod* nm = CodeCache::find_nmethod(pc);
5527     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
5528     dataLoc = nm->ctable_begin() + offset;
5529   }
5530   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
5531     *(unsigned long *)dataLoc = new_data;
5532   }
5533 }
5534
5535 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
5536 // site. Verify by calling is_load_const_from_toc() before!!
5537 // Offset is +/- 2**32 -> use long.
5538 long MacroAssembler::get_load_const_from_toc_offset(address a) {
5539   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
5540   // expected code sequence:
5541   //   z_lgrl(t, simm32);    len = 6
5542   unsigned long inst;
5543   unsigned int len = get_instruction(a, &inst);
5544   return get_pcrel_offset(inst);
5545 }
5546
5547 //**********************************************************************************
5548 // inspection of generated instruction sequences for a particular pattern
5549 //**********************************************************************************
5550
5551 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
5552 #ifdef ASSERT
5553   unsigned long inst;
5554   unsigned int len = get_instruction(a+2, &inst);
5555   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
5556     const int range = 128;
5557     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
5558     VM_Version::z_SIGSEGV();
5559   }
5560 #endif
5561   // expected code sequence:
5562   //   z_lgrl(t, relAddr32);    len = 6
5563   // TODO: verify accessed data is in CP, if possible.
5564   return is_load_pcrelative_long(a); // TODO: might be too general. Currently, only lgrl is used.
5565 }
5566
5567 bool MacroAssembler::is_load_const_from_toc_call(address a) {
5568   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
5569 }
5570
5571 bool MacroAssembler::is_load_const_call(address a) {
5572   return is_load_const(a) && is_call_byregister(a + load_const_size());
5573 }
5574
5575 //-------------------------------------------------
5576 // Emitters for some really CISC instructions
5577 //-------------------------------------------------
5578
5579 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
5580   assert(dst->encoding()%2==0, "must be an even/odd register pair");
5581   assert(src->encoding()%2==0, "must be an even/odd register pair");
5582   assert(pad<256, "must be a padding BYTE");
5583
5584   Label retry;
5585   bind(retry);
5586   Assembler::z_mvcle(dst, src, pad);
5587   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5588 }
5589
5590 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
5591   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
5592   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
5593   assert(pad<256, "must be a padding BYTE");
5594
5595   Label retry;
5596   bind(retry);
5597   Assembler::z_clcle(left, right, pad, Z_R0);
5598   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5599 }
5600
5601 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
5602   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
5603   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
5604   assert(pad<=0xfff, "must be a padding HALFWORD");
5605   assert(VM_Version::has_ETF2(), "instruction must be available");
5606
5607   Label retry;
5608   bind(retry);
5609   Assembler::z_clclu(left, right, pad, Z_R0);
5610   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5611 }
5612
5613 void MacroAssembler::search_string(Register end, Register start) {
5614   assert(end->encoding() != 0, "end address must not be in R0");
5615   assert(start->encoding() != 0, "start address must not be in R0");
5616
5617   Label retry;
5618   bind(retry);
5619   Assembler::z_srst(end, start);
5620
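  // SRST, like the other wrapped instructions in this section, may be
  // interrupted by the CPU before the whole range has been processed; it then
  // sets CC==3 with its registers updated, so the branch back to 'retry'
  // simply resumes the operation.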
Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5621 } 5622 5623 void MacroAssembler::search_string_uni(Register end, Register start) { 5624 assert(end->encoding() != 0, "end address must not be in R0"); 5625 assert(start->encoding() != 0, "start address must not be in R0"); 5626 assert(VM_Version::has_ETF3(), "instruction must be available"); 5627 5628 Label retry; 5629 bind(retry); 5630 Assembler::z_srstu(end, start); 5631 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5632 } 5633 5634 void MacroAssembler::kmac(Register srcBuff) { 5635 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5636 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5637 5638 Label retry; 5639 bind(retry); 5640 Assembler::z_kmac(Z_R0, srcBuff); 5641 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5642 } 5643 5644 void MacroAssembler::kimd(Register srcBuff) { 5645 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5646 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5647 5648 Label retry; 5649 bind(retry); 5650 Assembler::z_kimd(Z_R0, srcBuff); 5651 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5652 } 5653 5654 void MacroAssembler::klmd(Register srcBuff) { 5655 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5656 assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair"); 5657 5658 Label retry; 5659 bind(retry); 5660 Assembler::z_klmd(Z_R0, srcBuff); 5661 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5662 } 5663 5664 void MacroAssembler::km(Register dstBuff, Register srcBuff) { 5665 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 5666 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 5667 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5668 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 5669 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5670 5671 Label retry; 5672 bind(retry); 5673 Assembler::z_km(dstBuff, srcBuff); 5674 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5675 } 5676 5677 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) { 5678 // DstBuff and srcBuff are allowed to be the same register (encryption in-place). 5679 // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block. 
5680 assert(srcBuff->encoding() != 0, "src buffer address can't be in Z_R0"); 5681 assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register"); 5682 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5683 5684 Label retry; 5685 bind(retry); 5686 Assembler::z_kmc(dstBuff, srcBuff); 5687 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5688 } 5689 5690 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) { 5691 assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair"); 5692 5693 Label retry; 5694 bind(retry); 5695 Assembler::z_cksm(crcBuff, srcBuff); 5696 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5697 } 5698 5699 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) { 5700 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5701 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5702 5703 Label retry; 5704 bind(retry); 5705 Assembler::z_troo(r1, r2, m3); 5706 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5707 } 5708 5709 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) { 5710 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5711 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5712 5713 Label retry; 5714 bind(retry); 5715 Assembler::z_trot(r1, r2, m3); 5716 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5717 } 5718 5719 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) { 5720 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5721 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5722 5723 Label retry; 5724 bind(retry); 5725 Assembler::z_trto(r1, r2, m3); 5726 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5727 } 5728 5729 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) { 5730 assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair"); 5731 assert((m3 & 0b1110) == 0, "Unused mask bits must be zero"); 5732 5733 Label retry; 5734 bind(retry); 5735 Assembler::z_trtt(r1, r2, m3); 5736 Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry); 5737 } 5738 5739 void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) { 5740 if (scratch == noreg) scratch = Z_R1; 5741 address Astate = SafepointSynchronize::address_of_state(); 5742 BLOCK_COMMENT("safepoint check:"); 5743 5744 if (may_relocate) { 5745 ptrdiff_t total_distance = Astate - this->pc(); 5746 if (RelAddr::is_in_range_of_RelAddr32(total_distance)) { 5747 RelocationHolder rspec = external_word_Relocation::spec(Astate); 5748 (this)->relocate(rspec, relocInfo::pcrel_addr_format); 5749 load_absolute_address(scratch, Astate); 5750 } else { 5751 load_const_optimized(scratch, Astate); 5752 } 5753 } else { 5754 load_absolute_address(scratch, Astate); 5755 } 5756 z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized); 5757 z_brne(slow_path); 5758 } 5759 5760 5761 void MacroAssembler::generate_type_profiling(const Register Rdata, 5762 const Register Rreceiver_klass, 5763 const Register Rwanted_receiver_klass, 5764 const Register Rmatching_row, 5765 bool is_virtual_call) { 5766 const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) - 5767 
                       in_bytes(ReceiverTypeData::receiver_offset(0));
5768   const int num_rows = ReceiverTypeData::row_limit();
5769   NearLabel found_free_row;
5770   NearLabel do_increment;
5771   NearLabel found_no_slot;
5772
5773   BLOCK_COMMENT("type profiling {");
5774
5775   // search for:
5776   //    a) The type given in Rwanted_receiver_klass.
5777   //    b) The *first* empty row.
5778
5779   // First search for a) only, just running over b) with no regard.
5780   // This is possible because
5781   //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
5782   // is never true (receiver_class can't be zero).
5783   for (int row_num = 0; row_num < num_rows; row_num++) {
5784     // Row_offset should be a well-behaved positive number. The generated code relies
5785     // on that to keep the generated code size constant. Add2reg could handle all row_offset
5786     // values, but the generated code size would then have to vary.
5787     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
5788     assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
5789
5790     // Is Rwanted_receiver_klass in this row?
5791     if (VM_Version::has_CompareBranch()) {
5792       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
5793       // Rmatching_row = Rdata + row_offset;
5794       add2reg(Rmatching_row, row_offset, Rdata);
5795       // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
5796       compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
5797     } else {
5798       add2reg(Rmatching_row, row_offset, Rdata);
5799       z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
5800       z_bre(do_increment);
5801     }
5802   }
5803
5804   // Now that we did not find a match, let's search for b).
5805
5806   // We could save the first calculation of Rmatching_row if we would search for a) in reverse order.
5807   // We would then end up here with Rmatching_row containing the value for row_num == 0.
5808   // We would not see much benefit, if any at all, because the CPU can schedule
5809   // two instructions together with a branch anyway.
5810   for (int row_num = 0; row_num < num_rows; row_num++) {
5811     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
5812
5813     // Has this row a zero receiver_klass, i.e. is it empty?
5814     if (VM_Version::has_CompareBranch()) {
5815       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
5816       // Rmatching_row = Rdata + row_offset
5817       add2reg(Rmatching_row, row_offset, Rdata);
5818       // if (*row_recv == (intptr_t) 0) goto found_free_row
5819       compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
5820     } else {
5821       add2reg(Rmatching_row, row_offset, Rdata);
5822       load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
5823       z_bre(found_free_row); // zero -> Found a free row.
5824     }
5825   }
5826
5827   // No match, no empty row found.
5828   // Increment total counter to indicate polymorphic case.
5829   if (is_virtual_call) {
5830     add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
5831   }
5832   z_bru(found_no_slot);
5833
5834   // Here we found an empty row, but we have not found Rwanted_receiver_klass.
5835   // Rmatching_row holds the address to the first empty row.
5836   bind(found_free_row);
5837   // Store receiver_klass into empty slot.
5838   z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
5839
5840   // Increment the counter of Rmatching_row.
5841 bind(do_increment); 5842 ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0); 5843 add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata); 5844 5845 bind(found_no_slot); 5846 5847 BLOCK_COMMENT("} type profiling"); 5848 } 5849 5850 //--------------------------------------- 5851 // Helpers for Intrinsic Emitters 5852 //--------------------------------------- 5853 5854 /** 5855 * uint32_t crc; 5856 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 5857 */ 5858 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) { 5859 assert_different_registers(crc, table, tmp); 5860 assert_different_registers(val, table); 5861 if (crc == val) { // Must rotate first to use the unmodified value. 5862 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 5863 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 5864 } else { 5865 z_srl(crc, 8); // Unsigned shift, clear leftmost 8 bits. 5866 rotate_then_insert(tmp, val, 56-2, 63-2, 2, true); // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest. 5867 } 5868 z_x(crc, Address(table, tmp, 0)); 5869 } 5870 5871 /** 5872 * uint32_t crc; 5873 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 5874 */ 5875 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 5876 fold_byte_crc32(crc, crc, table, tmp); 5877 } 5878 5879 /** 5880 * Emits code to update CRC-32 with a byte value according to constants in table. 5881 * 5882 * @param [in,out]crc Register containing the crc. 5883 * @param [in]val Register containing the byte to fold into the CRC. 5884 * @param [in]table Register containing the table of crc constants. 5885 * 5886 * uint32_t crc; 5887 * val = crc_table[(val ^ crc) & 0xFF]; 5888 * crc = val ^ (crc >> 8); 5889 */ 5890 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 5891 z_xr(val, crc); 5892 fold_byte_crc32(crc, val, table, val); 5893 } 5894 5895 5896 /** 5897 * @param crc register containing existing CRC (32-bit) 5898 * @param buf register pointing to input byte buffer (byte*) 5899 * @param len register containing number of bytes 5900 * @param table register pointing to CRC table 5901 */ 5902 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, 5903 Register data, bool invertCRC) { 5904 assert_different_registers(crc, buf, len, table, data); 5905 5906 Label L_mainLoop, L_done; 5907 const int mainLoop_stepping = 1; 5908 5909 // Process all bytes in a single-byte loop. 5910 z_ltr(len, len); 5911 z_brnh(L_done); 5912 5913 if (invertCRC) { 5914 not_(crc, noreg, false); // ~c 5915 } 5916 5917 bind(L_mainLoop); 5918 z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register. 5919 add2reg(buf, mainLoop_stepping); // Advance buffer position. 5920 update_byte_crc32(crc, data, table); 5921 z_brct(len, L_mainLoop); // Iterate. 5922 5923 if (invertCRC) { 5924 not_(crc, noreg, false); // ~c 5925 } 5926 5927 bind(L_done); 5928 } 5929 5930 /** 5931 * Emits code to update CRC-32 with a 4-byte value according to constants in table. 5932 * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c. 
5933 * 5934 */ 5935 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc, 5936 Register t0, Register t1, Register t2, Register t3) { 5937 // This is what we implement (the DOBIG4 part): 5938 // 5939 // #define DOBIG4 c ^= *++buf4; \ 5940 // c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ 5941 // crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] 5942 // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 5943 const int ix0 = 4*(4*CRC32_COLUMN_SIZE); 5944 const int ix1 = 5*(4*CRC32_COLUMN_SIZE); 5945 const int ix2 = 6*(4*CRC32_COLUMN_SIZE); 5946 const int ix3 = 7*(4*CRC32_COLUMN_SIZE); 5947 5948 // XOR crc with next four bytes of buffer. 5949 lgr_if_needed(t0, crc); 5950 z_x(t0, Address(buf, bufDisp)); 5951 if (bufInc != 0) { 5952 add2reg(buf, bufInc); 5953 } 5954 5955 // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices. 5956 rotate_then_insert(t3, t0, 56-2, 63-2, 2, true); // ((c >> 0) & 0xff) << 2 5957 rotate_then_insert(t2, t0, 56-2, 63-2, 2-8, true); // ((c >> 8) & 0xff) << 2 5958 rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true); // ((c >> 16) & 0xff) << 2 5959 rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true); // ((c >> 24) & 0xff) << 2 5960 5961 // Load pre-calculated table values. 5962 // Use columns 4..7 for big-endian. 5963 z_ly(t3, Address(table, t3, (intptr_t)ix0)); 5964 z_ly(t2, Address(table, t2, (intptr_t)ix1)); 5965 z_ly(t1, Address(table, t1, (intptr_t)ix2)); 5966 z_ly(t0, Address(table, t0, (intptr_t)ix3)); 5967 5968 // Calculate new crc from table values. 5969 z_xr(t2, t3); 5970 z_xr(t0, t1); 5971 z_xr(t0, t2); // Now crc contains the final checksum value. 5972 lgr_if_needed(crc, t0); 5973 } 5974 5975 /** 5976 * @param crc register containing existing CRC (32-bit) 5977 * @param buf register pointing to input byte buffer (byte*) 5978 * @param len register containing number of bytes 5979 * @param table register pointing to CRC table 5980 * 5981 * uses Z_R10..Z_R13 as work register. Must be saved/restored by caller! 5982 */ 5983 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table, 5984 Register t0, Register t1, Register t2, Register t3) { 5985 assert_different_registers(crc, buf, len, table); 5986 5987 Label L_mainLoop, L_tail; 5988 Register data = t0; 5989 Register ctr = Z_R0; 5990 const int mainLoop_stepping = 8; 5991 const int tailLoop_stepping = 1; 5992 const int log_stepping = exact_log2(mainLoop_stepping); 5993 5994 // Don't test for len <= 0 here. This pathological case should not occur anyway. 5995 // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles. 5996 // The situation itself is detected and handled correctly by the conditional branches 5997 // following aghi(len, -stepping) and aghi(len, +stepping). 5998 5999 not_(crc, noreg, false); // 1s complement of crc 6000 6001 #if 0 6002 { 6003 // Pre-mainLoop alignment did not show any positive effect on performance. 6004 // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment. 6005 6006 z_cghi(len, mainLoop_stepping); // Alignment is useless for short data streams. 6007 z_brnh(L_tail); 6008 6009 // Align buf to word (4-byte) boundary. 6010 z_lcr(ctr, buf); 6011 rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc 6012 z_sgfr(len, ctr); // Remaining len after alignment. 
6013
6014     update_byteLoop_crc32(crc, buf, ctr, table, data, false);
6015   }
6016 #endif
6017
6018   // Check for short (<mainLoop_stepping bytes) buffer.
6019   z_srag(ctr, len, log_stepping);
6020   z_brnh(L_tail);
6021
6022   z_lrvr(crc, crc); // Reverse byte order because we are dealing with big-endian data.
6023   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6024
6025   BIND(L_mainLoop);
6026   update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
6027   update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
6028   z_brct(ctr, L_mainLoop); // Iterate.
6029
6030   z_lrvr(crc, crc); // Reverse byte order back to original.
6031
6032   // Process last few (<8) bytes of buffer.
6033   BIND(L_tail);
6034   update_byteLoop_crc32(crc, buf, len, table, data, false);
6035
6036   not_(crc, noreg, false); // 1s complement of crc
6037 }
6038
6039 /**
6040  * @param crc   register containing existing CRC (32-bit)
6041  * @param buf   register pointing to input byte buffer (byte*)
6042  * @param len   register containing number of bytes
6043  * @param table register pointing to CRC table
6044  *
6045  * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
6046  */
6047 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6048                                         Register t0, Register t1, Register t2, Register t3) {
6049   assert_different_registers(crc, buf, len, table);
6050
6051   Label L_mainLoop, L_tail;
6052   Register data = t0;
6053   Register ctr = Z_R0;
6054   const int mainLoop_stepping = 4;
6055   const int log_stepping = exact_log2(mainLoop_stepping);
6056
6057   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6058   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6059   // The situation itself is detected and handled correctly by the conditional branches
6060   // following aghi(len, -stepping) and aghi(len, +stepping).
6061
6062   not_(crc, noreg, false); // 1s complement of crc
6063
6064   // Check for short (<4 bytes) buffer.
6065   z_srag(ctr, len, log_stepping);
6066   z_brnh(L_tail);
6067
6068   z_lrvr(crc, crc); // Reverse byte order because we are dealing with big-endian data.
6069   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6070
6071   BIND(L_mainLoop);
6072   update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6073   z_brct(ctr, L_mainLoop); // Iterate.
6074   z_lrvr(crc, crc); // Reverse byte order back to original.
6075
6076   // Process last few (<4) bytes of buffer.

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
 */
void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                                        Register t0,  Register t1,  Register t2,  Register t3) {
  assert_different_registers(crc, buf, len, table);

  Label L_mainLoop, L_tail;
  Register  data = t0;
  Register  ctr  = Z_R0;
  const int mainLoop_stepping = 4;
  const int log_stepping      = exact_log2(mainLoop_stepping);

  // Don't test for len <= 0 here. This pathological case should not occur anyway.
  // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
  // The situation itself is detected and handled correctly by the conditional branches
  // following aghi(len, -stepping) and aghi(len, +stepping).

  not_(crc, noreg, false);  // One's complement of crc.

  // Check for short (<4 bytes) buffer.
  z_srag(ctr, len, log_stepping);
  z_brnh(L_tail);

  z_lrvr(crc, crc);  // Reverse byte order because we are dealing with big-endian data.
  rotate_then_insert(len, len, 64-log_stepping, 63, 0, true);  // #bytes for tailLoop

  BIND(L_mainLoop);
  update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
  z_brct(ctr, L_mainLoop);  // Iterate.
  z_lrvr(crc, crc);  // Reverse byte order back to original.

  // Process last few (<4) bytes of buffer.
  BIND(L_tail);
  update_byteLoop_crc32(crc, buf, len, table, data, false);

  not_(crc, noreg, false);  // One's complement of crc.
}

/**
 * @param crc   register containing existing CRC (32-bit)
 * @param buf   register pointing to input byte buffer (byte*)
 * @param len   register containing number of bytes
 * @param table register pointing to CRC table
 */
void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                                        Register t0,  Register t1,  Register t2,  Register t3) {
  assert_different_registers(crc, buf, len, table);
  Register data = t0;

  update_byteLoop_crc32(crc, buf, len, table, data, true);
}

void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp) {
  assert_different_registers(crc, buf, len, table, tmp);

  not_(crc, noreg, false);  // ~c

  z_llgc(tmp, Address(buf, (intptr_t)0));  // Load current input byte zero-extended, to avoid garbage in the upper register half.
  update_byte_crc32(crc, tmp, table);

  not_(crc, noreg, false);  // ~c
}

//
// Code for BigInteger::multiplyToLen() intrinsic.
//

// dest_lo += src1 + src2
// dest_hi += carry1 + carry2
// Z_R7 is destroyed!
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
                                     Register src1,    Register src2) {
  clear_reg(Z_R7);
  z_algr(dest_lo, src1);
  z_alcgr(dest_hi, Z_R7);
  z_algr(dest_lo, src2);
  z_alcgr(dest_hi, Z_R7);
}
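
// For reference, what add2_with_carry computes, in C. A sketch that assumes
// a compiler-provided unsigned __int128; the add-with-carry sequence above
// achieves the same without a 128-bit type:
//
//   unsigned __int128 sum = ((unsigned __int128)dest_hi << 64) | dest_lo;
//   sum += src1;
//   sum += src2;
//   dest_hi = (uint64_t)(sum >> 64);
//   dest_lo = (uint64_t)sum;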

// Multiply 64 bit by 64 bit first loop.
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
                                           Register x_xstart,
                                           Register y, Register y_idx,
                                           Register z,
                                           Register carry,
                                           Register product,
                                           Register idx, Register kdx) {
  // jlong carry, x[], y[], z[];
  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //   huge_128 product = y[idx] * x[xstart] + carry;
  //   z[kdx] = (jlong)product;
  //   carry  = (jlong)(product >>> 64);
  // }
  // z[xstart] = carry;

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  z_aghi(xstart, -1);
  z_brl(L_one_x);  // Special case: length of x is 1.

  // Load next two integers of x.
  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));


  bind(L_first_loop);

  z_aghi(idx, -1);
  z_brl(L_first_loop_exit);
  z_aghi(idx, -1);
  z_brl(L_one_y);

  // Load next two integers of y.
  z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
  mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));


  bind(L_multiply);

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, y_idx);      // multiplicand * y_idx -> product::multiplicand
  clear_reg(Z_R7);
  z_algr(product_low, carry);  // Add carry to result.
  z_alcgr(product, Z_R7);      // Add carry of the last addition.
  add2reg(kdx, -2);

  // Store result.
  z_sllg(Z_R7, kdx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0));
  lgr_if_needed(carry, product);
  z_bru(L_first_loop);


  bind(L_one_y);  // Load one 32 bit portion of y as (0,value).

  clear_reg(y_idx);
  mem2reg_opt(y_idx, Address(y, (intptr_t)0), false);
  z_bru(L_multiply);


  bind(L_one_x);  // Load one 32 bit portion of x as (0,value).

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t)0), false);
  z_bru(L_first_loop);

  bind(L_first_loop_exit);
}

// Multiply 64 bit by 64 bit and add 128 bit.
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
                                            Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product,
                                            int offset) {
  // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  // z[kdx] = (jlong)product;

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  mem2reg_opt(yz_idx, Address(y, Z_R7, offset));

  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);  // multiplicand * yz_idx -> product::multiplicand
  mem2reg_opt(yz_idx, Address(z, Z_R7, offset));

  add2_with_carry(product, product_low, carry, yz_idx);

  z_sllg(Z_R7, idx, LogBytesPerInt);  // Z_R7 was destroyed by add2_with_carry.
  reg2mem_opt(product_low, Address(z, Z_R7, offset));
}
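
// For reference, one multiply_add_128_x_128 step in C. A sketch with array
// indexing abstracted (the code addresses y and z via idx*4 + offset bytes);
// z_mlgr places the 128-bit product into the even/odd register pair
// product::product_low, modeled here with unsigned __int128:
//
//   unsigned __int128 p = (unsigned __int128)x_xstart * y_word
//                       + z_word + carry;
//   z_word  = (uint64_t)p;         // Low half back to memory.
//   product = (uint64_t)(p >> 64); // High half becomes the next carry.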

// Multiply 128 bit by 128 bit. Unrolled inner loop.
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
                                             Register y, Register z,
                                             Register yz_idx, Register idx,
                                             Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  // jlong carry, x[], y[], z[];
  // int kdx = ystart+1;
  // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //   z[kdx+idx+1] = (jlong)product;
  //   jlong carry2 = (jlong)(product >>> 64);
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }
  // idx += 2;
  // if (idx > 0) {
  //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //   z[kdx+idx] = (jlong)product;
  //   carry = (jlong)(product >>> 64);
  // }

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  // Scale the index.
  lgr_if_needed(jdx, idx);
  and_imm(jdx, 0xfffffffffffffffcL);
  rshift(jdx, 2);


  bind(L_third_loop);

  z_aghi(jdx, -1);
  z_brl(L_third_loop_exit);
  add2reg(idx, -4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  lgr_if_needed(carry2, product);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  lgr_if_needed(carry, product);
  z_bru(L_third_loop);


  bind(L_third_loop_exit);  // Handle any left-over operand parts.

  and_imm(idx, 0x3);
  z_brz(L_post_third_loop_done);

  Label L_check_1;

  z_aghi(idx, -2);
  z_brl(L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  lgr_if_needed(carry, product);


  bind(L_check_1);

  add2reg(idx, 0x2);
  and_imm(idx, 0x1);
  z_aghi(idx, -1);
  z_brl(L_post_third_loop_done);

  Register multiplicand = product->successor();
  Register product_low  = multiplicand;

  z_sllg(Z_R7, idx, LogBytesPerInt);
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
  lgr_if_needed(multiplicand, x_xstart);
  z_mlgr(product, yz_idx);  // multiplicand * yz_idx -> product::multiplicand
  clear_reg(yz_idx);
  mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);

  add2_with_carry(product, product_low, yz_idx, carry);

  z_sllg(Z_R7, idx, LogBytesPerInt);
  reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
  rshift(product_low, 32);

  lshift(product, 32);
  z_ogr(product_low, product);
  lgr_if_needed(carry, product_low);

  bind(L_post_third_loop_done);
}
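
// For reference, the loop-count computation at the top of the unrolled loop
// above, in C terms (a sketch):
//
//   jdx = (idx & ~3ULL) >> 2;  // Number of full 4-int (two 64-bit) passes.
//
// The 0..3 leftover ints are handled after L_third_loop_exit: one more
// two-int step if at least two remain, then a final single-int step whose
// low 32 bits are stored and whose upper bits are re-packed into carry.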

void MacroAssembler::multiply_to_len(Register x, Register xlen,
                                     Register y, Register ylen,
                                     Register z,
                                     Register tmp1, Register tmp2,
                                     Register tmp3, Register tmp4,
                                     Register tmp5) {
  ShortBranchVerifier sbv(this);

  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
  assert_different_registers(x, xlen, y, ylen, z,
                             tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);

  z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);

  // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
  Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.

  const Register idx    = tmp1;
  const Register kdx    = tmp2;
  const Register xstart = tmp3;

  const Register y_idx  = tmp4;
  const Register carry  = tmp5;
  const Register product  = Z_R0_scratch;
  const Register x_xstart = Z_R8;

  // First Loop.
  //
  //   final static long LONG_MASK = 0xffffffffL;
  //   int xstart = xlen - 1;
  //   int ystart = ylen - 1;
  //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //     z[kdx] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[xstart] = (int)carry;
  //

  lgr_if_needed(idx, ylen);  // idx = ylen
  z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
  clear_reg(carry);          // carry = 0

  Label L_done;

  lgr_if_needed(xstart, xlen);
  z_aghi(xstart, -1);
  z_brl(L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  NearLabel L_second_loop;
  compare64_and_branch(kdx, RegisterOrConstant((intptr_t)0), bcondEqual, L_second_loop);

  NearLabel L_carry;
  z_aghi(kdx, -1);
  z_brz(L_carry);

  // Store lower 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  rshift(carry, 32);
  z_aghi(kdx, -1);


  bind(L_carry);

  // Store upper 32 bits of carry.
  z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);

  // Second and third (nested) loops.
  //
  //   for (int i = xstart-1; i >= 0; i--) { // Second loop
  //     carry = 0;
  //     for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //       long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                      (z[k] & LONG_MASK) + carry;
  //       z[k] = (int)product;
  //       carry = product >>> 32;
  //     }
  //     z[i] = (int)carry;
  //   }
  //
  //   i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);

  clear_reg(carry);          // carry = 0;
  lgr_if_needed(jdx, ylen);  // j = ystart+1

  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_done);

  // Use free slots in the current stackframe instead of push/pop.
  Address zsave(Z_SP, _z_abi(carg_1));
  reg2mem_opt(z, zsave);


  Label L_last_x;

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  load_address(z, Address(z, Z_R1_scratch, 4));  // z = z + k - j
  z_aghi(xstart, -1);        // i = xstart-1;
  z_brl(L_last_x);

  z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
  mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));


  Label L_third_loop_prologue;

  bind(L_third_loop_prologue);

  Address xsave(Z_SP, _z_abi(carg_2));
  Address xlensave(Z_SP, _z_abi(carg_3));
  Address ylensave(Z_SP, _z_abi(carg_4));

  reg2mem_opt(x, xsave);
  reg2mem_opt(xstart, xlensave);
  reg2mem_opt(ylen, ylensave);


  multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);

  mem2reg_opt(z, zsave);
  mem2reg_opt(x, xsave);
  mem2reg_opt(xlen, xlensave);  // This is the decrement of the loop counter!
  mem2reg_opt(ylen, ylensave);

  add2reg(tmp3, 1, xlen);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_aghi(tmp3, -1);
  z_brl(L_done);

  rshift(carry, 32);
  z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
  reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
  z_bru(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);

  clear_reg(x_xstart);
  mem2reg_opt(x_xstart, Address(x, (intptr_t)0), false);
  z_bru(L_third_loop_prologue);

  bind(L_done);

  z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
}
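
// For reference, the pointer rebasing used in the second loop above (a
// sketch of what the load_address accomplishes, not generated code):
//
//   z += xstart*4 + 4;   // load_address(z, Address(z, Z_R1_scratch, 4))
//
// Afterwards, register z points at &z[xstart + 1], so the inner loop can
// address result words via the y index alone (k - j is constant per pass).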

#ifndef PRODUCT

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) {
    z_bre(ok);
  } else {
    z_brne(ok);
  }
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "low".
void MacroAssembler::asm_assert_low(const char *msg, int id) {
  Label ok;
  z_brnl(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "high".
void MacroAssembler::asm_assert_high(const char *msg, int id) {
  Label ok;
  z_brnh(ok);
  stop(msg, id);
  bind(ok);
}

// Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
// Generates non-relocatable code.
void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
  Label ok;
  if (check_equal) { z_bre(ok); }
  else             { z_brne(ok); }
  stop_static(msg, id);
  bind(ok);
}

void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                                          Register mem_base, const char* msg, int id) {
  switch (size) {
    case 4:
      load_and_test_int(Z_R0, Address(mem_base, mem_offset));
      break;
    case 8:
      load_and_test_long(Z_R0, Address(mem_base, mem_offset));
      break;
    default:
      ShouldNotReachHere();
  }
  if (allow_relocation) { asm_assert(check_equal, msg, id); }
  else                  { asm_assert_static(check_equal, msg, id); }
}

// Check the condition
//   expected_size == FP - SP
// after transformation:
//   expected_size - FP + SP == 0
// Destroys Register expected_size if no tmp register is passed.
void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;
  } else if (tmp != expected_size) {
    z_lgr(tmp, expected_size);
  }
  z_algr(tmp, Z_SP);          // tmp = expected_size + SP
  z_slg(tmp, 0, Z_R0, Z_SP);  // tmp -= FP (the callers_sp stored at 0(Z_SP)).
  asm_assert_eq(msg, id);     // Assert tmp == 0.
}
#endif // !PRODUCT

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    unimplemented("", 117);
  }
}

// Plausibility check for oops.
void MacroAssembler::verify_oop(Register oop, const char* msg) {
  if (!VerifyOops) return;

  BLOCK_COMMENT("verify_oop {");
  Register tmp = Z_R0;
  unsigned int nbytes_save = 6 * 8;
  address entry = StubRoutines::verify_oop_subroutine_entry_address();
  save_return_pc();
  push_frame_abi160(nbytes_save);
  z_stmg(Z_R0, Z_R5, 160, Z_SP);

  z_lgr(Z_ARG2, oop);
  load_const(Z_ARG1, (address)msg);
  load_const(Z_R1, entry);
  z_lg(Z_R1, 0, Z_R1);
  call_c(Z_R1);

  z_lmg(Z_R0, Z_R5, 160, Z_SP);
  pop_frame();

  restore_return_pc();
  BLOCK_COMMENT("} verify_oop ");
}

const char* MacroAssembler::stop_types[] = {
  "stop",
  "untested",
  "unimplemented",
  "shouldnotreachhere"
};

static void stop_on_request(const char* tp, const char* msg) {
  tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
  guarantee(false, "Z assembly code requires stop: %s", msg);
}

void MacroAssembler::stop(int type, const char* msg, int id) {
  BLOCK_COMMENT(err_msg("stop: %s {", msg));

  // Setup arguments.
  load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
  load_const(Z_ARG2, (void*) msg);
  get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
  save_return_pc();  // Saves return pc Z_R14.
  push_frame_abi160(0);
  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps assures the disassembler finds
  // the proper beginning of the next instruction.
  z_illtrap();  // Illegal instruction.
  z_illtrap();  // Illegal instruction.

  BLOCK_COMMENT(" } stop");
}

// Special version of stop() for code size reduction.
// Reuses the previously generated call sequence, if any.
// Generates the call sequence on its own, if necessary.
// Note: This code will work only in non-relocatable code!
//       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
//       should be given for "hand-written" code, if all chain calls are in the same code blob.
//       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
  BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));

  // Setup arguments.
  if (allow_relocation) {
    // Relocatable version (for comparison purposes). Remove after some time.
    load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
    load_const(Z_ARG2, (void*) msg);
  } else {
    load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
    load_absolute_address(Z_ARG2, (address)msg);
  }
  if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
    BLOCK_COMMENT("branch to reentry point:");
    z_brc(bcondAlways, reentry);
  } else {
    BLOCK_COMMENT("reentry point:");
    reentry = pc();    // Re-entry point for subsequent stop calls.
    save_return_pc();  // Saves return pc Z_R14.
    push_frame_abi160(0);
    if (allow_relocation) {
      reentry = NULL;  // Prevent reentry if code relocation is allowed.
      call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    } else {
      call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
    }
    z_illtrap();  // Illegal instruction as emergency stop, should the above call return.
  }
  BLOCK_COMMENT(" } stop_chain");

  return reentry;
}
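
// Typical chaining pattern (a sketch, not an actual call site; stop_stop is
// assumed to be one of the stop type constants matching stop_types[], as
// indexed via stop_end above):
//
//   address reentry = NULL;
//   reentry = stop_chain(reentry, stop_stop, "assert A", 0, false);  // Emits full call sequence.
//   ...
//   reentry = stop_chain(reentry, stop_stop, "assert B", 0, false);  // Loads args, branches to shared sequence.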

// Special version of stop() for code size reduction.
// Assumes constant relative addresses for data and runtime call.
void MacroAssembler::stop_static(int type, const char* msg, int id) {
  stop_chain(NULL, type, msg, id, false);
}

void MacroAssembler::stop_subroutine() {
  unimplemented("stop_subroutine", 710);
}

// Prints msg to stdout from within generated code.
void MacroAssembler::warn(const char* msg) {
  RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
  load_absolute_address(Z_R1, (address) warning);
  load_absolute_address(Z_ARG1, (address) msg);
  (void) call(Z_R1);
  RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
}

#ifndef PRODUCT

// Write pattern 0x0101010101010101 in region [low-before, high+after].
void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
  if (!ZapEmptyStackFields) return;
  BLOCK_COMMENT("zap memory region {");
  load_const_optimized(val, 0x0101010101010101);
  int size = before + after;
  if (low == high && size < 5 && size > 0) {
    int offset = -before*BytesPerWord;
    for (int i = 0; i < size; ++i) {
      z_stg(val, Address(low, offset));
      offset += (1*BytesPerWord);
    }
  } else {
    add2reg(addr, -before*BytesPerWord, low);
    if (after) {
#ifdef ASSERT
      jlong check = after * BytesPerWord;
      assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable!");
#endif
      add2reg(high, after * BytesPerWord);
    }
    NearLabel loop;
    bind(loop);
    z_stg(val, Address(addr));
    add2reg(addr, 8);
    compare64_and_branch(addr, high, bcondNotHigh, loop);
    if (after) {
      add2reg(high, -after * BytesPerWord);
    }
  }
  BLOCK_COMMENT("} zap memory region");
}
#endif // !PRODUCT

SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
  _masm = masm;
  _masm->load_absolute_address(_rscratch, (address)flag_addr);
  _masm->load_and_test_int(_rscratch, Address(_rscratch));
  if (value) {
    _masm->z_brne(_label);  // Skip if true, i.e. != 0.
  } else {
    _masm->z_bre(_label);   // Skip if false, i.e. == 0.
  }
}

SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
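
// Typical use of SkipIfEqual (a sketch, not an actual call site;
// DTraceMethodProbes stands in for any bool flag with a stable address):
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // Code emitted here is executed only when DTraceMethodProbes is true;
//     // otherwise execution resumes at the label bound by the destructor.
//   }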