1 /*
   2  * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "gc/shared/cardTableModRefBS.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/intrinsicnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "prims/methodHandles.hpp"
  40 #include "registerSaver_s390.hpp"
  41 #include "runtime/biasedLocking.hpp"
  42 #include "runtime/icache.hpp"
  43 #include "runtime/interfaceSupport.hpp"
  44 #include "runtime/objectMonitor.hpp"
  45 #include "runtime/os.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubRoutines.hpp"
  48 #include "utilities/events.hpp"
  49 #include "utilities/macros.hpp"
  50 #if INCLUDE_ALL_GCS
  51 #include "gc/g1/g1CollectedHeap.inline.hpp"
  52 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  53 #include "gc/g1/heapRegion.hpp"
  54 #endif
  55 
  56 #include <ucontext.h>
  57 
  58 #define BLOCK_COMMENT(str) block_comment(str)
  59 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  60 
  61 // Move 32-bit register if destination and source are different.
  62 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  63   if (rs != rd) { z_lr(rd, rs); }
  64 }
  65 
  66 // Move register if destination and source are different.
  67 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  68   if (rs != rd) { z_lgr(rd, rs); }
  69 }
  70 
  71 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  72 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  73   if (rs != rd) { z_llgfr(rd, rs); }
  74 }
  75 
  76 // Move float register if destination and source are different.
  77 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  78   if (rs != rd) { z_ldr(rd, rs); }
  79 }
  80 
  81 // Move integer register if destination and source are different.
  82 // It is assumed that shorter-than-int types are already
  83 // appropriately sign-extended.
  84 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  85                                         BasicType src_type) {
  86   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  87   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  88 
  89   if (dst_type == src_type) {
  90     lgr_if_needed(dst, src); // Just move all 64 bits.
  91     return;
  92   }
  93 
  94   switch (dst_type) {
  95     // Do not support these types for now.
  96     //  case T_BOOLEAN:
  97     case T_BYTE:  // signed byte
  98       switch (src_type) {
  99         case T_INT:
 100           z_lgbr(dst, src);
 101           break;
 102         default:
 103           ShouldNotReachHere();
 104       }
 105       return;
 106 
 107     case T_CHAR:
 108     case T_SHORT:
 109       switch (src_type) {
 110         case T_INT:
 111           if (dst_type == T_CHAR) {
 112             z_llghr(dst, src);
 113           } else {
 114             z_lghr(dst, src);
 115           }
 116           break;
 117         default:
 118           ShouldNotReachHere();
 119       }
 120       return;
 121 
 122     case T_INT:
 123       switch (src_type) {
 124         case T_BOOLEAN:
 125         case T_BYTE:
 126         case T_CHAR:
 127         case T_SHORT:
 128         case T_INT:
 129         case T_LONG:
 130         case T_OBJECT:
 131         case T_ARRAY:
 132         case T_VOID:
 133         case T_ADDRESS:
 134           lr_if_needed(dst, src);
 135           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 136           return;
 137 
 138         default:
 139           assert(false, "non-integer src type");
 140           return;
 141       }
 142     case T_LONG:
 143       switch (src_type) {
 144         case T_BOOLEAN:
 145         case T_BYTE:
 146         case T_CHAR:
 147         case T_SHORT:
 148         case T_INT:
 149           z_lgfr(dst, src); // sign extension
 150           return;
 151 
 152         case T_LONG:
 153         case T_OBJECT:
 154         case T_ARRAY:
 155         case T_VOID:
 156         case T_ADDRESS:
 157           lgr_if_needed(dst, src);
 158           return;
 159 
 160         default:
 161           assert(false, "non-integer src type");
 162           return;
 163       }
 164       return;
 165     case T_OBJECT:
 166     case T_ARRAY:
 167     case T_VOID:
 168     case T_ADDRESS:
 169       switch (src_type) {
 170         // These types don't make sense to be converted to pointers:
 171         //      case T_BOOLEAN:
 172         //      case T_BYTE:
 173         //      case T_CHAR:
 174         //      case T_SHORT:
 175 
 176         case T_INT:
 177           z_llgfr(dst, src); // zero extension
 178           return;
 179 
 180         case T_LONG:
 181         case T_OBJECT:
 182         case T_ARRAY:
 183         case T_VOID:
 184         case T_ADDRESS:
 185           lgr_if_needed(dst, src);
 186           return;
 187 
 188         default:
 189           assert(false, "non-integer src type");
 190           return;
 191       }
 192       return;
 193     default:
 194       assert(false, "non-integer dst type");
 195       return;
 196   }
 197 }
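
// For illustration: a few typical conversions and the instruction one would expect this
// emitter to pick (register/type values chosen here, not taken from any caller):
//   move_reg_if_needed(dst, T_LONG,    src, T_INT)  -> LGFR  (sign-extend 32 -> 64)
//   move_reg_if_needed(dst, T_ADDRESS, src, T_INT)  -> LLGFR (zero-extend 32 -> 64)
//   move_reg_if_needed(dst, T_INT,     src, T_LONG) -> LR, emitted only if dst != src
// Identical source and destination types always collapse to a conditional LGR.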
 198 
 199 // Move float register if destination and source are different.
 200 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 201                                          FloatRegister src, BasicType src_type) {
 202   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 203   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 204   if (dst_type == src_type) {
 205     ldr_if_needed(dst, src); // Just move all 64 bits.
 206   } else {
 207     switch (dst_type) {
 208       case T_FLOAT:
 209         assert(src_type == T_DOUBLE, "invalid float type combination");
 210         z_ledbr(dst, src);
 211         return;
 212       case T_DOUBLE:
 213         assert(src_type == T_FLOAT, "invalid float type combination");
 214         z_ldebr(dst, src);
 215         return;
 216       default:
 217         assert(false, "non-float dst type");
 218         return;
 219     }
 220   }
 221 }
 222 
 223 // Optimized emitter for reg to mem operations.
 224 // Uses modern instructions if running on modern hardware, classic instructions
 225 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 226 // Data register (reg) cannot be used as work register.
 227 //
 228 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 229 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 230 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 231                                   int64_t       disp,
 232                                   Register      index,
 233                                   Register      base,
 234                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 235                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 236                                   Register      scratch) {
 237   index = (index == noreg) ? Z_R0 : index;
 238   if (Displacement::is_shortDisp(disp)) {
 239     (this->*classic)(reg, disp, index, base);
 240   } else {
 241     if (Displacement::is_validDisp(disp)) {
 242       (this->*modern)(reg, disp, index, base);
 243     } else {
 244       if (scratch != Z_R0 && scratch != Z_R1) {
 245         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 246       } else {
 247         if (scratch != Z_R0) {   // scratch == Z_R1
 248           if ((scratch == index) || (index == base)) {
 249             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 250           } else {
 251             add2reg(scratch, disp, base);
 252             (this->*classic)(reg, 0, index, scratch);
 253             if (base == scratch) {
 254               add2reg(base, -disp);  // Restore base.
 255             }
 256           }
 257         } else {   // scratch == Z_R0
 258           z_lgr(scratch, base);
 259           add2reg(base, disp);
 260           (this->*classic)(reg, 0, index, base);
 261           z_lgr(base, scratch);      // Restore base.
 262         }
 263       }
 264     }
 265   }
 266 }
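
// For illustration, the displacement ranges that drive the selection above (the same
// scheme is used by mem2freg_opt, reg2mem_opt and mem2reg_opt below):
//   disp in [0, 4095]         -> classic form (e.g. STD),  4-byte RX encoding
//   disp in [-524288, 524287] -> modern form  (e.g. STDY), 6-byte RXY encoding
//   otherwise                 -> reachable only with the help of a Z_R0/Z_R1 scratch
//                                register; without one, the modern form is emitted and
//                                is expected to fail its displacement check.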
 267 
 268 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 269   if (is_double) {
 270     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 271   } else {
 272     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 273   }
 274 }
 275 
 276 // Optimized emitter for mem to reg operations.
 277 // Uses modern instructions if running on modern hardware, classic instructions
 278 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 279 // Data register (reg) cannot be used as work register.
 280 //
 281 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 282 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 283 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 284                                   int64_t       disp,
 285                                   Register      index,
 286                                   Register      base,
 287                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 288                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 289                                   Register      scratch) {
 290   index = (index == noreg) ? Z_R0 : index;
 291   if (Displacement::is_shortDisp(disp)) {
 292     (this->*classic)(reg, disp, index, base);
 293   } else {
 294     if (Displacement::is_validDisp(disp)) {
 295       (this->*modern)(reg, disp, index, base);
 296     } else {
 297       if (scratch != Z_R0 && scratch != Z_R1) {
 298         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 299       } else {
 300         if (scratch != Z_R0) {   // scratch == Z_R1
 301           if ((scratch == index) || (index == base)) {
 302             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 303           } else {
 304             add2reg(scratch, disp, base);
 305             (this->*classic)(reg, 0, index, scratch);
 306             if (base == scratch) {
 307               add2reg(base, -disp);  // Restore base.
 308             }
 309           }
 310         } else {   // scratch == Z_R0
 311           z_lgr(scratch, base);
 312           add2reg(base, disp);
 313           (this->*classic)(reg, 0, index, base);
 314           z_lgr(base, scratch);      // Restore base.
 315         }
 316       }
 317     }
 318   }
 319 }
 320 
 321 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 322   if (is_double) {
 323     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 324   } else {
 325     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 326   }
 327 }
 328 
 329 // Optimized emitter for reg to mem operations.
 330 // Uses modern instructions if running on modern hardware, classic instructions
 331 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 332 // Data register (reg) cannot be used as work register.
 333 //
 334 // Don't rely on register locking, instead pass a scratch register
 335 // (Z_R0 by default).
 336 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 337 void MacroAssembler::reg2mem_opt(Register reg,
 338                                  int64_t  disp,
 339                                  Register index,
 340                                  Register base,
 341                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 342                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 343                                  Register scratch) {
 344   index = (index == noreg) ? Z_R0 : index;
 345   if (Displacement::is_shortDisp(disp)) {
 346     (this->*classic)(reg, disp, index, base);
 347   } else {
 348     if (Displacement::is_validDisp(disp)) {
 349       (this->*modern)(reg, disp, index, base);
 350     } else {
 351       if (scratch != Z_R0 && scratch != Z_R1) {
 352         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 353       } else {
 354         if (scratch != Z_R0) {   // scratch == Z_R1
 355           if ((scratch == index) || (index == base)) {
 356             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 357           } else {
 358             add2reg(scratch, disp, base);
 359             (this->*classic)(reg, 0, index, scratch);
 360             if (base == scratch) {
 361               add2reg(base, -disp);  // Restore base.
 362             }
 363           }
 364         } else {   // scratch == Z_R0
 365           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 366             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 367           } else {
 368             z_lgr(scratch, base);
 369             add2reg(base, disp);
 370             (this->*classic)(reg, 0, index, base);
 371             z_lgr(base, scratch);    // Restore base.
 372           }
 373         }
 374       }
 375     }
 376   }
 377 }
 378 
 379 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 380   int store_offset = offset();
 381   if (is_double) {
 382     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 383   } else {
 384     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 385   }
 386   return store_offset;
 387 }
 388 
 389 // Optimized emitter for mem to reg operations.
 390 // Uses modern instructions if running on modern hardware, classic instructions
 391 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 392 // Data register (reg) will be used as work register where possible.
 393 void MacroAssembler::mem2reg_opt(Register reg,
 394                                  int64_t  disp,
 395                                  Register index,
 396                                  Register base,
 397                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 398                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 399   index = (index == noreg) ? Z_R0 : index;
 400   if (Displacement::is_shortDisp(disp)) {
 401     (this->*classic)(reg, disp, index, base);
 402   } else {
 403     if (Displacement::is_validDisp(disp)) {
 404       (this->*modern)(reg, disp, index, base);
 405     } else {
 406       if ((reg == index) && (reg == base)) {
 407         z_sllg(reg, reg, 1);
 408         add2reg(reg, disp);
 409         (this->*classic)(reg, 0, noreg, reg);
 410       } else if ((reg == index) && (reg != Z_R0)) {
 411         add2reg(reg, disp);
 412         (this->*classic)(reg, 0, reg, base);
 413       } else if (reg == base) {
 414         add2reg(reg, disp);
 415         (this->*classic)(reg, 0, index, reg);
 416       } else if (reg != Z_R0) {
 417         add2reg(reg, disp, base);
 418         (this->*classic)(reg, 0, index, reg);
 419       } else { // reg == Z_R0 && reg != base here
 420         add2reg(base, disp);
 421         (this->*classic)(reg, 0, index, base);
 422         add2reg(base, -disp);
 423       }
 424     }
 425   }
 426 }
 427 
 428 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 429   if (is_double) {
 430     z_lg(reg, a);
 431   } else {
 432     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 433   }
 434 }
 435 
 436 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 437   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 438 }
 439 
 440 void MacroAssembler::and_imm(Register r, long mask,
 441                              Register tmp /* = Z_R0 */,
 442                              bool wide    /* = false */) {
 443   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 444 
 445   if (!wide) {
 446     z_nilf(r, mask);
 447     return;
 448   }
 449 
 450   assert(r != tmp, " need a different temporary register !");
 451   load_const_optimized(tmp, mask);
 452   z_ngr(r, tmp);
 453 }
 454 
 455 // Calculate the 1's complement.
 456 // Note: The condition code is neither preserved nor correctly set by this code!!!
 457 // Note: (wide == false) does not protect the high order half of the target register
 458 //       from alteration. It only serves as optimization hint for 32-bit results.
 459 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 460 
 461   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 462     z_xilf(r1, -1);
 463     if (wide) {
 464       z_xihf(r1, -1);
 465     }
 466   } else { // Distinct src and dst registers.
 467     if (VM_Version::has_DistinctOpnds()) {
 468       load_const_optimized(r1, -1);
 469       z_xgrk(r1, r2, r1);
 470     } else {
 471       if (wide) {
 472         z_lgr(r1, r2);
 473         z_xilf(r1, -1);
 474         z_xihf(r1, -1);
 475       } else {
 476         z_lr(r1, r2);
 477         z_xilf(r1, -1);
 478       }
 479     }
 480   }
 481 }
 482 
 483 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 484   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 485   assert(rBitPos <= 63,      "63   is rightmost bit position");
 486   assert(lBitPos <= rBitPos, "inverted selection interval");
 487   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 488 }
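
// For illustration (bit 0 is the leftmost/most significant bit, bit 63 the rightmost):
//   create_mask( 0, 63) == 0xffffffffffffffffUL   // select everything
//   create_mask(48, 63) == 0x000000000000ffffUL   // select the low-order halfword
//   create_mask( 0, 31) == 0xffffffff00000000UL   // select the high-order word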
 489 
 490 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 491 // Rotate src, then mask register contents such that only bits in range survive.
 492 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 493 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 494 // The caller must ensure that the selected range only contains bits with defined value.
 495 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 496                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 497   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 498   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 499   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 500   //  Pre-determine which parts of dst will be zero after shift/rotate.
 501   bool llZero  =  sll4rll && (nRotate >= 16);
 502   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 503   bool lfZero  = llZero && lhZero;
 504   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 505   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 506   bool hfZero  = hlZero && hhZero;
 507 
 508   // rotate then mask src operand.
 509   // if oneBits == true,  all bits outside selected range are 1s.
 510   // if oneBits == false, all bits outside selected range are 0s.
 511   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 512     if (dst32bit) {
 513       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 514     } else {
 515       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 516       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 517       else              { z_rllg(dst, src,  nRotate); }
 518     }
 519   } else {
 520     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 521     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 522     else              { z_rllg(dst, src,  nRotate); }
 523   }
 524 
 525   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 526   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 527   unsigned int   range_mask_l  = (unsigned int)range_mask;
 528   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 529   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 530   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 531   unsigned short range_mask_ll = (unsigned short)range_mask;
 532   // Works for z9 and newer H/W.
 533   if (oneBits) {
 534     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 535     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 536   } else {
 537     // All bits outside range become 0s
 538     if (((~range_mask_l) != 0) &&              !lfZero) {
 539       z_nilf(dst, range_mask_l);
 540     }
 541     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 542       z_nihf(dst, range_mask_h);
 543     }
 544   }
 545 }
 546 
 547 // Rotate src, then insert selected range from rotated src into dst.
 548 // Clear dst before, if requested.
 549 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 550                                         int nRotate, bool clear_dst) {
 551   // This version does not depend on src being zero-extended int2long.
 552   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 553   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 554 }
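
// For illustration: rotate_then_insert(dst, src, 48, 63, 0, true) emits a single RISBG
// that copies bits 48..63 of src into the same positions of dst and zeroes all other
// dst bits, i.e. a zero-extending 16-bit extract.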
 555 
 556 // Rotate src, then and selected range from rotated src into dst.
 557 // Set condition code only if so requested. Otherwise it is unpredictable.
 558 // See performance note in macroAssembler_s390.hpp for important information.
 559 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 560                                      int nRotate, bool test_only) {
 561   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 562   // This version does not depend on src being zero-extended int2long.
 563   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 564   z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 565 }
 566 
 567 // Rotate src, then or selected range from rotated src into dst.
 568 // Set condition code only if so requested. Otherwise it is unpredictable.
 569 // See performance note in macroAssembler_s390.hpp for important information.
 570 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 571                                     int nRotate, bool test_only) {
 572   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 573   // This version does not depend on src being zero-extended int2long.
 574   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 575   z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 576 }
 577 
 578 // Rotate src, then xor selected range from rotated src into dst.
 579 // Set condition code only if so requested. Otherwise it is unpredictable.
 580 // See performance note in macroAssembler_s390.hpp for important information.
 581 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 582                                      int nRotate, bool test_only) {
 583   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 584     // This version does not depend on src being zero-extended int2long.
 585   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 586   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 587 }
 588 
 589 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 590   if (inc.is_register()) {
 591     z_agr(r1, inc.as_register());
 592   } else { // constant
 593     intptr_t imm = inc.as_constant();
 594     add2reg(r1, imm);
 595   }
 596 }
 597 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
 598 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 599 // calculation and is thus rather slow.
 600 //
 601 // There is no handling for special cases, e.g. cval==0 or cval==1.
 602 //
 603 // Returns len of generated code block.
 604 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 605   int block_start = offset();
 606 
 607   bool sign_flip = cval < 0;
 608   cval = sign_flip ? -cval : cval;
 609 
 610   BLOCK_COMMENT("Reg64*Con16 {");
 611 
 612   int bit1 = cval & -cval;
 613   if (bit1 == cval) {
 614     z_sllg(rval, rval, exact_log2(bit1));
 615     if (sign_flip) { z_lcgr(rval, rval); }
 616   } else {
 617     int bit2 = (cval-bit1) & -(cval-bit1);
 618     if ((bit1+bit2) == cval) {
 619       z_sllg(work, rval, exact_log2(bit1));
 620       z_sllg(rval, rval, exact_log2(bit2));
 621       z_agr(rval, work);
 622       if (sign_flip) { z_lcgr(rval, rval); }
 623     } else {
 624       if (sign_flip) { z_mghi(rval, -cval); }
 625       else           { z_mghi(rval,  cval); }
 626     }
 627   }
 628   BLOCK_COMMENT("} Reg64*Con16");
 629 
 630   int block_end = offset();
 631   return block_end - block_start;
 632 }
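
// Worked example (illustration only): for cval == 10 the decomposition is bit1 == 2 and
// bit2 == 8, so the emitter produces
//   SLLG work, rval, 1    // work = rval * 2
//   SLLG rval, rval, 3    // rval = rval * 8
//   AGR  rval, work       // rval = rval * 10
// Only constants that are not the sum of (at most) two powers of two fall back to MGHI.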
 633 
 634 // Generic operation r1 := r2 + imm.
 635 //
 636 // Should produce the best code for each supported CPU version.
 637 // r2 == noreg yields r1 := r1 + imm
 638 // imm == 0 emits either no instruction or r1 := r2 !
 639 // NOTES: 1) Don't use this function where fixed sized
 640 //           instruction sequences are required!!!
 641 //        2) Don't use this function if condition code
 642 //           setting is required!
 643 //        3) Despite being declared as int64_t, the parameter imm
 644 //           must be a simm_32 value (= signed 32-bit integer).
 645 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 646   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 647 
 648   if (r2 == noreg) { r2 = r1; }
 649 
 650   // Handle special case imm == 0.
 651   if (imm == 0) {
 652     lgr_if_needed(r1, r2);
 653     // Nothing else to do.
 654     return;
 655   }
 656 
 657   if (!PreferLAoverADD || (r2 == Z_R0)) {
 658     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 659 
 660     // Can we encode imm in 16 bits signed?
 661     if (Immediate::is_simm16(imm)) {
 662       if (r1 == r2) {
 663         z_aghi(r1, imm);
 664         return;
 665       }
 666       if (distinctOpnds) {
 667         z_aghik(r1, r2, imm);
 668         return;
 669       }
 670       z_lgr(r1, r2);
 671       z_aghi(r1, imm);
 672       return;
 673     }
 674   } else {
 675     // Can we encode imm in 12 bits unsigned?
 676     if (Displacement::is_shortDisp(imm)) {
 677       z_la(r1, imm, r2);
 678       return;
 679     }
 680     // Can we encode imm in 20 bits signed?
 681     if (Displacement::is_validDisp(imm)) {
 682       // Always use LAY instruction, so we don't need the tmp register.
 683       z_lay(r1, imm, r2);
 684       return;
 685     }
 686 
 687   }
 688 
 689   // Can handle it (all possible values) with long immediates.
 690   lgr_if_needed(r1, r2);
 691   z_agfi(r1, imm);
 692 }
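
// For illustration, assuming PreferLAoverADD is off and the distinct-operands facility
// is available (register/immediate values chosen here for the example):
//   add2reg(r1, 8)        -> AGHI  r1, 8
//   add2reg(r1, 8, r2)    -> AGHIK r1, r2, 8          (for r1 != r2)
//   add2reg(r1, 0x76543)  -> (LGR r1, r2 if a distinct r2 was given) AGFI r1, 0x76543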
 693 
 694 // Generic operation r := b + x + d
 695 //
 696 // Addition of several operands with address generation semantics - sort of:
 697 //  - no restriction on the registers. Any register will do for any operand.
 698 //  - x == noreg: operand will be disregarded.
 699 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 700 //  - x == Z_R0:  just disregard
 701 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 702 //
 703 // The same restrictions as on add2reg() are valid!!!
 704 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 705   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 706 
 707   if (x == noreg) { x = Z_R0; }
 708   if (b == noreg) { b = r; }
 709 
 710   // Handle special case x == R0.
 711   if (x == Z_R0) {
 712     // Can simply add the immediate value to the base register.
 713     add2reg(r, d, b);
 714     return;
 715   }
 716 
 717   if (!PreferLAoverADD || (b == Z_R0)) {
 718     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 719     // Handle special case d == 0.
 720     if (d == 0) {
 721       if (b == x)        { z_sllg(r, b, 1); return; }
 722       if (r == x)        { z_agr(r, b);     return; }
 723       if (r == b)        { z_agr(r, x);     return; }
 724       if (distinctOpnds) { z_agrk(r, x, b); return; }
 725       z_lgr(r, b);
 726       z_agr(r, x);
 727     } else {
 728       if (x == b)             { z_sllg(r, x, 1); }
 729       else if (r == x)        { z_agr(r, b); }
 730       else if (r == b)        { z_agr(r, x); }
 731       else if (distinctOpnds) { z_agrk(r, x, b); }
 732       else {
 733         z_lgr(r, b);
 734         z_agr(r, x);
 735       }
 736       add2reg(r, d);
 737     }
 738   } else {
 739     // Can we encode imm in 12 bits unsigned?
 740     if (Displacement::is_shortDisp(d)) {
 741       z_la(r, d, x, b);
 742       return;
 743     }
 744     // Can we encode imm in 20 bits signed?
 745     if (Displacement::is_validDisp(d)) {
 746       z_lay(r, d, x, b);
 747       return;
 748     }
 749     z_la(r, 0, x, b);
 750     add2reg(r, d);
 751   }
 752 }
 753 
 754 // Generic emitter (32bit) for direct memory increment.
 755 // For optimal code, do not specify Z_R0 as temp register.
 756 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 757   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 758     z_asi(a, imm);
 759   } else {
 760     z_lgf(tmp, a);
 761     add2reg(tmp, imm);
 762     z_st(tmp, a);
 763   }
 764 }
 765 
 766 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 767   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 768     z_agsi(a, imm);
 769   } else {
 770     z_lg(tmp, a);
 771     add2reg(tmp, imm);
 772     z_stg(tmp, a);
 773   }
 774 }
 775 
 776 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 777   switch (size_in_bytes) {
 778     case  8: z_lg(dst, src); break;
 779     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 780     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 781     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 782     default: ShouldNotReachHere();
 783   }
 784 }
 785 
 786 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 787   switch (size_in_bytes) {
 788     case  8: z_stg(src, dst); break;
 789     case  4: z_st(src, dst); break;
 790     case  2: z_sth(src, dst); break;
 791     case  1: z_stc(src, dst); break;
 792     default: ShouldNotReachHere();
 793   }
 794 }
 795 
 796 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
 797 // a high-order summand in register tmp.
 798 //
 799 // return value: <  0: No split required, si20 actually has property uimm12.
 800 //               >= 0: Split performed. Use return value as uimm12 displacement and
 801 //                     tmp as index register.
 802 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 803   assert(Immediate::is_simm20(si20_offset), "sanity");
 804   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 805   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 806   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 807          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 808   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 809 
 810   Register work = accumulate? Z_R0 : tmp;
 811 
 812   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 813     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 814     z_slag(work, work, 12);
 815   } else {                      // Len of code = 0..10.
 816     if (ll_off == 0) { return -1; }
 817     // ll_off has 8 significant bits (at most) plus sign.
 818     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 819       z_llilh(work, ll_off >> 16);
 820       if (ll_off < 0) {                  // Sign-extension required.
 821         z_lgfr(work, work);
 822       }
 823     } else {
 824       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 825         z_llill(work, ll_off);
 826       } else {                           // Non-zero bits in both halfbytes.
 827         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 828         z_slag(work, work, 12);
 829       }
 830     }
 831   }
 832   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 833   return lg_off;
 834 }
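
// Worked example (illustration only): for si20_offset == 0x12345 the split yields
//   lg_off == 0x345    (returned, to be used as uimm12 displacement)
//   ll_off == 0x12000  (materialized in the work register, e.g. LGHI 0x12 / SLAG 12)
// With accumulate == true, tmp (preloaded with the base address by the callers below)
// is advanced by 0x12000, so that tmp + 0x345 addresses base + 0x12345.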
 835 
 836 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 837   if (Displacement::is_validDisp(si20)) {
 838     z_ley(t, si20, a);
 839   } else {
 840     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 841     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 842     // pool loads).
 843     bool accumulate    = true;
 844     bool fixed_codelen = true;
 845     Register work;
 846 
 847     if (fixed_codelen) {
 848       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 849     } else {
 850       accumulate = (a == tmp);
 851     }
 852     work = tmp;
 853 
 854     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 855     if (disp12 < 0) {
 856       z_le(t, si20, work);
 857     } else {
 858       if (accumulate) {
 859         z_le(t, disp12, work);
 860       } else {
 861         z_le(t, disp12, work, a);
 862       }
 863     }
 864   }
 865 }
 866 
 867 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 868   if (Displacement::is_validDisp(si20)) {
 869     z_ldy(t, si20, a);
 870   } else {
 871     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 872     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 873     // pool loads).
 874     bool accumulate    = true;
 875     bool fixed_codelen = true;
 876     Register work;
 877 
 878     if (fixed_codelen) {
 879       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 880     } else {
 881       accumulate = (a == tmp);
 882     }
 883     work = tmp;
 884 
 885     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 886     if (disp12 < 0) {
 887       z_ld(t, si20, work);
 888     } else {
 889       if (accumulate) {
 890         z_ld(t, disp12, work);
 891       } else {
 892         z_ld(t, disp12, work, a);
 893       }
 894     }
 895   }
 896 }
 897 
 898 // PCrelative TOC access.
 899 // Returns distance (in bytes) from current position to start of consts section.
 900 // Returns 0 (zero) if no consts section exists or if it has size zero.
 901 long MacroAssembler::toc_distance() {
 902   CodeSection* cs = code()->consts();
 903   return (long)((cs != NULL) ? cs->start()-pc() : 0);
 904 }
 905 
 906 // The implementation on x86/sparc assumes that constant and instruction section are
 907 // adjacent, but this doesn't hold here. Two special situations may occur that we must
 908 // be able to handle:
 909 //   1. The const section may be located apart from the inst section.
 910 //   2. The const section may be empty.
 911 // In both cases, we use the const section's start address to compute the "TOC".
 912 // This seems to occur only temporarily; in the final step we always seem to end up
 913 // with the pc-relative variant.
 914 //
 915 // PC-relative offset could be +/-2**32 -> use long for disp
 916 // Furthermore, it makes no sense to have special code for
 917 // adjacent const and inst sections.
 918 void MacroAssembler::load_toc(Register Rtoc) {
 919   // Simply use distance from start of const section (should be patched in the end).
 920   long disp = toc_distance();
 921 
 922   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 923   relocate(rspec);
 924   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 925 }
 926 
 927 // PCrelative TOC access.
 928 // Load from anywhere pcrelative (with relocation of load instr)
 929 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 930   address          pc             = this->pc();
 931   ptrdiff_t        total_distance = dataLocation - pc;
 932   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 933 
 934   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 935   assert(total_distance != 0, "sanity");
 936 
 937   // Some extra safety net.
 938   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 939     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 940   }
 941 
 942   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 943   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 944 }
 945 
 946 
 947 // PCrelative TOC access.
 948 // Load from anywhere pcrelative (with relocation of load instr)
 949 // loaded addr has to be relocated when added to constant pool.
 950 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 951   address          pc             = this->pc();
 952   ptrdiff_t        total_distance = addrLocation - pc;
 953   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 954 
 955   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 956 
 957   // Some extra safety net.
 958   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 959     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 960   }
 961 
 962   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 963   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 964 }
 965 
 966 // Generic operation: load a value from memory and test.
 967 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 968 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 969   z_lb(dst, a);
 970   z_ltr(dst, dst);
 971 }
 972 
 973 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 974   int64_t disp = a.disp20();
 975   if (Displacement::is_shortDisp(disp)) {
 976     z_lh(dst, a);
 977   } else if (Displacement::is_longDisp(disp)) {
 978     z_lhy(dst, a);
 979   } else {
 980     guarantee(false, "displacement out of range");
 981   }
 982   z_ltr(dst, dst);
 983 }
 984 
 985 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 986   z_lt(dst, a);
 987 }
 988 
 989 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 990   z_ltgf(dst, a);
 991 }
 992 
 993 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 994   z_ltg(dst, a);
 995 }
 996 
 997 // Test a bit in memory.
 998 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
 999   assert(a.index() == noreg, "no index reg allowed in testbit");
1000   if (bit <= 7) {
1001     z_tm(a.disp() + 3, a.base(), 1 << bit);
1002   } else if (bit <= 15) {
1003     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
1004   } else if (bit <= 23) {
1005     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
1006   } else if (bit <= 31) {
1007     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
1008   } else {
1009     ShouldNotReachHere();
1010   }
1011 }
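
// For illustration: s390 is big-endian, so in a 32-bit field the least significant bit
// lives in the byte at the highest address. testbit(a, 0) therefore emits
//   TM disp+3(base), 0x01
// and the byte offset walks 3, 2, 1, 0 as the bit number grows.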
1012 
1013 // Test a bit in a register. Result is reflected in CC.
1014 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1015   if (bitPos < 16) {
1016     z_tmll(r, 1U<<bitPos);
1017   } else if (bitPos < 32) {
1018     z_tmlh(r, 1U<<(bitPos-16));
1019   } else if (bitPos < 48) {
1020     z_tmhl(r, 1U<<(bitPos-32));
1021   } else if (bitPos < 64) {
1022     z_tmhh(r, 1U<<(bitPos-48));
1023   } else {
1024     ShouldNotReachHere();
1025   }
1026 }
1027 
1028 void MacroAssembler::prefetch_read(Address a) {
1029   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1030 }
1031 void MacroAssembler::prefetch_update(Address a) {
1032   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1033 }
1034 
1035 // Clear a register, i.e. load const zero into reg.
1036 // Return len (in bytes) of generated instruction(s).
1037 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1038 // set_cc:    Use instruction that sets the condition code, if true.
1039 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1040   unsigned int start_off = offset();
1041   if (whole_reg) {
1042     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1043   } else {  // Only 32bit register.
1044     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1045   }
1046   return offset() - start_off;
1047 }
1048 
1049 #ifdef ASSERT
1050 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1051   switch (pattern_len) {
1052     case 1:
1053       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
1054     case 2:
1055       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
1056     case 4:
1057       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
1058     case 8:
1059       return load_const_optimized_rtn_len(r, pattern, true);
1060       break;
1061     default:
1062       guarantee(false, "preset_reg: bad len");
1063   }
1064   return 0;
1065 }
1066 #endif
1067 
1068 // addr: Address descriptor of the memory to clear. The index register will not be used!
1069 // size: Number of bytes to clear.
1070 //    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
1071 //    !!! Use store_const() instead                  !!!
1072 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1073   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1074 
1075   if (size == 1) {
1076     z_mvi(addr, 0);
1077     return;
1078   }
1079 
1080   switch (size) {
1081     case 2: z_mvhhi(addr, 0);
1082       return;
1083     case 4: z_mvhi(addr, 0);
1084       return;
1085     case 8: z_mvghi(addr, 0);
1086       return;
1087     default: ; // Fallthru to xc.
1088   }
1089 
1090   z_xc(addr, size, addr);
1091 }
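
// For illustration: sizes 1/2/4/8 map to MVI/MVHHI/MVHI/MVGHI with a zero immediate.
// Any other size up to 256 falls through to XC with identical source and target, which
// XORs the field with itself and thereby zeroes it (byte by byte, hence not atomic).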
1092 
1093 void MacroAssembler::align(int modulus) {
1094   while (offset() % modulus != 0) z_nop();
1095 }
1096 
1097 // Special version for non-relocatable code if required alignment
1098 // is larger than CodeEntryAlignment.
1099 void MacroAssembler::align_address(int modulus) {
1100   while ((uintptr_t)pc() % modulus != 0) z_nop();
1101 }
1102 
1103 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1104                                          Register temp_reg,
1105                                          int64_t extra_slot_offset) {
1106   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1107   // which issues an unnecessary add instruction.
1108   int stackElementSize = Interpreter::stackElementSize;
1109   int64_t offset = extra_slot_offset * stackElementSize;
1110   const Register argbase = Z_esp;
1111   if (arg_slot.is_constant()) {
1112     offset += arg_slot.as_constant() * stackElementSize;
1113     return Address(argbase, offset);
1114   }
1115   // else
1116   assert(temp_reg != noreg, "must specify");
1117   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1118   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1119   return Address(argbase, temp_reg, offset);
1120 }
1121 
1122 
1123 //===================================================================
1124 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1125 //===================================================================
1126 //===            P A T C H A B L E   C O N S T A N T S            ===
1127 //===================================================================
1128 
1129 
1130 //---------------------------------------------------
1131 //  Load (patchable) constant into register
1132 //---------------------------------------------------
1133 
1134 
1135 // Load absolute address (and try to optimize).
1136 //   Note: This method is usable only for position-fixed code,
1137 //         referring to a position-fixed target location.
1138 //         If not so, relocations and patching must be used.
1139 void MacroAssembler::load_absolute_address(Register d, address addr) {
1140   assert(addr != NULL, "should not happen");
1141   BLOCK_COMMENT("load_absolute_address:");
1142   if (addr == NULL) {
1143     z_larl(d, pc()); // Dummy emit for size calc.
1144     return;
1145   }
1146 
1147   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1148     z_larl(d, addr);
1149     return;
1150   }
1151 
1152   load_const_optimized(d, (long)addr);
1153 }
1154 
1155 // Load a 64bit constant.
1156 // Patchable code sequence, but not atomically patchable.
1157 // Make sure to keep code size constant -> no value-dependent optimizations.
1158 // Do not kill condition code.
1159 void MacroAssembler::load_const(Register t, long x) {
1160   Assembler::z_iihf(t, (int)(x >> 32));
1161   Assembler::z_iilf(t, (int)(x & 0xffffffff));
1162 }
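
// For illustration: load_const(t, 0x1122334455667788L) emits the fixed 12-byte sequence
//   IIHF t, 0x11223344   // insert the high-order word
//   IILF t, 0x55667788   // insert the low-order word
// Both instructions are always emitted, independent of the value, so patch_const() can
// later rewrite the constant in place.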
1163 
1164 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1165 // Patchable code sequence, but not atomically patchable.
1166 // Make sure to keep code size constant -> no value-dependent optimizations.
1167 // Do not kill condition code.
1168 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1169   if (sign_extend) { Assembler::z_lgfi(t, x); }
1170   else             { Assembler::z_llilf(t, x); }
1171 }
1172 
1173 // Load narrow oop constant, no decompression.
1174 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1175   assert(UseCompressedOops, "must be on to call this method");
1176   load_const_32to64(t, a, false /*sign_extend*/);
1177 }
1178 
1179 // Load narrow klass constant, compression required.
1180 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1181   assert(UseCompressedClassPointers, "must be on to call this method");
1182   narrowKlass encoded_k = Klass::encode_klass(k);
1183   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1184 }
1185 
1186 //------------------------------------------------------
1187 //  Compare (patchable) constant with register.
1188 //------------------------------------------------------
1189 
1190 // Compare narrow oop in reg with narrow oop constant, no decompression.
1191 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1192   assert(UseCompressedOops, "must be on to call this method");
1193 
1194   Assembler::z_clfi(oop1, oop2);
1195 }
1196 
1197 // Compare narrow oop in reg with narrow oop constant, no decompression.
1198 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1199   assert(UseCompressedClassPointers, "must be on to call this method");
1200   narrowKlass encoded_k = Klass::encode_klass(klass2);
1201 
1202   Assembler::z_clfi(klass1, encoded_k);
1203 }
1204 
1205 //----------------------------------------------------------
1206 //  Check which kind of load_constant we have here.
1207 //----------------------------------------------------------
1208 
1209 // Detection of CPU version dependent load_const sequence.
1210 // The detection is valid only for code sequences generated by load_const,
1211 // not load_const_optimized.
1212 bool MacroAssembler::is_load_const(address a) {
1213   unsigned long inst1, inst2;
1214   unsigned int  len1,  len2;
1215 
1216   len1 = get_instruction(a, &inst1);
1217   len2 = get_instruction(a + len1, &inst2);
1218 
1219   return is_z_iihf(inst1) && is_z_iilf(inst2);
1220 }
1221 
1222 // Detection of CPU version dependent load_const_32to64 sequence.
1223 // Mostly used for narrow oops and narrow Klass pointers.
1224 // The detection is valid only for code sequences generated by load_const_32to64.
1225 bool MacroAssembler::is_load_const_32to64(address pos) {
1226   unsigned long inst1;
1227   unsigned int len1;
1228 
1229   len1 = get_instruction(pos, &inst1);
1230   return is_z_llilf(inst1);
1231 }
1232 
1233 // Detection of compare_immediate_narrow sequence.
1234 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1235 bool MacroAssembler::is_compare_immediate32(address pos) {
1236   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1237 }
1238 
1239 // Detection of compare_immediate_narrow sequence.
1240 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1241 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1242   return is_compare_immediate32(pos);
1243 }
1244 
1245 // Detection of compare_immediate_narrow sequence.
1246 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1247 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1248   return is_compare_immediate32(pos);
1249 }
1250 
1251 //-----------------------------------
1252 //  patch the load_constant
1253 //-----------------------------------
1254 
1255 // CPU-version dependent patching of load_const.
1256 void MacroAssembler::patch_const(address a, long x) {
1257   assert(is_load_const(a), "not a load of a constant");
1258   set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
1259   set_imm32((address)(a + 6), (int)(x & 0xffffffff));
1260 }
1261 
1262 // Patching the value of CPU version dependent load_const_32to64 sequence.
1263 // The passed ptr MUST be in compressed format!
1264 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1265   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1266 
1267   set_imm32(pos, np);
1268   return 6;
1269 }
1270 
1271 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1272 // The passed ptr MUST be in compressed format!
1273 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1274   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1275 
1276   set_imm32(pos, np);
1277   return 6;
1278 }
1279 
1280 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1281 // The passed ptr must NOT be in compressed format!
1282 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1283   assert(UseCompressedOops, "Can only patch compressed oops");
1284 
1285   narrowOop no = oopDesc::encode_heap_oop(o);
1286   return patch_load_const_32to64(pos, no);
1287 }
1288 
1289 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1290 // The passed ptr must NOT be in compressed format!
1291 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1292   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1293 
1294   narrowKlass nk = Klass::encode_klass(k);
1295   return patch_load_const_32to64(pos, nk);
1296 }
1297 
1298 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1299 // The passed ptr must NOT be in compressed format!
1300 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1301   assert(UseCompressedOops, "Can only patch compressed oops");
1302 
1303   narrowOop no = oopDesc::encode_heap_oop(o);
1304   return patch_compare_immediate_32(pos, no);
1305 }
1306 
1307 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1308 // The passed ptr must NOT be in compressed format!
1309 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1310   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1311 
1312   narrowKlass nk = Klass::encode_klass(k);
1313   return patch_compare_immediate_32(pos, nk);
1314 }
1315 
1316 //------------------------------------------------------------------------
1317 //  Extract the constant from a load_constant instruction stream.
1318 //------------------------------------------------------------------------
1319 
1320 // Get constant from a load_const sequence.
1321 long MacroAssembler::get_const(address a) {
1322   assert(is_load_const(a), "not a load of a constant");
1323   unsigned long x;
1324   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1325   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1326   return (long) x;
1327 }
1328 
1329 //--------------------------------------
1330 //  Store a constant in memory.
1331 //--------------------------------------
1332 
1333 // General emitter to move a constant to memory.
1334 // The store is atomic.
1335 //  o Address must be given in RS format (no index register)
1336 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1337 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1338 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1339 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1340 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
1341 int MacroAssembler::store_const(const Address &dest, long imm,
1342                                 unsigned int lm, unsigned int lc,
1343                                 Register scratch) {
1344   int64_t  disp = dest.disp();
1345   Register base = dest.base();
1346   assert(!dest.has_index(), "not supported");
1347   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1348   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1349   assert(lm>=lc, "memory slot too small");
1350   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1351   assert(Displacement::is_validDisp(disp), "displacement out of range");
1352 
1353   bool is_shortDisp = Displacement::is_shortDisp(disp);
1354   int store_offset = -1;
1355 
1356   // For target len == 1 it's easy.
1357   if (lm == 1) {
1358     store_offset = offset();
1359     if (is_shortDisp) {
1360       z_mvi(disp, base, imm);
1361       return store_offset;
1362     } else {
1363       z_mviy(disp, base, imm);
1364       return store_offset;
1365     }
1366   }
1367 
1368   // All the "good stuff" takes an unsigned displacement.
1369   if (is_shortDisp) {
1370     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1371 
1372     store_offset = offset();
1373     switch (lm) {
1374       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1375         z_mvhhi(disp, base, imm);
1376         return store_offset;
1377       case 4:
1378         if (Immediate::is_simm16(imm)) {
1379           z_mvhi(disp, base, imm);
1380           return store_offset;
1381         }
1382         break;
1383       case 8:
1384         if (Immediate::is_simm16(imm)) {
1385           z_mvghi(disp, base, imm);
1386           return store_offset;
1387         }
1388         break;
1389       default:
1390         ShouldNotReachHere();
1391         break;
1392     }
1393   }
1394 
1395   //  Can't optimize, so load value and store it.
1396   guarantee(scratch != noreg, " need a scratch register here !");
1397   if (imm != 0) {
1398     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1399   } else {
1400     // Leave CC alone!!
1401     (void) clear_reg(scratch, true, false); // Indicate unused result.
1402   }
1403 
1404   store_offset = offset();
1405   if (is_shortDisp) {
1406     switch (lm) {
1407       case 2:
1408         z_sth(scratch, disp, Z_R0, base);
1409         return store_offset;
1410       case 4:
1411         z_st(scratch, disp, Z_R0, base);
1412         return store_offset;
1413       case 8:
1414         z_stg(scratch, disp, Z_R0, base);
1415         return store_offset;
1416       default:
1417         ShouldNotReachHere();
1418         break;
1419     }
1420   } else {
1421     switch (lm) {
1422       case 2:
1423         z_sthy(scratch, disp, Z_R0, base);
1424         return store_offset;
1425       case 4:
1426         z_sty(scratch, disp, Z_R0, base);
1427         return store_offset;
1428       case 8:
1429         z_stg(scratch, disp, Z_R0, base);
1430         return store_offset;
1431       default:
1432         ShouldNotReachHere();
1433         break;
1434     }
1435   }
1436   return -1; // should not reach here
1437 }
1438 
1439 //===================================================================
1440 //===       N O T   P A T C H A B L E   C O N S T A N T S         ===
1441 //===================================================================
1442 
1443 // Load constant x into register t with a fast instruction sequence
1444 // depending on the bits in x. Preserves CC under all circumstances.
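     // The return value is the length in bytes of the sequence; with emit == false,
     // nothing is emitted and only the length is computed.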
1445 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1446   if (x == 0) {
1447     int len;
1448     if (emit) {
1449       len = clear_reg(t, true, false);
1450     } else {
1451       len = 4;
1452     }
1453     return len;
1454   }
1455 
1456   if (Immediate::is_simm16(x)) {
1457     if (emit) { z_lghi(t, x); }
1458     return 4;
1459   }
1460 
1461   // 64 bit value: | part1 | part2 | part3 | part4 |
1462   // At least one part is not zero!
1463   int part1 = ((x >> 32) & 0xffff0000) >> 16;
1464   int part2 = (x >> 32) & 0x0000ffff;
1465   int part3 = (x & 0xffff0000) >> 16;
1466   int part4 = (x & 0x0000ffff);
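       // part1 = bits 63..48, part2 = bits 47..32, part3 = bits 31..16, part4 = bits 15..0.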
1467 
1468   // Lower word only (unsigned).
1469   if ((part1 == 0) && (part2 == 0)) {
1470     if (part3 == 0) {
1471       if (emit) z_llill(t, part4);
1472       return 4;
1473     }
1474     if (part4 == 0) {
1475       if (emit) z_llilh(t, part3);
1476       return 4;
1477     }
1478     if (emit) z_llilf(t, (int)(x & 0xffffffff));
1479     return 6;
1480   }
1481 
1482   // Upper word only.
1483   if ((part3 == 0) && (part4 == 0)) {
1484     if (part1 == 0) {
1485       if (emit) z_llihl(t, part2);
1486       return 4;
1487     }
1488     if (part2 == 0) {
1489       if (emit) z_llihh(t, part1);
1490       return 4;
1491     }
1492     if (emit) z_llihf(t, (int)(x >> 32));
1493     return 6;
1494   }
1495 
1496   // Lower word only (signed).
1497   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1498     if (emit) z_lgfi(t, (int)(x & 0xffffffff));
1499     return 6;
1500   }
1501 
1502   int len = 0;
1503 
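       // General case needs two instructions: set the upper word first with a zeroing llih*
       // load, then merge the lower word with a non-destructive iil* insert.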
1504   if ((part1 == 0) || (part2 == 0)) {
1505     if (part1 == 0) {
1506       if (emit) z_llihl(t, part2);
1507       len += 4;
1508     } else {
1509       if (emit) z_llihh(t, part1);
1510       len += 4;
1511     }
1512   } else {
1513     if (emit) z_llihf(t, (int)(x >> 32));
1514     len += 6;
1515   }
1516 
1517   if ((part3 == 0) || (part4 == 0)) {
1518     if (part3 == 0) {
1519       if (emit) z_iill(t, part4);
1520       len += 4;
1521     } else {
1522       if (emit) z_iilh(t, part3);
1523       len += 4;
1524     }
1525   } else {
1526     if (emit) z_iilf(t, (int)(x & 0xffffffff));
1527     len += 6;
1528   }
1529   return len;
1530 }
1531 
1532 //=====================================================================
1533 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1534 //=====================================================================
1535 
1536 // Note: In the worst case, one of the scratch registers is destroyed!!!
1537 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1538   // Right operand is constant.
1539   if (x2.is_constant()) {
1540     jlong value = x2.as_constant();
1541     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1542     return;
1543   }
1544 
1545   // Right operand is in register.
1546   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1547 }
1548 
1549 // Note: In the worst case, one of the scratch registers is destroyed!!!
1550 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1551   // Right operand is constant.
1552   if (x2.is_constant()) {
1553     jlong value = x2.as_constant();
1554     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1555     return;
1556   }
1557 
1558   // Right operand is in register.
1559   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1560 }
1561 
1562 // Note: In the worst case, one of the scratch registers is destroyed!!!
1563 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1564   // Right operand is constant.
1565   if (x2.is_constant()) {
1566     jlong value = x2.as_constant();
1567     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1568     return;
1569   }
1570 
1571   // Right operand is in register.
1572   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1573 }
1574 
1575 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1576   // Right operand is constant.
1577   if (x2.is_constant()) {
1578     jlong value = x2.as_constant();
1579     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1580     return;
1581   }
1582 
1583   // Right operand is in register.
1584   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1585 }
1586 
1587 // Generate an optimal branch to the branch target.
1588 // Optimal means that a relative branch (brc or brcl) is used if the
1589 // branch distance is short enough. Loading the target address into a
1590 // register and branching via reg is used as fallback only.
1591 //
1592 // Used registers:
1593 //   Z_R1 - work reg. Holds branch target address.
1594 //          Used in fallback case only.
1595 //
1596 // This version of branch_optimized is good for cases where the target address is known
1597 // and constant, i.e. is never changed (no relocation, no patching).
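     // (brc reaches targets within approx. +/-64 KiB, brcl within approx. +/-4 GiB of the branch.)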
1598 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1599   address branch_origin = pc();
1600 
1601   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1602     z_brc(cond, branch_addr);
1603   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1604     z_brcl(cond, branch_addr);
1605   } else {
1606     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1607     z_bcr(cond, Z_R1);
1608   }
1609 }
1610 
1611 // This version of branch_optimized is good for cases where the target address
1612 // is potentially not yet known at the time the code is emitted.
1613 //
1614 // One very common case is a branch to an unbound label which is handled here.
1615 // The caller might know (or hope) that the branch distance is short enough
1616 // to be encoded in a 16-bit relative address. In that case, the caller will pass a
1617 // NearLabel branch_target.
1618 // Care must be taken with unbound labels. Each call to target(label) creates
1619 // an entry in the patch queue for that label to patch all references of the label
1620 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1621 // an assertion fires at patch time.
1622 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1623   if (branch_target.is_bound()) {
1624     address branch_addr = target(branch_target);
1625     branch_optimized(cond, branch_addr);
1626   } else if (branch_target.is_near()) {
1627     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1628   } else {
1629     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1630   }
1631 }
1632 
1633 // Generate an optimal compare and branch to the branch target.
1634 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1635 // branch distance is short enough. Loading the target address into a
1636 // register and branching via reg is used as fallback only.
1637 //
1638 // Input:
1639 //   r1 - left compare operand
1640 //   r2 - right compare operand
1641 void MacroAssembler::compare_and_branch_optimized(Register r1,
1642                                                   Register r2,
1643                                                   Assembler::branch_condition cond,
1644                                                   address  branch_addr,
1645                                                   bool     len64,
1646                                                   bool     has_sign) {
1647   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
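       // casenum: 0 = 32-bit signed, 1 = 32-bit unsigned, 2 = 64-bit signed, 3 = 64-bit unsigned.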
1648 
1649   address branch_origin = pc();
1650   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1651     switch (casenum) {
1652       case 0: z_crj( r1, r2, cond, branch_addr); break;
1653       case 1: z_clrj( r1, r2, cond, branch_addr); break;
1654       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1655       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1656       default: ShouldNotReachHere(); break;
1657     }
1658   } else {
1659     switch (casenum) {
1660       case 0: z_cr( r1, r2); break;
1661       case 1: z_clr(r1, r2); break;
1662       case 2: z_cgr(r1, r2); break;
1663       case 3: z_clgr(r1, r2); break;
1664       default: ShouldNotReachHere(); break;
1665     }
1666     branch_optimized(cond, branch_addr);
1667   }
1668 }
1669 
1670 // Generate an optimal compare and branch to the branch target.
1671 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1672 // branch distance is short enough. Loading the target address into a
1673 // register and branching via reg is used as fallback only.
1674 //
1675 // Input:
1676 //   r1 - left compare operand (in register)
1677 //   x2 - right compare operand (immediate)
1678 void MacroAssembler::compare_and_branch_optimized(Register r1,
1679                                                   jlong    x2,
1680                                                   Assembler::branch_condition cond,
1681                                                   Label&   branch_target,
1682                                                   bool     len64,
1683                                                   bool     has_sign) {
1684   address      branch_origin = pc();
1685   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1686   bool         is_RelAddr16  = branch_target.is_near() ||
1687                                (branch_target.is_bound() &&
1688                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1689   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
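       // cij/clij/cgij/clgij encode only an 8-bit immediate and a 16-bit relative target,
       // hence the x2_imm8 and is_RelAddr16 guards above.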
1690 
1691   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1692     switch (casenum) {
1693       case 0: z_cij( r1, x2, cond, branch_target); break;
1694       case 1: z_clij(r1, x2, cond, branch_target); break;
1695       case 2: z_cgij(r1, x2, cond, branch_target); break;
1696       case 3: z_clgij(r1, x2, cond, branch_target); break;
1697       default: ShouldNotReachHere(); break;
1698     }
1699     return;
1700   }
1701 
1702   if (x2 == 0) {
1703     switch (casenum) {
1704       case 0: z_ltr(r1, r1); break;
1705       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1706       case 2: z_ltgr(r1, r1); break;
1707       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1708       default: ShouldNotReachHere(); break;
1709     }
1710   } else {
1711     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1712       switch (casenum) {
1713         case 0: z_chi(r1, x2); break;
1714         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1715         case 2: z_cghi(r1, x2); break;
1716         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1717         default: break;
1718       }
1719     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1720       switch (casenum) {
1721         case 0: z_cfi( r1, x2); break;
1722         case 1: z_clfi(r1, x2); break;
1723         case 2: z_cgfi(r1, x2); break;
1724         case 3: z_clgfi(r1, x2); break;
1725         default: ShouldNotReachHere(); break;
1726       }
1727     } else {
1728       // No instruction with immediate operand possible, so load into register.
1729       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1730       load_const_optimized(scratch, x2);
1731       switch (casenum) {
1732         case 0: z_cr( r1, scratch); break;
1733         case 1: z_clr(r1, scratch); break;
1734         case 2: z_cgr(r1, scratch); break;
1735         case 3: z_clgr(r1, scratch); break;
1736         default: ShouldNotReachHere(); break;
1737       }
1738     }
1739   }
1740   branch_optimized(cond, branch_target);
1741 }
1742 
1743 // Generate an optimal compare and branch to the branch target.
1744 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1745 // branch distance is short enough. Loading the target address into a
1746 // register and branching via reg is used as fallback only.
1747 //
1748 // Input:
1749 //   r1 - left compare operand
1750 //   r2 - right compare operand
1751 void MacroAssembler::compare_and_branch_optimized(Register r1,
1752                                                   Register r2,
1753                                                   Assembler::branch_condition cond,
1754                                                   Label&   branch_target,
1755                                                   bool     len64,
1756                                                   bool     has_sign) {
1757   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1758 
1759   if (branch_target.is_bound()) {
1760     address branch_addr = target(branch_target);
1761     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1762   } else {
1763     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1764       switch (casenum) {
1765         case 0: z_crj(  r1, r2, cond, branch_target); break;
1766         case 1: z_clrj( r1, r2, cond, branch_target); break;
1767         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1768         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1769         default: ShouldNotReachHere(); break;
1770       }
1771     } else {
1772       switch (casenum) {
1773         case 0: z_cr( r1, r2); break;
1774         case 1: z_clr(r1, r2); break;
1775         case 2: z_cgr(r1, r2); break;
1776         case 3: z_clgr(r1, r2); break;
1777         default: ShouldNotReachHere(); break;
1778       }
1779       branch_optimized(cond, branch_target);
1780     }
1781   }
1782 }
1783 
1784 //===========================================================================
1785 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1786 //===========================================================================
1787 
1788 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1789   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1790   int index = oop_recorder()->allocate_metadata_index(obj);
1791   RelocationHolder rspec = metadata_Relocation::spec(index);
1792   return AddressLiteral((address)obj, rspec);
1793 }
1794 
1795 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1796   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1797   int index = oop_recorder()->find_index(obj);
1798   RelocationHolder rspec = metadata_Relocation::spec(index);
1799   return AddressLiteral((address)obj, rspec);
1800 }
1801 
1802 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1803   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1804   int oop_index = oop_recorder()->allocate_oop_index(obj);
1805   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1806 }
1807 
1808 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1809   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1810   int oop_index = oop_recorder()->find_index(obj);
1811   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1812 }
1813 
1814 // NOTE: destroys r
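     // Computes r = (r | -r) >> 31 (logical shift): 0 if r was 0, 1 otherwise,
     // since r | -r has its sign bit set for every non-zero 32-bit value.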
1815 void MacroAssembler::c2bool(Register r, Register t) {
1816   z_lcr(t, r);   // t = -r
1817   z_or(r, t);    // r = -r OR r
1818   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1819 }
1820 
1821 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1822                                                       Register tmp,
1823                                                       int offset) {
1824   intptr_t value = *delayed_value_addr;
1825   if (value != 0) {
1826     return RegisterOrConstant(value + offset);
1827   }
1828 
1829   BLOCK_COMMENT("delayed_value {");
1830   // Load indirectly to solve generation ordering problem.
1831   load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1832   z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1833 
1834 #ifdef ASSERT
1835   NearLabel L;
1836   compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1837   z_illtrap();
1838   bind(L);
1839 #endif
1840 
1841   if (offset != 0) {
1842     z_agfi(tmp, offset);               // tmp = tmp + offset;
1843   }
1844 
1845   BLOCK_COMMENT("} delayed_value");
1846   return RegisterOrConstant(tmp);
1847 }
1848 
1849 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1850 // and return the resulting instruction.
1851 // Dest_pos and inst_pos are 32 bit only. These parms can only designate
1852 // relative positions.
1853 // Use correct argument types. Do not pre-calculate distance.
1854 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1855   int c = 0;
1856   unsigned long patched_inst = 0;
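       // c records which instruction pattern matched; it is used for error reporting only.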
1857   if (is_call_pcrelative_short(inst) ||
1858       is_branch_pcrelative_short(inst) ||
1859       is_branchoncount_pcrelative_short(inst) ||
1860       is_branchonindex32_pcrelative_short(inst)) {
1861     c = 1;
1862     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1863     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1864     patched_inst = (inst & ~m) | v;
1865   } else if (is_compareandbranch_pcrelative_short(inst)) {
1866     c = 2;
1867     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1868     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1869     patched_inst = (inst & ~m) | v;
1870   } else if (is_branchonindex64_pcrelative_short(inst)) {
1871     c = 3;
1872     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1873     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1874     patched_inst = (inst & ~m) | v;
1875   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1876     c = 4;
1877     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1878     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1879     patched_inst = (inst & ~m) | v;
1880   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1881     c = 5;
1882     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1883     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1884     patched_inst = (inst & ~m) | v;
1885   } else {
1886     print_dbg_msg(tty, inst, "not a relative branch", 0);
1887     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1888     ShouldNotReachHere();
1889   }
1890 
1891   long new_off = get_pcrel_offset(patched_inst);
1892   if (new_off != (dest_pos-inst_pos)) {
1893     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1894     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1895     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1896 #ifdef LUCY_DBG
1897     VM_Version::z_SIGSEGV();
1898 #endif
1899     ShouldNotReachHere();
1900   }
1901   return patched_inst;
1902 }
1903 
1904 // Only called when binding labels (share/vm/asm/assembler.cpp)
1905 // Pass arguments as intended. Do not pre-calculate distance.
1906 void MacroAssembler::pd_patch_instruction(address branch, address target) {
1907   unsigned long stub_inst;
1908   int           inst_len = get_instruction(branch, &stub_inst);
1909 
1910   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1911 }
1912 
1913 
1914 // Extract relative address (aka offset).
1915 // inv_simm16 works for 4-byte instructions only.
1916 // compare and branch instructions are 6-byte and have a 16bit offset "in the middle".
1917 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1918 
1919   if (MacroAssembler::is_pcrelative_short(inst)) {
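         // A 4-byte instruction leaves the upper half of the instruction image zero; its 16-bit
         // offset is extracted with inv_simm16. A 6-byte instruction (e.g. compare and branch)
         // carries the offset further left and needs inv_simm16_48.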
1920     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1921       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1922     } else {
1923       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1924     }
1925   }
1926 
1927   if (MacroAssembler::is_pcrelative_long(inst)) {
1928     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1929   }
1930 
1931   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1932 #ifdef LUCY_DBG
1933   VM_Version::z_SIGSEGV();
1934 #else
1935   ShouldNotReachHere();
1936 #endif
1937   return -1;
1938 }
1939 
1940 long MacroAssembler::get_pcrel_offset(address pc) {
1941   unsigned long inst;
1942   unsigned int  len = get_instruction(pc, &inst);
1943 
1944 #ifdef ASSERT
1945   long offset;
1946   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1947     offset = get_pcrel_offset(inst);
1948   } else {
1949     offset = -1;
1950   }
1951 
1952   if (offset == -1) {
1953     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1954 #ifdef LUCY_DBG
1955     VM_Version::z_SIGSEGV();
1956 #else
1957     ShouldNotReachHere();
1958 #endif
1959   }
1960   return offset;
1961 #else
1962   return get_pcrel_offset(inst);
1963 #endif // ASSERT
1964 }
1965 
1966 // Get target address from pc-relative instructions.
1967 address MacroAssembler::get_target_addr_pcrel(address pc) {
1968   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1969   return pc + get_pcrel_offset(pc);
1970 }
1971 
1972 // Patch pc relative load address.
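     // All pc-relative formats handled here keep the relative immediate at byte offset 2,
     // hence the 16/32-bit offset is stored at pc+2 below.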
1973 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1974   unsigned long inst;
1975   // Offset is +/- 2**32 -> use long.
1976   ptrdiff_t distance = con - pc;
1977 
1978   get_instruction(pc, &inst);
1979 
1980   if (is_pcrelative_short(inst)) {
1981     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1982 
1983     // Some extra safety net.
1984     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1985       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1986       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1987       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1988     }
1989     return;
1990   }
1991 
1992   if (is_pcrelative_long(inst)) {
1993     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1994 
1995     // Some Extra safety net.
1996     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1997       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1998       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
1999       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
2000     }
2001     return;
2002   }
2003 
2004   guarantee(false, "not a pcrelative instruction to patch!");
2005 }
2006 
2007 // "Current PC" here means the address just behind the basr instruction.
2008 address MacroAssembler::get_PC(Register result) {
2009   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2010   return pc();
2011 }
2012 
2013 // Get current PC + offset.
2014 // Offset given in bytes, must be even!
2015 // "Current PC" here means the address of the larl instruction plus the given offset.
2016 address MacroAssembler::get_PC(Register result, int64_t offset) {
2017   address here = pc();
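       // larl takes its immediate in halfwords, hence the byte offset is divided by 2.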
2018   z_larl(result, offset/2); // Save target instruction address in result.
2019   return here + offset;
2020 }
2021 
2022 // Resize_frame with SP(new) = SP(old) - [offset].
2023 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2024 {
2025   assert_different_registers(offset, fp, Z_SP);
2026   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2027 
2028   z_sgr(Z_SP, offset);
2029   z_stg(fp, _z_abi(callers_sp), Z_SP);
2030 }
2031 
2032 // Resize_frame with SP(new) = [newSP] + offset.
2033 //   This emitter is useful if we already have calculated a pointer
2034 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2035 //   but need some additional space, e.g. for spilling.
2036 //   newSP    is the pre-calculated pointer. It must not be modified.
2037 //   fp       holds, or is filled with, the frame pointer.
2038 //   offset   is the additional increment which is added to addr to form the new SP.
2039 //            Note: specify a negative value to reserve more space!
2040 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2041 //                    It does not guarantee that fp contains the frame pointer at the end.
2042 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2043   assert_different_registers(newSP, fp, Z_SP);
2044 
2045   if (load_fp) {
2046     z_lg(fp, _z_abi(callers_sp), Z_SP);
2047   }
2048 
2049   add2reg(Z_SP, offset, newSP);
2050   z_stg(fp, _z_abi(callers_sp), Z_SP);
2051 }
2052 
2053 // Resize_frame with SP(new) = [newSP].
2054 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2055 //                    It does not guarantee that fp contains the frame pointer at the end.
2056 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2057   assert_different_registers(newSP, fp, Z_SP);
2058 
2059   if (load_fp) {
2060     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2061   }
2062 
2063   z_lgr(Z_SP, newSP);
2064   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2065     z_stg(fp, _z_abi(callers_sp), newSP);
2066   } else {
2067     z_stg(fp, _z_abi(callers_sp), Z_SP);
2068   }
2069 }
2070 
2071 // Resize_frame with SP(new) = SP(old) + offset.
2072 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2073   assert_different_registers(fp, Z_SP);
2074 
2075   if (load_fp) {
2076     z_lg(fp, _z_abi(callers_sp), Z_SP);
2077   }
2078   add64(Z_SP, offset);
2079   z_stg(fp, _z_abi(callers_sp), Z_SP);
2080 }
2081 
2082 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2083 #ifdef ASSERT
2084   assert_different_registers(bytes, old_sp, Z_SP);
2085   if (!copy_sp) {
2086     z_cgr(old_sp, Z_SP);
2087     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2088   }
2089 #endif
2090   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2091   if (bytes_with_inverted_sign) {
2092     z_agr(Z_SP, bytes);
2093   } else {
2094     z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2095   }
2096   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2097 }
2098 
2099 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2100   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2101   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2102   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2103 
2104   // We must not write outside the current stack bounds (given by Z_SP).
2105   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2106   // We rely on Z_R0 by default to be available as scratch.
2107   z_lgr(scratch, Z_SP);
2108   add2reg(Z_SP, -offset);
2109   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2110 #ifdef ASSERT
2111   // Just make sure nobody uses the value in the default scratch register.
2112   // When another register is used, the caller might rely on it containing the frame pointer.
2113   if (scratch == Z_R0) {
2114     z_iihf(scratch, 0xbaadbabe);
2115     z_iilf(scratch, 0xdeadbeef);
2116   }
2117 #endif
2118   return offset;
2119 }
2120 
2121 // Push a frame of size `bytes' plus abi160 on top.
2122 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2123   BLOCK_COMMENT("push_frame_abi160 {");
2124   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2125   BLOCK_COMMENT("} push_frame_abi160");
2126   return res;
2127 }
2128 
2129 // Pop current C frame.
2130 void MacroAssembler::pop_frame() {
2131   BLOCK_COMMENT("pop_frame:");
2132   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2133 }
2134 
2135 // Pop current C frame and restore return PC register (Z_R14).
2136 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2137   BLOCK_COMMENT("pop_frame_restore_retPC:");
2138   int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2139   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2140   if (Displacement::is_validDisp(retPC_offset)) {
2141     z_lg(Z_R14, retPC_offset, Z_SP);
2142     add2reg(Z_SP, frame_size_in_bytes);
2143   } else {
2144     add2reg(Z_SP, frame_size_in_bytes);
2145     restore_return_pc();
2146   }
2147 }
2148 
2149 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2150   if (allow_relocation) {
2151     call_c(entry_point);
2152   } else {
2153     call_c_static(entry_point);
2154   }
2155 }
2156 
2157 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2158   bool allow_relocation = true;
2159   call_VM_leaf_base(entry_point, allow_relocation);
2160 }
2161 
2162 void MacroAssembler::call_VM_base(Register oop_result,
2163                                   Register last_java_sp,
2164                                   address  entry_point,
2165                                   bool     allow_relocation,
2166                                   bool     check_exceptions) { // Defaults to true.
2167   // Allow_relocation indicates, if true, that the generated code shall
2168   // be fit for code relocation or referenced data relocation. In other
2169   // words: all addresses must be considered variable. PC-relative addressing
2170   // is not possible then.
2171   // On the other hand, if (allow_relocation == false), addresses and offsets
2172   // may be considered stable, enabling us to take advantage of some PC-relative
2173   // addressing tweaks. These might improve performance and reduce code size.
2174 
2175   // Determine last_java_sp register.
2176   if (!last_java_sp->is_valid()) {
2177     last_java_sp = Z_SP;  // Load Z_SP as SP.
2178   }
2179 
2180   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2181 
2182   // ARG1 must hold thread address.
2183   z_lgr(Z_ARG1, Z_thread);
2184 
2185   address return_pc = NULL;
2186   if (allow_relocation) {
2187     return_pc = call_c(entry_point);
2188   } else {
2189     return_pc = call_c_static(entry_point);
2190   }
2191 
2192   reset_last_Java_frame(allow_relocation);
2193 
2194   // C++ interp handles this in the interpreter.
2195   check_and_handle_popframe(Z_thread);
2196   check_and_handle_earlyret(Z_thread);
2197 
2198   // Check for pending exceptions.
2199   if (check_exceptions) {
2200     // Check for pending exceptions (java_thread is set upon return).
2201     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2202 
2203     // This used to be a conditional jump to forward_exception. However, if the code
2204     // gets relocated, that branch might no longer reach its target. So we branch
2205     // around a stub call instead, which always reaches.
2206 
2207     Label ok;
2208     z_bre(ok); // Bcondequal is the same as bcondZero.
2209     call_stub(StubRoutines::forward_exception_entry());
2210     bind(ok);
2211   }
2212 
2213   // Get oop result if there is one and reset the value in the thread.
2214   if (oop_result->is_valid()) {
2215     get_vm_result(oop_result);
2216   }
2217 
2218   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2219 }
2220 
2221 void MacroAssembler::call_VM_base(Register oop_result,
2222                                   Register last_java_sp,
2223                                   address  entry_point,
2224                                   bool     check_exceptions) { // Defaults to true.
2225   bool allow_relocation = true;
2226   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2227 }
2228 
2229 // VM calls without explicit last_java_sp.
2230 
2231 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2232   // Call takes possible detour via InterpreterMacroAssembler.
2233   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2234 }
2235 
2236 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2237   // Z_ARG1 is reserved for the thread.
2238   lgr_if_needed(Z_ARG2, arg_1);
2239   call_VM(oop_result, entry_point, check_exceptions);
2240 }
2241 
2242 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2243   // Z_ARG1 is reserved for the thread.
2244   lgr_if_needed(Z_ARG2, arg_1);
2245   assert(arg_2 != Z_ARG2, "smashed argument");
2246   lgr_if_needed(Z_ARG3, arg_2);
2247   call_VM(oop_result, entry_point, check_exceptions);
2248 }
2249 
2250 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2251                              Register arg_3, bool check_exceptions) {
2252   // Z_ARG1 is reserved for the thread.
2253   lgr_if_needed(Z_ARG2, arg_1);
2254   assert(arg_2 != Z_ARG2, "smashed argument");
2255   lgr_if_needed(Z_ARG3, arg_2);
2256   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2257   lgr_if_needed(Z_ARG4, arg_3);
2258   call_VM(oop_result, entry_point, check_exceptions);
2259 }
2260 
2261 // VM static calls without explicit last_java_sp.
2262 
2263 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2264   // Call takes possible detour via InterpreterMacroAssembler.
2265   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2266 }
2267 
2268 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2269                                     Register arg_3, bool check_exceptions) {
2270   // Z_ARG1 is reserved for the thread.
2271   lgr_if_needed(Z_ARG2, arg_1);
2272   assert(arg_2 != Z_ARG2, "smashed argument");
2273   lgr_if_needed(Z_ARG3, arg_2);
2274   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2275   lgr_if_needed(Z_ARG4, arg_3);
2276   call_VM_static(oop_result, entry_point, check_exceptions);
2277 }
2278 
2279 // VM calls with explicit last_java_sp.
2280 
2281 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2282   // Call takes possible detour via InterpreterMacroAssembler.
2283   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2284 }
2285 
2286 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2287    // Z_ARG1 is reserved for the thread.
2288    lgr_if_needed(Z_ARG2, arg_1);
2289    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2290 }
2291 
2292 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2293                              Register arg_2, bool check_exceptions) {
2294    // Z_ARG1 is reserved for the thread.
2295    lgr_if_needed(Z_ARG2, arg_1);
2296    assert(arg_2 != Z_ARG2, "smashed argument");
2297    lgr_if_needed(Z_ARG3, arg_2);
2298    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2299 }
2300 
2301 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2302                              Register arg_2, Register arg_3, bool check_exceptions) {
2303   // Z_ARG1 is reserved for the thread.
2304   lgr_if_needed(Z_ARG2, arg_1);
2305   assert(arg_2 != Z_ARG2, "smashed argument");
2306   lgr_if_needed(Z_ARG3, arg_2);
2307   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2308   lgr_if_needed(Z_ARG4, arg_3);
2309   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2310 }
2311 
2312 // VM leaf calls.
2313 
2314 void MacroAssembler::call_VM_leaf(address entry_point) {
2315   // Call takes possible detour via InterpreterMacroAssembler.
2316   call_VM_leaf_base(entry_point, true);
2317 }
2318 
2319 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2320   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2321   call_VM_leaf(entry_point);
2322 }
2323 
2324 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2325   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2326   assert(arg_2 != Z_ARG1, "smashed argument");
2327   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2328   call_VM_leaf(entry_point);
2329 }
2330 
2331 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2332   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2333   assert(arg_2 != Z_ARG1, "smashed argument");
2334   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2335   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2336   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2337   call_VM_leaf(entry_point);
2338 }
2339 
2340 // Static VM leaf calls.
2341 // Really static VM leaf calls are never patched.
2342 
2343 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2344   // Call takes possible detour via InterpreterMacroAssembler.
2345   call_VM_leaf_base(entry_point, false);
2346 }
2347 
2348 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2349   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2350   call_VM_leaf_static(entry_point);
2351 }
2352 
2353 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2354   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2355   assert(arg_2 != Z_ARG1, "smashed argument");
2356   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2357   call_VM_leaf_static(entry_point);
2358 }
2359 
2360 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2361   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2362   assert(arg_2 != Z_ARG1, "smashed argument");
2363   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2364   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2365   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2366   call_VM_leaf_static(entry_point);
2367 }
2368 
2369 // Don't use detour via call_c(reg).
2370 address MacroAssembler::call_c(address function_entry) {
2371   load_const(Z_R1, function_entry);
2372   return call(Z_R1);
2373 }
2374 
2375 // Variant for really static (non-relocatable) calls which are never patched.
2376 address MacroAssembler::call_c_static(address function_entry) {
2377   load_absolute_address(Z_R1, function_entry);
2378 #if 0 // def ASSERT
2379   // Verify that call site did not move.
2380   load_const_optimized(Z_R0, function_entry);
2381   z_cgr(Z_R1, Z_R0);
2382   z_brc(bcondEqual, 3);
2383   z_illtrap(0xba);
2384 #endif
2385   return call(Z_R1);
2386 }
2387 
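     // Emit a call_far_patchable to function_entry. Returns the return pc of the call,
     // or NULL if no constant pool entry could be allocated.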
2388 address MacroAssembler::call_c_opt(address function_entry) {
2389   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2390   _last_calls_return_pc = success ? pc() : NULL;
2391   return _last_calls_return_pc;
2392 }
2393 
2394 // Identify a call_far_patchable instruction: LARL + LG + BASR
2395 //
2396 //    nop                   ; optionally, if required for alignment
2397 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2398 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2399 //
2400 // Code pattern will eventually get patched into variant2 (see below for detection code).
2401 //
2402 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2403   address iaddr = instruction_addr;
2404 
2405   // Check for the actual load instruction.
2406   if (!is_load_const_from_toc(iaddr)) { return false; }
2407   iaddr += load_const_from_toc_size();
2408 
2409   // Check for the call (BASR) instruction, finally.
2410   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2411   return is_call_byregister(iaddr);
2412 }
2413 
2414 // Identify a call_far_patchable instruction: BRASL
2415 //
2416 // Code pattern that suits atomic patching:
2417 //    nop                       ; Optionally, if required for alignment.
2418 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2419 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2420 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2421 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2422   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2423 
2424   // Check for correct number of leading nops.
2425   address iaddr;
2426   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2427     if (!is_z_nop(iaddr)) { return false; }
2428   }
2429   assert(iaddr == call_addr, "sanity");
2430 
2431   // --> Check for call instruction.
2432   if (is_call_far_pcrelative(call_addr)) {
2433     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2434     return true;
2435   }
2436 
2437   return false;
2438 }
2439 
2440 // Emit a 64 bit absolute call which is NOT mt-safely patchable.
2441 // If toc_offset == -2, then the destination of the call (= target) is emitted
2442 //                      to the constant pool and a runtime_call relocation is added
2443 //                      to the code buffer.
2444 // If toc_offset != -2, target must already be in the constant pool at
2445 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2446 //                      from the runtime_call relocation).
2447 // Special handling of emitting to scratch buffer when there is no constant pool.
2448 // Slightly changed code pattern. We emit an additional nop if we would
2449 // not end emitting at a word aligned address. This is to ensure
2450 // an atomically patchable displacement in brasl instructions.
2451 //
2452 // A call_far_patchable comes in different flavors:
2453 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2454 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2455 //  - BRASL                  (relative address of call target coded in instruction)
2456 // All flavors occupy the same amount of space. Length differences are compensated
2457 // by leading nops, such that the instruction sequence always ends at the same
2458 // byte offset. This is required to keep the return offset constant.
2459 // Furthermore, the return address (the end of the instruction sequence) is forced
2460 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2461 // need to patch the call target of the BRASL flavor.
2462 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2463 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2464   // Get current pc and ensure word alignment for end of instr sequence.
2465   const address start_pc = pc();
2466   const intptr_t       start_off = offset();
2467   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2468   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2469   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2470   const bool emit_relative_call  = !emit_target_to_pool &&
2471                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2472                                    ReoptimizeCallSequences &&
2473                                    !code_section()->scratch_emit();
2474 
2475   if (emit_relative_call) {
2476     // Add padding to get the same size as below.
2477     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2478     unsigned int current_padding;
2479     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2480     assert(current_padding == padding, "sanity");
2481 
2482     // relative call: len = 2(nop) + 6 (brasl)
2483     // CodeBlob resize cannot occur in this case because
2484     // this call is emitted into pre-existing space.
2485     z_nop(); // Prepend each BRASL with a nop.
2486     z_brasl(Z_R14, target);
2487   } else {
2488     // absolute call: Get address from TOC.
2489     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2490     if (emit_target_to_pool) {
2491       // When emitting the call for the first time, we do not need to use
2492       // the pc-relative version. It will be patched anyway, when the code
2493       // buffer is copied.
2494       // Relocation is not needed when !ReoptimizeCallSequences.
2495       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2496       AddressLiteral dest(target, rt);
2497       // Store_oop_in_toc() adds dest to the constant table. As a side effect, this kills
2498       // inst_mark(). Reset if possible.
2499       bool reset_mark = (inst_mark() == pc());
2500       tocOffset = store_oop_in_toc(dest);
2501       if (reset_mark) { set_inst_mark(); }
2502       if (tocOffset == -1) {
2503         return false; // Couldn't create constant pool entry.
2504       }
2505     }
2506     assert(offset() == start_off, "emit no code before this point!");
2507 
2508     address tocPos = pc() + tocOffset;
2509     if (emit_target_to_pool) {
2510       tocPos = code()->consts()->start() + tocOffset;
2511     }
2512     load_long_pcrelative(Z_R14, tocPos);
2513     z_basr(Z_R14, Z_R14);
2514   }
2515 
2516 #ifdef ASSERT
2517   // Assert that we can identify the emitted call.
2518   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2519   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2520 
2521   if (emit_target_to_pool) {
2522     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2523            "wrong encoding of dest address");
2524   }
2525 #endif
2526   return true; // success
2527 }
2528 
2529 // Identify a call_far_patchable instruction.
2530 // For more detailed information see header comment of call_far_patchable.
2531 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2532   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2533          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2534 }
2535 
2536 // Does the call_far_patchable instruction use a pc-relative encoding
2537 // of the call destination?
2538 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2539   // Variant 2 is pc-relative.
2540   return is_call_far_patchable_variant2_at(instruction_addr);
2541 }
2542 
2543 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2544   // Prepend each BRASL with a nop.
2545   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2546 }
2547 
2548 // Set destination address of a call_far_patchable instruction.
2549 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2550   ResourceMark rm;
2551 
2552   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2553   int code_size = MacroAssembler::call_far_patchable_size();
2554   CodeBuffer buf(instruction_addr, code_size);
2555   MacroAssembler masm(&buf);
2556   masm.call_far_patchable(dest, tocOffset);
2557   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2558 }
2559 
2560 // Get dest address of a call_far_patchable instruction.
2561 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2562   // Dynamic TOC: absolute address in constant pool.
2563   // Check variant2 first, it is more frequent.
2564 
2565   // Relative address encoded in call instruction.
2566   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2567     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2568 
2569   // Absolute address in constant pool.
2570   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2571     address iaddr = instruction_addr;
2572 
2573     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2574     address tocLoc    = iaddr + tocOffset;
2575     return *(address *)(tocLoc);
2576   } else {
2577     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2578     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2579             *(unsigned long*)instruction_addr,
2580             *(unsigned long*)(instruction_addr+8),
2581             call_far_patchable_size());
2582     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2583     ShouldNotReachHere();
2584     return NULL;
2585   }
2586 }
2587 
2588 void MacroAssembler::align_call_far_patchable(address pc) {
2589   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2590 }
2591 
2592 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2593 }
2594 
2595 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2596 }
2597 
2598 // Read from the polling page.
2599 // Use TM or TMY instruction, depending on read offset.
2600 //   offset = 0: Use TM, safepoint polling.
2601 //   offset < 0: Use TMY, profiling safepoint polling.
2602 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2603   if (Immediate::is_uimm12(offset)) {
2604     z_tm(offset, polling_page_address, mask_safepoint);
2605   } else {
2606     z_tmy(offset, polling_page_address, mask_profiling);
2607   }
2608 }
2609 
2610 // Check whether z_instruction is a read access to the polling page
2611 // which was emitted by load_from_polling_page(..).
2612 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2613   unsigned long z_instruction;
2614   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2615 
2616   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2617 
2618   if (ilen == 4) {
2619     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2620 
2621     int ms = inv_mask(z_instruction,8,32);  // mask
2622     int ra = inv_reg(z_instruction,16,32);  // base register
2623     int ds = inv_uimm12(z_instruction);     // displacement
2624 
2625     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2626       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2627     }
2628 
2629   } else { /* if (ilen == 6) */
2630 
2631     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2632 
2633     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2634 
2635     int ms = inv_mask(z_instruction,8,48);  // mask
2636     int ra = inv_reg(z_instruction,16,48);  // base register
2637     int ds = inv_simm20(z_instruction);     // displacement
2638   }
2639 
2640   return true;
2641 }
2642 
2643 // Extract poll address from instruction and ucontext.
2644 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2645   assert(ucontext != NULL, "must have ucontext");
2646   ucontext_t* uc = (ucontext_t*) ucontext;
2647   unsigned long z_instruction;
2648   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2649 
2650   if (ilen == 4 && is_z_tm(z_instruction)) {
2651     int ra = inv_reg(z_instruction, 16, 32);  // base register
2652     int ds = inv_uimm12(z_instruction);       // displacement
2653     address addr = (address)uc->uc_mcontext.gregs[ra];
2654     return addr + ds;
2655   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2656     int ra = inv_reg(z_instruction, 16, 48);  // base register
2657     int ds = inv_simm20(z_instruction);       // displacement
2658     address addr = (address)uc->uc_mcontext.gregs[ra];
2659     return addr + ds;
2660   }
2661 
2662   ShouldNotReachHere();
2663   return NULL;
2664 }
2665 
2666 // Extract poll register from instruction.
2667 uint MacroAssembler::get_poll_register(address instr_loc) {
2668   unsigned long z_instruction;
2669   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2670 
2671   if (ilen == 4 && is_z_tm(z_instruction)) {
2672     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2673   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2674     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2675   }
2676 
2677   ShouldNotReachHere();
2678   return 0;
2679 }
2680 
2681 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
2682   ShouldNotCallThis();
2683   return false;
2684 }
2685 
2686 // Write the serialization page so the VM thread can do a pseudo remote membar.
2687 // We use the current thread pointer to calculate a thread-specific
2688 // offset to write to within the page. This minimizes bus traffic
2689 // due to cache line collisions.
2690 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2691   assert_different_registers(tmp1, tmp2);
2692   z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
2693   load_const_optimized(tmp1, (long) os::get_memory_serialize_page());
2694 
2695   int mask = os::get_serialize_page_mask();
2696   if (Immediate::is_uimm16(mask)) {
2697     z_nill(tmp2, mask);
2698     z_llghr(tmp2, tmp2);
2699   } else {
2700     z_nilf(tmp2, mask);
2701     z_llgfr(tmp2, tmp2);
2702   }
2703 
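       // tmp2 now holds the thread-specific offset: the thread pointer, shifted and
       // masked so the result lies within the serialization page. The release barrier
       // below orders preceding stores before the store to (tmp1 + tmp2).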
2704   z_release();
2705   z_st(Z_R0, 0, tmp2, tmp1);
2706 }
2707 
2708 // Don't rely on register locking, always use Z_R1 as scratch register instead.
2709 void MacroAssembler::bang_stack_with_offset(int offset) {
2710   // Stack grows down, caller passes positive offset.
2711   assert(offset > 0, "must bang with positive offset");
2712   if (Displacement::is_validDisp(-offset)) {
2713     z_tmy(-offset, Z_SP, mask_stackbang);
2714   } else {
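         // -offset does not fit the displacement field of TMY, so materialize
         // the bang address in Z_R1 first.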
2715     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2716     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2717   }
2718 }
2719 
2720 void MacroAssembler::reserved_stack_check(Register return_pc) {
2721   // Test if reserved zone needs to be enabled.
2722   Label no_reserved_zone_enabling;
2723   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2724   BLOCK_COMMENT("reserved_stack_check {");
2725 
2726   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2727   z_brl(no_reserved_zone_enabling);
2728 
2729   // Enable reserved zone again, throw stack overflow exception.
2730   save_return_pc();
2731   push_frame_abi160(0);
2732   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2733   pop_frame();
2734   restore_return_pc();
2735 
2736   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2737   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2738   z_br(Z_R1);
2739 
2740   should_not_reach_here();
2741 
2742   bind(no_reserved_zone_enabling);
2743   BLOCK_COMMENT("} reserved_stack_check");
2744 }
2745 
2746 // Defines obj, preserves var_size_in_bytes, okay for t1 == var_size_in_bytes.
2747 void MacroAssembler::tlab_allocate(Register obj,
2748                                    Register var_size_in_bytes,
2749                                    int con_size_in_bytes,
2750                                    Register t1,
2751                                    Label& slow_case) {
2752   assert_different_registers(obj, var_size_in_bytes, t1);
2753   Register end = t1;
2754   Register thread = Z_thread;
2755 
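       // Allocation scheme, in essence:
       //   obj = tlab_top; end = obj + size;
       //   if (end > tlab_end) goto slow_case; else tlab_top = end;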
2756   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2757   if (var_size_in_bytes == noreg) {
2758     z_lay(end, Address(obj, con_size_in_bytes));
2759   } else {
2760     z_lay(end, Address(obj, var_size_in_bytes));
2761   }
2762   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2763   branch_optimized(bcondHigh, slow_case);
2764 
2765   // Update the tlab top pointer.
2766   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2767 
2768   // Recover var_size_in_bytes if necessary.
2769   if (var_size_in_bytes == end) {
2770     z_sgr(var_size_in_bytes, obj);
2771   }
2772 }
2773 
2774 // Emitter for interface method lookup.
2775 //   input: recv_klass, intf_klass, itable_index
2776 //   output: method_result
2777 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2778 // TODO: temp2_reg is unused. We may use this emitter also in the itable stubs.
2779 // If the register turns out to remain unneeded, remove it.
2780 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2781                                              Register           intf_klass,
2782                                              RegisterOrConstant itable_index,
2783                                              Register           method_result,
2784                                              Register           temp1_reg,
2785                                              Register           temp2_reg,
2786                                              Label&             no_such_interface) {
2787 
2788   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2789   const Register itable_entry_addr = Z_R1_scratch;
2790   const Register itable_interface = Z_R0_scratch;
2791 
2792   BLOCK_COMMENT("lookup_interface_method {");
2793 
2794   // Load start of itable entries into itable_entry_addr.
2795   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2796   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2797 
2798   // Loop over all itable entries until the desired interfaceOop (intf_klass) is found.
2799   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2800 
2801   add2reg_with_index(itable_entry_addr,
2802                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2803                      recv_klass, vtable_len);
2804 
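       // itable_entry_addr now points past the vtable, at the interface field of the
       // first itableOffsetEntry. The loop below advances entry by entry
       // (itable_offset_search_inc bytes each) until the matching interface klass
       // or a NULL terminator is found.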
2805   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2806   Label     search;
2807 
2808   bind(search);
2809 
2810   // Handle IncompatibleClassChangeError.
2811   // If the entry is NULL then we've reached the end of the table
2812   // without finding the expected interface, so throw an exception.
2813   load_and_test_long(itable_interface, Address(itable_entry_addr));
2814   z_bre(no_such_interface);
2815 
2816   add2reg(itable_entry_addr, itable_offset_search_inc);
2817   z_cgr(itable_interface, intf_klass);
2818   z_brne(search);
2819 
2820   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2821 
2822   const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2823                                     itableOffsetEntry::interface_offset_in_bytes()) -
2824                                    itable_offset_search_inc;
2825 
2826   // Compute the itableMethodEntry and get the method and entry point.
2827   // We use addressing with index and displacement, since the formula
2828   // for computing the entry's offset has a fixed and a dynamic part.
2829   // The latter depends on the matched interface entry and on whether
2830   // the itable index has been passed as a register or as a constant value.
2831   int method_offset = itableMethodEntry::method_offset_in_bytes();
2832                            // Fixed part (displacement), common operand.
2833   Register itable_offset;  // Dynamic part (index register).
2834 
2835   if (itable_index.is_register()) {
2836      // Compute the method's offset in that register; for the formula, see the
2837      // else-clause below.
2838      itable_offset = itable_index.as_register();
2839 
2840      z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
2841      z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2842   } else {
2843     itable_offset = Z_R1_scratch;
2844     // Displacement increases.
2845     method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2846 
2847     // Load the interface's vtable offset from the matched itableOffsetEntry.
2848     z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2849   }
2850 
2851   // Finally load the Method*.
2852   z_lg(method_result, method_offset, itable_offset, recv_klass);
2853   BLOCK_COMMENT("} lookup_interface_method");
2854 }
2855 
2856 // Lookup for virtual method invocation.
2857 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2858                                            RegisterOrConstant vtable_index,
2859                                            Register           method_result) {
2860   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2861   assert(vtableEntry::size() * wordSize == wordSize,
2862          "else adjust the scaling in the code below");
2863 
2864   BLOCK_COMMENT("lookup_virtual_method {");
2865 
2866   const int base = in_bytes(Klass::vtable_start_offset());
2867 
2868   if (vtable_index.is_constant()) {
2869     // Load with base + disp.
2870     Address vtable_entry_addr(recv_klass,
2871                               vtable_index.as_constant() * wordSize +
2872                               base +
2873                               vtableEntry::method_offset_in_bytes());
2874 
2875     z_lg(method_result, vtable_entry_addr);
2876   } else {
2877     // Shift index properly and load with base + index + disp.
2878     Register vindex = vtable_index.as_register();
2879     Address  vtable_entry_addr(recv_klass, vindex,
2880                                base + vtableEntry::method_offset_in_bytes());
2881 
2882     z_sllg(vindex, vindex, exact_log2(wordSize));
2883     z_lg(method_result, vtable_entry_addr);
2884   }
2885   BLOCK_COMMENT("} lookup_virtual_method");
2886 }
2887 
2888 // Factor out code to call ic_miss_handler.
2889 // Generate code to call the inline cache miss handler.
2890 //
2891 // In most cases, this code will be generated out-of-line.
2892 // The method parameters are intended to provide some variability.
2893 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2894 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2895 //                  Any value except 0x00 is supported.
2896 //                  = 0x00 - do not generate illtrap instructions;
2897 //                         use nops to fill unused space.
2898 //   requiredSize - required size of the generated code. If the actually
2899 //                  generated code is smaller, use padding instructions to fill up.
2900 //                  = 0 - no size requirement, no padding.
2901 //   scratch      - scratch register to hold branch target address.
2902 //
2903 //  The method returns the code offset of the bound label.
2904 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2905   intptr_t startOffset = offset();
2906 
2907   // Prevent entry at content_begin().
2908   if (trapMarker != 0) {
2909     z_illtrap(trapMarker);
2910   }
2911 
2912   // Load address of inline cache miss code into scratch register
2913   // and branch to cache miss handler.
2914   BLOCK_COMMENT("IC miss handler {");
2915   BIND(ICM);
2916   unsigned int   labelOffset = offset();
2917   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2918 
2919   load_const_optimized(scratch, icmiss);
2920   z_br(scratch);
2921 
2922   // Fill unused space.
2923   if (requiredSize > 0) {
2924     while ((offset() - startOffset) < requiredSize) {
2925       if (trapMarker == 0) {
2926         z_nop();
2927       } else {
2928         z_illtrap(trapMarker);
2929       }
2930     }
2931   }
2932   BLOCK_COMMENT("} IC miss handler");
2933   return labelOffset;
2934 }
2935 
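     // Emit the unverified entry point (UEP) check of an nmethod: compare the
     // receiver's klass against the inline cache register and branch to ic_miss
     // on mismatch (and on a NULL receiver when no implicit null check can be used).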
2936 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2937   Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
2938   int      klass_offset = oopDesc::klass_offset_in_bytes();
2939   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2940     if (VM_Version::has_CompareBranch()) {
2941       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2942     } else {
2943       z_ltgr(Z_ARG1, Z_ARG1);
2944       z_bre(ic_miss);
2945     }
2946   }
2947   // Compare cached class against klass from receiver.
2948   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2949   z_brne(ic_miss);
2950 }
2951 
2952 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2953                                                    Register   super_klass,
2954                                                    Register   temp1_reg,
2955                                                    Label*     L_success,
2956                                                    Label*     L_failure,
2957                                                    Label*     L_slow_path,
2958                                                    RegisterOrConstant super_check_offset) {
2959 
2960   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2961   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2962 
2963   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2964   bool need_slow_path = (must_load_sco ||
2965                          super_check_offset.constant_or_zero() == sc_offset);
2966 
2967   // Input registers must not overlap.
2968   assert_different_registers(sub_klass, super_klass, temp1_reg);
2969   if (super_check_offset.is_register()) {
2970     assert_different_registers(sub_klass, super_klass,
2971                                super_check_offset.as_register());
2972   } else if (must_load_sco) {
2973     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2974   }
2975 
2976   const Register Rsuper_check_offset = temp1_reg;
2977 
2978   NearLabel L_fallthrough;
2979   int label_nulls = 0;
2980   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
2981   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
2982   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2983   assert(label_nulls <= 1 ||
2984          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2985          "at most one NULL in the batch, usually");
2986 
2987   BLOCK_COMMENT("check_klass_subtype_fast_path {");
2988   // If the pointers are equal, we are done (e.g., String[] elements).
2989   // This self-check enables sharing of secondary supertype arrays among
2990   // non-primary types such as array-of-interface. Otherwise, each such
2991 // type would need its own customized secondary supers array (SSA).
2992   // We move this check to the front of the fast path because many
2993   // type checks are in fact trivially successful in this manner,
2994   // so we get a nicely predicted branch right at the start of the check.
2995   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2996 
2997   // Check the supertype display, which is uint.
2998   if (must_load_sco) {
2999     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
3000     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
3001   }
3002   Address super_check_addr(sub_klass, super_check_offset, 0);
3003   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
3004 
3005   // This check has worked decisively for primary supers.
3006   // Secondary supers are sought in the super_cache ('super_cache_addr').
3007   // (Secondary supers are interfaces and very deeply nested subtypes.)
3008   // This works in the same check above because of a tricky aliasing
3009   // between the super_cache and the primary super display elements.
3010   // (The 'super_check_addr' can address either, as the case requires.)
3011   // Note that the cache is updated below if it does not help us find
3012   // what we need immediately.
3013   // So if it was a primary super, we can just fail immediately.
3014   // Otherwise, it's the slow path for us (no success at this point).
3015 
3016   // Hacked jmp, which may only be used just before L_fallthrough.
3017 #define final_jmp(label)                                                \
3018   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3019   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
3020 
3021   if (super_check_offset.is_register()) {
3022     branch_optimized(Assembler::bcondEqual, *L_success);
3023     z_cfi(super_check_offset.as_register(), sc_offset);
3024     if (L_failure == &L_fallthrough) {
3025       branch_optimized(Assembler::bcondEqual, *L_slow_path);
3026     } else {
3027       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3028       final_jmp(*L_slow_path);
3029     }
3030   } else if (super_check_offset.as_constant() == sc_offset) {
3031     // Need a slow path; fast failure is impossible.
3032     if (L_slow_path == &L_fallthrough) {
3033       branch_optimized(Assembler::bcondEqual, *L_success);
3034     } else {
3035       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3036       final_jmp(*L_success);
3037     }
3038   } else {
3039     // No slow path; it's a fast decision.
3040     if (L_failure == &L_fallthrough) {
3041       branch_optimized(Assembler::bcondEqual, *L_success);
3042     } else {
3043       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3044       final_jmp(*L_success);
3045     }
3046   }
3047 
3048   bind(L_fallthrough);
3049 #undef local_brc
3050 #undef final_jmp
3051   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3052   // fallthru (to slow path)
3053 }
3054 
3055 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3056                                                    Register Rsuperklass,
3057                                                    Register Rarray_ptr,  // tmp
3058                                                    Register Rlength,     // tmp
3059                                                    Label* L_success,
3060                                                    Label* L_failure) {
3061   // Input registers must not overlap.
3062   // Also check for Z_R1 which is explicitly used here.
3063   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3064   NearLabel L_fallthrough, L_loop;
3065   int label_nulls = 0;
3066   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3067   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3068   assert(label_nulls <= 1, "at most one NULL in the batch");
3069 
3070   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3071   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3072 
3073   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3074   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3075 
3076   // Hacked jmp, which may only be used just before L_fallthrough.
3077 #define final_jmp(label)                                                \
3078   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3079   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3080 
3081   NearLabel loop_iterate, loop_count, match;
3082 
3083   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3084   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3085 
3086   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3087   branch_optimized(Assembler::bcondZero, *L_failure);
3088 
3089   // Klass pointers in the table are no longer compressed.
3090   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3091   z_bre(match);                               // Shortcut for array length = 1.
3092 
3093   // No match yet, so we must walk the array's elements.
3094   z_lngfr(Rlength, Rlength);
3095   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3096   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3097   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3098   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3099   z_bru(loop_count);
3100 
3101   BIND(loop_iterate);
3102   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3103   z_bre(match);
3104   BIND(loop_count);
3105   z_brxlg(Rlength, Z_R1, loop_iterate);
3106 
3107   // Rsuperklass not found among secondary super classes -> failure.
3108   branch_optimized(Assembler::bcondAlways, *L_failure);
3109 
3110   // Got a hit. Return success (zero result). Set cache.
3111   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3112 
3113   BIND(match);
3114 
3115   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3116 
3117   final_jmp(*L_success);
3118 
3119   // Exit to the surrounding code.
3120   BIND(L_fallthrough);
3121 #undef local_brc
3122 #undef final_jmp
3123   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3124 }
3125 
3126 // Emitter for combining fast and slow path.
3127 void MacroAssembler::check_klass_subtype(Register sub_klass,
3128                                          Register super_klass,
3129                                          Register temp1_reg,
3130                                          Register temp2_reg,
3131                                          Label&   L_success) {
3132   NearLabel failure;
3133   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3134   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3135                                 &L_success, &failure, NULL);
3136   check_klass_subtype_slow_path(sub_klass, super_klass,
3137                                 temp1_reg, temp2_reg, &L_success, NULL);
3138   BIND(failure);
3139   BLOCK_COMMENT("} check_klass_subtype");
3140 }
3141 
3142 // Increment a counter at counter_address when the eq condition code is
3143 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3144 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3145   Label l;
3146   z_brne(l);
3147   load_const(tmp1_reg, counter_address);
3148   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3149   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3150   bind(l);
3151 }
3152 
3153 // Semantics are dependent on the slow_case label:
3154 //   If the slow_case label is not NULL, failure to biased-lock the object
3155 //   transfers control to the location of the slow_case label. If the
3156 //   object could be biased-locked, control is transferred to the done label.
3157 //   The condition code is unpredictable.
3158 //
3159 //   If the slow_case label is NULL, failure to biased-lock the object results
3160 //   in a transfer of control to the done label with a condition code of not_equal.
3161 //   If the biased-lock could be successfully obtained, control is transferred to
3162 //   the done label with a condition code of equal.
3163 //   It is mandatory to react to the condition code at the done label.
3164 //
3165 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3166                                           Register  mark_reg,
3167                                           Register  temp_reg,
3168                                           Register  temp2_reg,    // May be Z_R0!
3169                                           Label    &done,
3170                                           Label    *slow_case) {
3171   assert(UseBiasedLocking, "why call this otherwise?");
3172   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3173 
3174   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3175 
3176   BLOCK_COMMENT("biased_locking_enter {");
3177 
3178   // Biased locking
3179   // See whether the lock is currently biased toward our thread and
3180   // whether the epoch is still valid.
3181   // Note that the runtime guarantees sufficient alignment of JavaThread
3182   // pointers to allow age to be placed into low bits.
3183   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3184          "biased locking makes assumptions about bit layout");
3185   z_lr(temp_reg, mark_reg);
3186   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3187   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3188   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3189 
3190   load_prototype_header(temp_reg, obj_reg);
3191   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3192 
3193   z_ogr(temp_reg, Z_thread);
3194   z_xgr(temp_reg, mark_reg);
3195   z_ngr(temp_reg, temp2_reg);
3196   if (PrintBiasedLockingStatistics) {
3197     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3198     // Restore mark_reg.
3199     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3200   }
3201   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3202 
3203   Label try_revoke_bias;
3204   Label try_rebias;
3205   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3206 
3207   //----------------------------------------------------------------------------
3208   // At this point we know that the header has the bias pattern and
3209   // that we are not the bias owner in the current epoch. We need to
3210   // figure out more details about the state of the header in order to
3211   // know what operations can be legally performed on the object's
3212   // header.
3213 
3214   // If the low three bits in the xor result aren't clear, that means
3215   // the prototype header is no longer biased and we have to revoke
3216   // the bias on this object.
3217   z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3218   z_brnaz(try_revoke_bias);
3219 
3220   // Biasing is still enabled for this data type. See whether the
3221   // epoch of the current bias is still valid, meaning that the epoch
3222   // bits of the mark word are equal to the epoch bits of the
3223   // prototype header. (Note that the prototype header's epoch bits
3224   // only change at a safepoint.) If not, attempt to rebias the object
3225   // toward the current thread. Note that we must be absolutely sure
3226   // that the current epoch is invalid in order to do this because
3227   // otherwise the manipulations it performs on the mark word are
3228   // illegal.
3229   z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3230   z_brnaz(try_rebias);
3231 
3232   //----------------------------------------------------------------------------
3233   // The epoch of the current bias is still valid but we know nothing
3234   // about the owner; it might be set or it might be clear. Try to
3235   // acquire the bias of the object using an atomic operation. If this
3236   // fails we will go into the runtime to revoke the object's bias.
3237   // Note that we first construct the presumed unbiased header so we
3238   // don't accidentally blow away another thread's valid bias.
3239   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3240          markOopDesc::epoch_mask_in_place);
3241   z_lgr(temp_reg, Z_thread);
3242   z_llgfr(mark_reg, mark_reg);
3243   z_ogr(temp_reg, mark_reg);
3244 
3245   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3246 
3247   z_csg(mark_reg, temp_reg, 0, obj_reg);
3248 
3249   // If the biasing toward our thread failed, this means that
3250   // another thread succeeded in biasing it toward itself and we
3251   // need to revoke that bias. The revocation will occur in the
3252   // interpreter runtime in the slow case.
3253 
3254   if (PrintBiasedLockingStatistics) {
3255     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3256                          temp_reg, temp2_reg);
3257   }
3258   if (slow_case != NULL) {
3259     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3260   }
3261   branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3262 
3263   //----------------------------------------------------------------------------
3264   bind(try_rebias);
3265   // At this point we know the epoch has expired, meaning that the
3266   // current "bias owner", if any, is actually invalid. Under these
3267   // circumstances _only_, we are allowed to use the current header's
3268   // value as the comparison value when doing the cas to acquire the
3269   // bias in the current epoch. In other words, we allow transfer of
3270   // the bias from one thread to another directly in this situation.
3271 
3272   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
3273   load_prototype_header(temp_reg, obj_reg);
3274   z_llgfr(mark_reg, mark_reg);
3275 
3276   z_ogr(temp_reg, Z_thread);
3277 
3278   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3279 
3280   z_csg(mark_reg, temp_reg, 0, obj_reg);
3281 
3282   // If the biasing toward our thread failed, this means that
3283   // another thread succeeded in biasing it toward itself and we
3284   // need to revoke that bias. The revocation will occur in the
3285   // interpreter runtime in the slow case.
3286 
3287   if (PrintBiasedLockingStatistics) {
3288     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3289   }
3290   if (slow_case != NULL) {
3291     branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3292   }
3293   z_bru(done);           // Biased lock status given in condition code.
3294 
3295   //----------------------------------------------------------------------------
3296   bind(try_revoke_bias);
3297   // The prototype mark in the klass doesn't have the bias bit set any
3298   // more, indicating that objects of this data type are not supposed
3299   // to be biased any more. We are going to try to reset the mark of
3300   // this object to the prototype value and fall through to the
3301   // CAS-based locking scheme. Note that if our CAS fails, it means
3302   // that another thread raced us for the privilege of revoking the
3303   // bias of this particular object, so it's okay to continue in the
3304   // normal locking code.
3305   load_prototype_header(temp_reg, obj_reg);
3306 
3307   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3308 
3309   z_csg(mark_reg, temp_reg, 0, obj_reg);
3310 
3311   // Fall through to the normal CAS-based lock, because no matter what
3312   // the result of the above CAS, some thread must have succeeded in
3313   // removing the bias bit from the object's header.
3314   if (PrintBiasedLockingStatistics) {
3315     // z_cgr(mark_reg, temp2_reg);
3316     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3317   }
3318 
3319   bind(cas_label);
3320   BLOCK_COMMENT("} biased_locking_enter");
3321 }
3322 
3323 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3324   // Check for biased locking unlock case, which is a no-op
3325   // Note: we do not have to check the thread ID for two reasons.
3326   // First, the interpreter checks for IllegalMonitorStateException at
3327   // a higher level. Second, if the bias was revoked while we held the
3328   // lock, the object could not be rebiased toward another thread, so
3329   // the bias bit would be clear.
3330   BLOCK_COMMENT("biased_locking_exit {");
3331 
3332   z_lg(temp_reg, 0, mark_addr);
3333   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3334 
3335   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3336   z_bre(done);
3337   BLOCK_COMMENT("} biased_locking_exit");
3338 }
3339 
3340 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3341   Register displacedHeader = temp1;
3342   Register currentHeader = temp1;
3343   Register temp = temp2;
3344   NearLabel done, object_has_monitor;
3345 
3346   BLOCK_COMMENT("compiler_fast_lock_object {");
3347 
3348   // Load markOop from oop into mark.
3349   z_lg(displacedHeader, 0, oop);
3350 
3351   if (try_bias) {
3352     biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3353   }
3354 
3355   // Handle existing monitor.
3356   if ((EmitSync & 0x01) == 0) {
3357     // The object has an existing monitor iff (mark & monitor_value) != 0.
3358     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3359     z_lr(temp, displacedHeader);
3360     z_nill(temp, markOopDesc::monitor_value);
3361     z_brne(object_has_monitor);
3362   }
3363 
3364   // Set mark to markOop | markOopDesc::unlocked_value.
3365   z_oill(displacedHeader, markOopDesc::unlocked_value);
3366 
3367   // Load Compare Value application register.
3368 
3369   // Initialize the box (must happen before we update the object mark).
3370   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3371 
3372   // Memory fence (implicit in the CSG below).
3373   // Compare the object markOop with displacedHeader and, if equal, exchange it with the box address.
3374 
3375   // If the compare-and-swap succeeded, then we found an unlocked object and we
3376   // have now locked it.
3377   z_csg(displacedHeader, box, 0, oop);
3378   assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
3379   z_bre(done);
3380 
3381   // We did not see an unlocked object so try the fast recursive case.
3382 
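       // The failed CSG above reloaded the current markOop into currentHeader.
       // If the object is already stack-locked by this thread (the markOop is a
       // stack address within one page of Z_SP, with the lock bits clear), the
       // masked difference is 0: it is stored as the displaced header and leaves
       // CR = EQ, signalling a successful recursive lock.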
3383   z_sgr(currentHeader, Z_SP);
3384   load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3385 
3386   z_ngr(currentHeader, temp);
3387   //   z_brne(done);
3388   //   z_release();
3389   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3390 
3391   z_bru(done);
3392 
3393   if ((EmitSync & 0x01) == 0) {
3394     Register zero = temp;
3395     Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
3396     bind(object_has_monitor);
3397     // The object's monitor m is unlocked iff m->owner == NULL,
3398     // otherwise m->owner may contain a thread or a stack address.
3399     //
3400     // Try to CAS m->owner from NULL to current thread.
3401     z_lghi(zero, 0);
3402     // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3403     z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3404     // Store a non-null value into the box.
3405     z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3406 #ifdef ASSERT
3407       z_brne(done);
3408       // We've acquired the monitor, check some invariants.
3409       // Invariant 1: _recursions should be 0.
3410       asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3411                               "monitor->_recursions should be 0", -1);
3412       z_ltgr(zero, zero); // Set CR=EQ.
3413 #endif
3414   }
3415   bind(done);
3416 
3417   BLOCK_COMMENT("} compiler_fast_lock_object");
3418   // If locking was successful, CR should indicate 'EQ'.
3419   // The compiler or the native wrapper generates a branch to the runtime call
3420   // _complete_monitor_locking_Java.
3421 }
3422 
3423 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3424   Register displacedHeader = temp1;
3425   Register currentHeader = temp2;
3426   Register temp = temp1;
3427   Register monitor = temp2;
3428 
3429   Label done, object_has_monitor;
3430 
3431   BLOCK_COMMENT("compiler_fast_unlock_object {");
3432 
3433   if (try_bias) {
3434     biased_locking_exit(oop, currentHeader, done);
3435   }
3436 
3437   // Find the lock address and load the displaced header from the stack.
3438   // If the displaced header is zero, we have a recursive unlock.
3439   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3440   z_bre(done);
3441 
3442   // Handle existing monitor.
3443   if ((EmitSync & 0x02) == 0) {
3444     // The object has an existing monitor iff (mark & monitor_value) != 0.
3445     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3446     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3447     z_nill(currentHeader, markOopDesc::monitor_value);
3448     z_brne(object_has_monitor);
3449   }
3450 
3451   // Check if it is still a lightweight lock. This is true if we see
3452   // the stack address of the basicLock in the markOop of the object.
3453   // Copy box to currentHeader such that csg does not kill it.
3454   z_lgr(currentHeader, box);
3455   z_csg(currentHeader, displacedHeader, 0, oop);
3456   z_bru(done); // Csg sets CR as desired.
3457 
3458   // Handle existing monitor.
3459   if ((EmitSync & 0x02) == 0) {
3460     bind(object_has_monitor);
3461     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
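         // Fast-path unlocking of the inflated monitor succeeds only if the
         // recursions, owner, EntryList and cxq fields all test zero; any
         // non-zero value leaves CR = NE and branches to done (slow path).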
3462     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3463     z_brne(done);
3464     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3465     z_brne(done);
3466     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3467     z_brne(done);
3468     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3469     z_brne(done);
3470     z_release();
3471     z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3472   }
3473 
3474   bind(done);
3475 
3476   BLOCK_COMMENT("} compiler_fast_unlock_object");
3477   // flag == EQ indicates success
3478   // flag == NE indicates failure
3479 }
3480 
3481 // Write to card table for modification at store_addr - register is destroyed afterwards.
3482 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
3483   CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
3484   assert(bs->kind() == BarrierSet::CardTableForRS ||
3485          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
3486   assert_different_registers(store_addr, tmp);
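       // Card marking: card index = store_addr >> card_shift; the byte at
       // byte_map_base + index is set to 0 (dirty).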
3487   z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
3488   load_absolute_address(tmp, (address)bs->byte_map_base);
3489   z_agr(store_addr, tmp);
3490   z_mvi(0, store_addr, 0); // Store byte 0.
3491 }
3492 
3493 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3494   NearLabel Ldone;
3495   z_ltgr(tmp1, value);
3496   z_bre(Ldone);          // Use NULL result as-is.
3497 
3498   z_nill(value, ~JNIHandles::weak_tag_mask);
3499   z_lg(value, 0, value); // Resolve (untagged) jobject.
3500 
3501 #if INCLUDE_ALL_GCS
3502   if (UseG1GC) {
3503     NearLabel Lnot_weak;
3504     z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
3505     z_braz(Lnot_weak);
3506     verify_oop(value);
3507     g1_write_barrier_pre(noreg /* obj */,
3508                          noreg /* offset */,
3509                          value /* pre_val */,
3510                          noreg /* val */,
3511                          tmp1  /* tmp1 */,
3512                          tmp2  /* tmp2 */,
3513                          true  /* pre_val_needed */);
3514     bind(Lnot_weak);
3515   }
3516 #endif // INCLUDE_ALL_GCS
3517   verify_oop(value);
3518   bind(Ldone);
3519 }
3520 
3521 #if INCLUDE_ALL_GCS
3522 
3523 //------------------------------------------------------
3524 // General G1 pre-barrier generator.
3525 // Purpose: record the previous value if it is not null.
3526 // All non-tmps are preserved.
3527 //------------------------------------------------------
3528 // Note: Rpre_val needs special attention.
3529 //   The flag pre_val_needed indicates that the caller of this emitter function
3530 //   relies on Rpre_val containing the correct value, that is:
3531 //     either the value it contained on entry to this code segment
3532 //     or the value that was loaded into the register from (Robj+offset).
3533 //
3534 //   Independent from this requirement, the contents of Rpre_val must survive
3535 //   the push_frame() operation. push_frame() uses Z_R0_scratch by default
3536 //   to temporarily remember the frame pointer.
3537 //   If Rpre_val is assigned Z_R0_scratch by the caller, code must be emitted to
3538 //   save its value.
3539 void MacroAssembler::g1_write_barrier_pre(Register           Robj,
3540                                           RegisterOrConstant offset,
3541                                           Register           Rpre_val,      // Ideally, this is a non-volatile register.
3542                                           Register           Rval,          // Will be preserved.
3543                                           Register           Rtmp1,         // If Rpre_val is volatile, either Rtmp1
3544                                           Register           Rtmp2,         // or Rtmp2 has to be non-volatile.
3545                                           bool               pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3546                                        ) {
3547   Label callRuntime, filtered;
3548   const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3549   const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3550   const int index_offset  = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3551   assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
3552   assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0!!
3553   assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0!!
3554 
3555 #ifdef ASSERT
3556   // Make sure the register is not Z_R0. It is used for addressing. Furthermore, it would be destroyed by push_frame.
3557   if (offset.is_register() && offset.as_register()->encoding() == 0) {
3558     tty->print_cr("Roffset(g1_write_barrier_pre)  = %%r%d", offset.as_register()->encoding());
3559     assert(false, "bad register for offset");
3560   }
3561 #endif
3562 
3563   BLOCK_COMMENT("g1_write_barrier_pre {");
3564 
3565   // Is marking active?
3566   // Note: value is loaded for test purposes only. No further use here.
3567   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3568     load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3569   } else {
3570     guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3571     load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3572   }
3573   z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
3574 
3575   assert(Rpre_val != noreg, "must have a real register");
3576 
3577 
3578   // If an object is given, we need to load the previous value into Rpre_val.
3579   if (Robj != noreg) {
3580     // Load the previous value...
3581     Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3582     if (UseCompressedOops) {
3583       z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3584     } else {
3585       z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3586     }
3587   }
3588 
3589   // Is the previous value NULL?
3590   // If so, we don't need to record it and we're done.
3591   // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
3592   //       Register content is preserved across the runtime call if the caller requests it.
3593   z_ltgr(Rpre_val, Rpre_val);
3594   z_bre(filtered); // previous value is NULL, so we don't need to record it.
3595 
3596   // Decode the oop now. We know it's not NULL.
3597   if (Robj != noreg && UseCompressedOops) {
3598     oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3599   }
3600 
3601   // OK, it's not filtered, so we'll need to call enqueue.
3602 
3603   // We can store the original value in the thread's buffer
3604   // only if index > 0. Otherwise, we need the runtime to handle it.
3605   // (The index field is typed as size_t.)
3606   Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3607   assert_different_registers(Rbuffer, Rindex, Rpre_val);
3608 
3609   z_lg(Rbuffer, buffer_offset, Z_thread);
3610 
3611   load_and_test_long(Rindex, Address(Z_thread, index_offset));
3612   z_bre(callRuntime); // If index == 0, goto runtime.
3613 
3614   add2reg(Rindex, -wordSize); // Decrement index.
3615   z_stg(Rindex, index_offset, Z_thread);
3616 
3617   // Record the previous value.
3618   z_stg(Rpre_val, 0, Rbuffer, Rindex);
3619   z_bru(filtered);  // We are done.
3620 
3621   Rbuffer = noreg;  // end of life
3622   Rindex  = noreg;  // end of life
3623 
3624   bind(callRuntime);
3625 
3626   // Save some registers (inputs and result) over runtime call
3627   // by spilling them into the top frame.
3628   if (Robj != noreg && Robj->is_volatile()) {
3629     z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3630   }
3631   if (offset.is_register() && offset.as_register()->is_volatile()) {
3632     Register Roff = offset.as_register();
3633     z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3634   }
3635   if (Rval != noreg && Rval->is_volatile()) {
3636     z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3637   }
3638 
3639   // Save Rpre_val (result) over runtime call.
3640   Register Rpre_save = Rpre_val;
3641   if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
3642     guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
3643     Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
3644   }
3645   lgr_if_needed(Rpre_save, Rpre_val);
3646 
3647   // Push frame to protect top frame with return pc and spilled register values.
3648   save_return_pc();
3649   push_frame_abi160(0); // Will use Z_R0 as tmp.
3650 
3651   // Rpre_val may be destroyed by push_frame().
3652   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
3653 
3654   pop_frame();
3655   restore_return_pc();
3656 
3657   // Restore spilled values.
3658   if (Robj != noreg && Robj->is_volatile()) {
3659     z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3660   }
3661   if (offset.is_register() && offset.as_register()->is_volatile()) {
3662     Register Roff = offset.as_register();
3663     z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3664   }
3665   if (Rval != noreg && Rval->is_volatile()) {
3666     z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3667   }
3668   if (pre_val_needed && Rpre_val->is_volatile()) {
3669     lgr_if_needed(Rpre_val, Rpre_save);
3670   }
3671 
3672   bind(filtered);
3673   BLOCK_COMMENT("} g1_write_barrier_pre");
3674 }
3675 
3676 // General G1 post-barrier generator.
3677 // Purpose: Store cross-region card.
3678 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
3679                                            Register Rnew_val,
3680                                            Register Rtmp1,
3681                                            Register Rtmp2,
3682                                            Register Rtmp3) {
3683   Label callRuntime, filtered;
3684 
3685   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
3686 
3687   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
3688   assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
3689 
3690   BLOCK_COMMENT("g1_write_barrier_post {");
3691 
3692   // Does store cross heap regions?
3693   // It does if the two addresses specify different grain addresses.
3694   if (G1RSBarrierRegionFilter) {
3695     if (VM_Version::has_DistinctOpnds()) {
3696       z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
3697     } else {
3698       z_lgr(Rtmp1, Rstore_addr);
3699       z_xgr(Rtmp1, Rnew_val);
3700     }
3701     z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
3702     z_bre(filtered);
3703   }
3704 
3705   // Crosses regions, storing NULL?
3706 #ifdef ASSERT
3707   z_ltgr(Rnew_val, Rnew_val);
3708   asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete:
3709   z_bre(filtered);  // Safety net: don't break if we have a NULL oop.
3710 #endif
3711   Rnew_val = noreg; // end of lifetime
3712 
3713   // Storing region crossing non-NULL, is card already dirty?
3714   assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
3715   assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
3716   // Make sure not to use Z_R0 for any of these registers.
3717   Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
3718   Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
3719 
3720   // Calculate the address of the card.
3721   load_const_optimized(Rbase, (address)bs->byte_map_base);        // Card table base.
3722   z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
3723   z_algr(Rcard_addr, Rbase);                                      // Explicit calculation needed for cli.
3724   Rbase = noreg; // end of lifetime
3725 
3726   // Filter young.
3727   assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
3728   z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
3729   z_bre(filtered);
3730 
3731   // Check the card value. If dirty, we're done.
3732   // This also avoids false sharing of the (already dirty) card.
3733   z_sync(); // Required to support concurrent cleaning.
3734   assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
3735   z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
3736   z_bre(filtered);
3737 
3738   // Storing a region crossing, non-NULL oop, card is clean.
3739   // Dirty card and log.
3740   z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
3741 
3742   Register Rcard_addr_x = Rcard_addr;
3743   Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3744   Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3745   const int qidx_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3746   const int qbuf_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3747   if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3748     Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
3749   }
3750   lgr_if_needed(Rcard_addr_x, Rcard_addr);
3751 
3752   load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3753   z_bre(callRuntime); // If index == 0, jump to runtime.
3754 
3755   z_lg(Rqueue_buf, qbuf_off, Z_thread);
3756 
3757   add2reg(Rqueue_index, -wordSize); // Decrement index.
3758   z_stg(Rqueue_index, qidx_off, Z_thread);
3759 
3760   z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3761   z_bru(filtered);
3762 
3763   bind(callRuntime);
3764 
3765   // TODO: do we need a frame? Introduced to be on the safe side.
3766   bool needs_frame = true;
3767   lgr_if_needed(Rcard_addr, Rcard_addr_x); // copy back asap. push_frame will destroy Z_R0_scratch!
3768 
3769   // The VM call below needs a frame.
3770   if (needs_frame) {
3771     save_return_pc();
3772     push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3773   }
3774 
3775   // Save the live input values.
3776   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
3777 
3778   if (needs_frame) {
3779     pop_frame();
3780     restore_return_pc();
3781   }
3782 
3783   bind(filtered);
3784 
3785   BLOCK_COMMENT("} g1_write_barrier_post");
3786 }
3787 #endif // INCLUDE_ALL_GCS
3788 
3789 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3790 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3791   BLOCK_COMMENT("set_last_Java_frame {");
3792 
3793   // Always set last_Java_pc and flags first because once last_Java_sp
3794   // is visible, has_last_Java_frame is true and users will look at the
3795   // rest of the fields. (Note: flags should always be zero before we
3796   // get here so doesn't need to be set.)
3797 
3798   // Verify that last_Java_pc was zeroed on return to Java.
3799   if (allow_relocation) {
3800     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3801                             Z_thread,
3802                             "last_Java_pc not zeroed before leaving Java",
3803                             0x200);
3804   } else {
3805     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3806                                    Z_thread,
3807                                    "last_Java_pc not zeroed before leaving Java",
3808                                    0x200);
3809   }
3810 
3811   // When returning from calling out from Java mode the frame anchor's
3812   // last_Java_pc will always be set to NULL. It is set here so that
3813   // if we are doing a call to native (not VM) that we capture the
3814   // known pc and don't have to rely on the native call having a
3815   // standard frame linkage where we can find the pc.
3816   if (last_Java_pc!=noreg) {
3817     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3818   }
3819 
3820   // This membar release is not required on z/Architecture, since the sequence of stores
3821   // is maintained. Nevertheless, we leave it in to document the required ordering.
3822   // The implementation of z_release() should be empty.
3823   // z_release();
3824 
3825   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3826   BLOCK_COMMENT("} set_last_Java_frame");
3827 }
3828 
3829 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3830   BLOCK_COMMENT("reset_last_Java_frame {");
3831 
3832   if (allow_relocation) {
3833     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3834                                Z_thread,
3835                                "SP was not set, still zero",
3836                                0x202);
3837   } else {
3838     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3839                                       Z_thread,
3840                                       "SP was not set, still zero",
3841                                       0x202);
3842   }
3843 
3844   // _last_Java_sp = 0
3845   // Clearing storage must be atomic here, so don't use clear_mem()!
3846   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3847 
3848   // _last_Java_pc = 0
3849   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3850 
3851   BLOCK_COMMENT("} reset_last_Java_frame");
3852   return;
3853 }
3854 
3855 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3856   assert_different_registers(sp, tmp1);
3857 
3858   // We cannot trust that code generated by the C++ compiler saves R14
3859   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3860   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3861   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3862   // it into the frame anchor.
3863   get_PC(tmp1);
3864   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3865 }
3866 
3867 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3868   z_release();
3869 
3870   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3871   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3872   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3873 }
3874 
3875 void MacroAssembler::get_vm_result(Register oop_result) {
3876   verify_thread();
3877 
3878   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3879   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3880 
3881   verify_oop(oop_result);
3882 }
3883 
3884 void MacroAssembler::get_vm_result_2(Register result) {
3885   verify_thread();
3886 
3887   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3888   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3889 }
3890 
3891 // We require that C code which does not return a value in vm_result will
3892 // leave it undisturbed.
3893 void MacroAssembler::set_vm_result(Register oop_result) {
3894   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3895 }
3896 
3897 // Explicit null checks (used for method handle code).
3898 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3899   if (!ImplicitNullChecks) {
3900     NearLabel ok;
3901 
3902     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3903 
3904     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3905     address exception_entry = Interpreter::throw_NullPointerException_entry();
3906     load_absolute_address(reg, exception_entry);
3907     z_br(reg);
3908 
3909     bind(ok);
3910   } else {
3911     if (needs_explicit_null_check((intptr_t)offset)) {
3912       // Provoke OS NULL exception if reg = NULL by
3913       // accessing M[reg] w/o changing any registers.
3914       z_lg(tmp, 0, reg);
3915     }
3916     // else
3917       // Nothing to do, (later) access of M[reg + offset]
3918       // will provoke OS NULL exception if reg = NULL.
3919   }
3920 }
3921 
3922 //-------------------------------------
3923 //  Compressed Klass Pointers
3924 //-------------------------------------
3925 
3926 // Klass oop manipulations if compressed.
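     // For reference, the mapping implemented by the encoder below is essentially
     //   narrow_klass = (klass - Universe::narrow_klass_base()) >> Universe::narrow_klass_shift()
     // with the base subtraction strength-reduced where the base fits into a single
     // 32-bit half (see the aih/agfi paths). Illustrative formula only, not generated code.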
3927 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3928   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3929   address  base    = Universe::narrow_klass_base();
3930   int      shift   = Universe::narrow_klass_shift();
3931   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3932 
3933   BLOCK_COMMENT("cKlass encoder {");
3934 
3935 #ifdef ASSERT
3936   Label ok;
3937   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3938   z_brc(Assembler::bcondAllZero, ok);
3939   // The plain disassembler does not recognize illtrap. It instead displays
3940   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3941   // the proper beginning of the next instruction.
3942   z_illtrap(0xee);
3943   z_illtrap(0xee);
3944   bind(ok);
3945 #endif
3946 
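       // Illustrative example of the base split below (assumed base values, not generated code):
       //   base = 0x0000_0007_0000_0000 -> base_h = 0x7, base_l = 0:
       //     lgr dst,src; aih dst,-7            (adjust only the upper 32 bits; needs HighWordInstr)
       //   base = 0x0000_0000_4000_0000 -> base_h = 0, base_l = 0x4000_0000:
       //     lgr dst,src; agfi dst,-0x40000000  (fits the 32-bit signed immediate)
       //   any other base: load_const Z_R0,base; sgr dst,Z_R0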
3947   if (base != NULL) {
3948     unsigned int base_h = ((unsigned long)base)>>32;
3949     unsigned int base_l = (unsigned int)((unsigned long)base);
3950     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3951       lgr_if_needed(dst, current);
3952       z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3953     } else if ((base_h == 0) && (base_l != 0)) {
3954       lgr_if_needed(dst, current);
3955       z_agfi(dst, -(int)base_l);
3956     } else {
3957       load_const(Z_R0, base);
3958       lgr_if_needed(dst, current);
3959       z_sgr(dst, Z_R0);
3960     }
3961     current = dst;
3962   }
3963   if (shift != 0) {
3964     assert(LogKlassAlignmentInBytes == shift, "encode alg wrong");
3965     z_srlg(dst, current, shift);
3966     current = dst;
3967   }
3968   lgr_if_needed(dst, current); // A move is only needed if neither base nor shift was applied (base == NULL and shift == 0).
3969 
3970   BLOCK_COMMENT("} cKlass encoder");
3971 }
3972 
3973 // This function calculates the size of the code generated by
3974 //   decode_klass_not_null(Register dst)
3975 // when (Universe::heap() != NULL). Hence, if the instructions
3976 // it generates change, then this method needs to be updated.
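     // Example (assuming a nonzero shift and a base whose lower 32 bits are zero, i.e. the aih path):
     //   6 bytes (sllg) + 6 bytes (aih) = 12 bytes, plus 12 more bytes for the ASSERT-only alignment check.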
3977 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3978   address  base    = Universe::narrow_klass_base();
3979   int shift_size   = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
3980   int addbase_size = 0;
3981   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3982 
3983   if (base != NULL) {
3984     unsigned int base_h = ((unsigned long)base)>>32;
3985     unsigned int base_l = (unsigned int)((unsigned long)base);
3986     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3987       addbase_size += 6; /* aih */
3988     } else if ((base_h == 0) && (base_l != 0)) {
3989       addbase_size += 6; /* algfi */
3990     } else {
3991       addbase_size += load_const_size();
3992       addbase_size += 4; /* algr */
3993     }
3994   }
3995 #ifdef ASSERT
3996   addbase_size += 10;
3997   addbase_size += 2; // Extra sigill.
3998 #endif
3999   return addbase_size + shift_size;
4000 }
4001 
4002 // !!! If the instructions that get generated here change
4003 //     then function instr_size_for_decode_klass_not_null()
4004 //     needs to get updated.
4005 // This variant of decode_klass_not_null() must generate predictable code!
4006 // The code must only depend on globally known parameters.
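     // For reference, the decoding performed below is essentially
     //   klass = (narrow_klass << Universe::narrow_klass_shift()) + Universe::narrow_klass_base()
     // with the base addition strength-reduced to aih/algfi where possible.
     // Illustrative formula only, not generated code.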
4007 void MacroAssembler::decode_klass_not_null(Register dst) {
4008   address  base    = Universe::narrow_klass_base();
4009   int      shift   = Universe::narrow_klass_shift();
4010   int      beg_off = offset();
4011   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4012 
4013   BLOCK_COMMENT("cKlass decoder (const size) {");
4014 
4015   if (shift != 0) { // Shift required?
4016     z_sllg(dst, dst, shift);
4017   }
4018   if (base != NULL) {
4019     unsigned int base_h = ((unsigned long)base)>>32;
4020     unsigned int base_l = (unsigned int)((unsigned long)base);
4021     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4022       z_aih(dst, base_h);     // Base has no set bits in lower half.
4023     } else if ((base_h == 0) && (base_l != 0)) {
4024       z_algfi(dst, base_l);   // Base has no set bits in upper half.
4025     } else {
4026       load_const(Z_R0, base); // Base has set bits everywhere.
4027       z_algr(dst, Z_R0);
4028     }
4029   }
4030 
4031 #ifdef ASSERT
4032   Label ok;
4033   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4034   z_brc(Assembler::bcondAllZero, ok);
4035   // The plain disassembler does not recognize illtrap. It instead displays
4036   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4037   // the proper beginning of the next instruction.
4038   z_illtrap(0xd1);
4039   z_illtrap(0xd1);
4040   bind(ok);
4041 #endif
4042   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
4043 
4044   BLOCK_COMMENT("} cKlass decoder (const size)");
4045 }
4046 
4047 // This variant of decode_klass_not_null() is for cases where
4048 //  1) the size of the generated instructions may vary
4049 //  2) the result is (potentially) stored in a register different from the source.
4050 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4051   address base  = Universe::narrow_klass_base();
4052   int     shift = Universe::narrow_klass_shift();
4053   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4054 
4055   BLOCK_COMMENT("cKlass decoder {");
4056 
4057   if (src == noreg) src = dst;
4058 
4059   if (shift != 0) { // Shift or at least move required?
4060     z_sllg(dst, src, shift);
4061   } else {
4062     lgr_if_needed(dst, src);
4063   }
4064 
4065   if (base != NULL) {
4066     unsigned int base_h = ((unsigned long)base)>>32;
4067     unsigned int base_l = (unsigned int)((unsigned long)base);
4068     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4069       z_aih(dst, base_h);     // Base has no set bits in lower half.
4070     } else if ((base_h == 0) && (base_l != 0)) {
4071       z_algfi(dst, base_l);   // Base has no set bits in upper half.
4072     } else {
4073       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
4074       z_algr(dst, Z_R0);
4075     }
4076   }
4077 
4078 #ifdef ASSERT
4079   Label ok;
4080   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4081   z_brc(Assembler::bcondAllZero, ok);
4082   // The plain disassembler does not recognize illtrap. It instead displays
4083   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4084   // the proper beginning of the next instruction.
4085   z_illtrap(0xd2);
4086   z_illtrap(0xd2);
4087   bind(ok);
4088 #endif
4089   BLOCK_COMMENT("} cKlass decoder");
4090 }
4091 
4092 void MacroAssembler::load_klass(Register klass, Address mem) {
4093   if (UseCompressedClassPointers) {
4094     z_llgf(klass, mem);
4095     // Attention: no null check here!
4096     decode_klass_not_null(klass);
4097   } else {
4098     z_lg(klass, mem);
4099   }
4100 }
4101 
4102 void MacroAssembler::load_klass(Register klass, Register src_oop) {
4103   if (UseCompressedClassPointers) {
4104     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4105     // Attention: no null check here!
4106     decode_klass_not_null(klass);
4107   } else {
4108     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4109   }
4110 }
4111 
4112 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
4113   assert_different_registers(Rheader, Rsrc_oop);
4114   load_klass(Rheader, Rsrc_oop);
4115   z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
4116 }
4117 
4118 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
4119   if (UseCompressedClassPointers) {
4120     assert_different_registers(dst_oop, klass, Z_R0);
4121     if (ck == noreg) ck = klass;
4122     encode_klass_not_null(ck, klass);
4123     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4124   } else {
4125     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4126   }
4127 }
4128 
4129 void MacroAssembler::store_klass_gap(Register s, Register d) {
4130   if (UseCompressedClassPointers) {
4131     assert(s != d, "not enough registers");
4132     // Support s = noreg.
4133     if (s != noreg) {
4134       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
4135     } else {
4136       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
4137     }
4138   }
4139 }
4140 
4141 // Compare klass ptr in memory against klass ptr in register.
4142 //
4143 // Rop1            - klass in register, always uncompressed.
4144 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
4145 // Rbase           - Base address of cKlass in memory.
4146 // maybeNULL       - True if Rop1 possibly is a NULL.
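     // For compressed class pointers, the comparison below is conceptually (sketch only, not generated code)
     //   compare32(encode(Rop1), memory[Rbase + disp])
     // rather than
     //   compare64(Rop1, decode(load32(memory[Rbase + disp])))
     // which would require an additional load plus a decode sequence.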
4147 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
4148 
4149   BLOCK_COMMENT("compare klass ptr {");
4150 
4151   if (UseCompressedClassPointers) {
4152     const int shift = Universe::narrow_klass_shift();
4153     address   base  = Universe::narrow_klass_base();
4154 
4155     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
4156     assert_different_registers(Rop1, Z_R0);
4157     assert_different_registers(Rop1, Rbase, Z_R1);
4158 
4159     // First encode register oop and then compare with cOop in memory.
4160     // This sequence saves an unnecessary cOop load and decode.
4161     if (base == NULL) {
4162       if (shift == 0) {
4163         z_cl(Rop1, disp, Rbase);     // Unscaled
4164       } else {
4165         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
4166         z_cl(Z_R0, disp, Rbase);
4167       }
4168     } else {                         // HeapBased
4169 #ifdef ASSERT
4170       bool     used_R0 = true;
4171       bool     used_R1 = true;
4172 #endif
4173       Register current = Rop1;
4174       Label    done;
4175 
4176       if (maybeNULL) {       // NULL ptr must be preserved!
4177         z_ltgr(Z_R0, current);
4178         z_bre(done);
4179         current = Z_R0;
4180       }
4181 
4182       unsigned int base_h = ((unsigned long)base)>>32;
4183       unsigned int base_l = (unsigned int)((unsigned long)base);
4184       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4185         lgr_if_needed(Z_R0, current);
4186         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
4187       } else if ((base_h == 0) && (base_l != 0)) {
4188         lgr_if_needed(Z_R0, current);
4189         z_agfi(Z_R0, -(int)base_l);
4190       } else {
4191         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4192         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
4193       }
4194 
4195       if (shift != 0) {
4196         z_srlg(Z_R0, Z_R0, shift);
4197       }
4198       bind(done);
4199       z_cl(Z_R0, disp, Rbase);
4200 #ifdef ASSERT
4201       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4202       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4203 #endif
4204     }
4205   } else {
4206     z_clg(Rop1, disp, Z_R0, Rbase);
4207   }
4208   BLOCK_COMMENT("} compare klass ptr");
4209 }
4210 
4211 //---------------------------
4212 //  Compressed oops
4213 //---------------------------
4214 
4215 void MacroAssembler::encode_heap_oop(Register oop) {
4216   oop_encoder(oop, oop, true /*maybe null*/);
4217 }
4218 
4219 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
4220   oop_encoder(oop, oop, false /*not null*/);
4221 }
4222 
4223 // Called with something derived from the oop base, e.g. oop_base>>3.
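     // Worked example (assumed base value, not from a real VM configuration):
     //   oop_base = 0x0000_0007_ffff_f000 -> 16-bit parts (hh,hl,lh,ll) = (0, 0x0007, 0xffff, 0xf000), 3 nonzero
     //   pow2_offset = 0x10000 - 0xf000 = 0x1000 (< 0x8000)
     //   oop_base + 0x1000 = 0x0000_0008_0000_0000 has a single nonzero part
     //   -> return -0x1000; get_oop_base() then materializes the cheap constant 0x8_0000_0000,
     //      and callers re-apply the returned offset as a displacement in a later composite add.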
4224 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
4225   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
4226   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
4227   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
4228   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
4229   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
4230                                + (oop_base_lh == 0 ? 0:1)
4231                                + (oop_base_hl == 0 ? 0:1)
4232                                + (oop_base_hh == 0 ? 0:1);
4233 
4234   assert(oop_base != 0, "This is for HeapBased cOops only");
4235 
4236   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
4237     uint64_t pow2_offset = 0x10000 - oop_base_ll;
4238     if (pow2_offset < 0x8000) {  // This might not be necessary.
4239       uint64_t oop_base2 = oop_base + pow2_offset;
4240 
4241       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
4242       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
4243       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
4244       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
4245       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
4246                         (oop_base_lh == 0 ? 0:1) +
4247                         (oop_base_hl == 0 ? 0:1) +
4248                         (oop_base_hh == 0 ? 0:1);
4249       if (n_notzero_parts == 1) {
4250         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
4251         return -pow2_offset;
4252       }
4253     }
4254   }
4255   return 0;
4256 }
4257 
4258 // If base address is offset from a straight power of two by just a few pages,
4259 // return this offset to the caller for a possible later composite add.
4260 // TODO/FIX: will only work correctly for 4k pages.
4261 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
4262   int pow2_offset = get_oop_base_pow2_offset(oop_base);
4263 
4264   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
4265 
4266   return pow2_offset;
4267 }
4268 
4269 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
4270   int offset = get_oop_base(Rbase, oop_base);
4271   z_lcgr(Rbase, Rbase);
4272   return -offset;
4273 }
4274 
4275 // Compare compressed oop in memory against oop in register.
4276 // Rop1            - Oop in register.
4277 // disp            - Offset of cOop in memory.
4278 // Rbase           - Base address of cOop in memory.
4279 // maybeNULL       - True if Rop1 possibly is a NULL.
4280 // Note: there is no separate branch target for Rop1 == NULL in this variant; control always continues with the compare.
4281 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
4282   Register Rbase  = mem.baseOrR0();
4283   Register Rindex = mem.indexOrR0();
4284   int64_t  disp   = mem.disp();
4285 
4286   const int shift = Universe::narrow_oop_shift();
4287   address   base  = Universe::narrow_oop_base();
4288 
4289   assert(UseCompressedOops, "must be on to call this method");
4290   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4291   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4292   assert_different_registers(Rop1, Z_R0);
4293   assert_different_registers(Rop1, Rbase, Z_R1);
4294   assert_different_registers(Rop1, Rindex, Z_R1);
4295 
4296   BLOCK_COMMENT("compare heap oop {");
4297 
4298   // First encode register oop and then compare with cOop in memory.
4299   // This sequence saves an unnecessary cOop load and decode.
4300   if (base == NULL) {
4301     if (shift == 0) {
4302       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4303     } else {
4304       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4305       z_cl(Z_R0, disp, Rindex, Rbase);
4306     }
4307   } else {                              // HeapBased
4308 #ifdef ASSERT
4309     bool  used_R0 = true;
4310     bool  used_R1 = true;
4311 #endif
4312     Label done;
4313     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4314 
4315     if (maybeNULL) {       // NULL ptr must be preserved!
4316       z_ltgr(Z_R0, Rop1);
4317       z_bre(done);
4318     }
4319 
4320     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4321     z_srlg(Z_R0, Z_R0, shift);
4322 
4323     bind(done);
4324     z_cl(Z_R0, disp, Rindex, Rbase);
4325 #ifdef ASSERT
4326     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4327     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4328 #endif
4329   }
4330   BLOCK_COMMENT("} compare heap oop");
4331 }
4332 
4333 // Load heap oop and decompress, if necessary.
4334 void  MacroAssembler::load_heap_oop(Register dest, const Address &a) {
4335   if (UseCompressedOops) {
4336     z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4337     oop_decoder(dest, dest, true);
4338   } else {
4339     z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4340   }
4341 }
4342 
4343 // Load heap oop and decompress, if necessary.
4344 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) {
4345   if (UseCompressedOops) {
4346     z_llgf(dest, disp, base);
4347     oop_decoder(dest, dest, true);
4348   } else {
4349     z_lg(dest, disp, base);
4350   }
4351 }
4352 
4353 // Load heap oop and decompress, if necessary.
4354 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) {
4355   if (UseCompressedOops) {
4356     z_llgf(dest, disp, base);
4357     oop_decoder(dest, dest, false);
4358   } else {
4359     z_lg(dest, disp, base);
4360   }
4361 }
4362 
4363 // Compress, if necessary, and store oop to heap.
4364 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) {
4365   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4366   if (UseCompressedOops) {
4367     assert_different_registers(Roop, offset.register_or_noreg(), base);
4368     encode_heap_oop(Roop);
4369     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4370   } else {
4371     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4372   }
4373 }
4374 
4375 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL.
4376 void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) {
4377   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4378   if (UseCompressedOops) {
4379     assert_different_registers(Roop, offset.register_or_noreg(), base);
4380     encode_heap_oop_not_null(Roop);
4381     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4382   } else {
4383     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4384   }
4385 }
4386 
4387 // Store NULL oop to heap.
4388 void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) {
4389   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4390   if (UseCompressedOops) {
4391     z_st(zero, offset.constant_or_zero(), Ridx, base);
4392   } else {
4393     z_stg(zero, offset.constant_or_zero(), Ridx, base);
4394   }
4395 }
4396 
4397 //-------------------------------------------------
4398 // Encode compressed oop. Generally usable encoder.
4399 //-------------------------------------------------
4400 // Rsrc - contains regular oop on entry. It remains unchanged.
4401 // Rdst - contains compressed oop on exit.
4402 // Rdst and Rsrc may indicate the same register, in which case Rsrc is clobbered.
4403 //
4404 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4405 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4406 //
4407 // only32bitValid is set if later code uses only the lower 32 bits. In this
4408 // case we need not clear the upper 32 bits.
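     // For reference, the encoding is essentially
     //   narrow_oop = (oop - Universe::narrow_oop_base()) >> Universe::narrow_oop_shift()
     // with NULL mapping to NULL when maybeNULL is set; the base subtraction is strength-reduced
     // via the precomputed complement (see get_oop_base_complement()).
     // Illustrative formula only, not generated code.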
4409 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4410                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4411 
4412   const address oop_base  = Universe::narrow_oop_base();
4413   const int     oop_shift = Universe::narrow_oop_shift();
4414   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4415 
4416   assert(UseCompressedOops, "must be on to call this method");
4417   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4418   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4419 
4420   if (disjoint || (oop_base == NULL)) {
4421     BLOCK_COMMENT("cOop encoder zeroBase {");
4422     if (oop_shift == 0) {
4423       if (oop_base != NULL && !only32bitValid) {
4424         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4425       } else {
4426         lgr_if_needed(Rdst, Rsrc);
4427       }
4428     } else {
4429       z_srlg(Rdst, Rsrc, oop_shift);
4430       if (oop_base != NULL && !only32bitValid) {
4431         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4432       }
4433     }
4434     BLOCK_COMMENT("} cOop encoder zeroBase");
4435     return;
4436   }
4437 
4438   bool used_R0 = false;
4439   bool used_R1 = false;
4440 
4441   BLOCK_COMMENT("cOop encoder general {");
4442   assert_different_registers(Rdst, Z_R1);
4443   assert_different_registers(Rsrc, Rbase);
4444   if (maybeNULL) {
4445     Label done;
4446     // We reorder shifting and subtracting, so that we can compare
4447     // and shift in parallel:
4448     //
4449     // cycle 0:  potential LoadN, base = <const>
4450     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4451     // cycle 2:  if (cr) br,      dst = dst + base + offset
4452 
4453     // Get oop_base components.
4454     if (pow2_offset == -1) {
4455       if (Rdst == Rbase) {
4456         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4457           Rbase = Z_R0;
4458           used_R0 = true;
4459         } else {
4460           Rdst = Z_R1;
4461           used_R1 = true;
4462         }
4463       }
4464       if (Rbase == Z_R1) {
4465         used_R1 = true;
4466       }
4467       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4468     }
4469     assert_different_registers(Rdst, Rbase);
4470 
4471     // Check for NULL oop (must be left alone) and shift.
4472     if (oop_shift != 0) {  // Shift out alignment bits
4473       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4474         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4475       } else {
4476         z_srlg(Rdst, Rsrc, oop_shift);
4477         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4478         // z_cghi(Rsrc, 0) might look faster because it does not write a register,
4479         // but it is not.
4480       }
4481     } else {
4482       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4483     }
4484     z_bre(done);
4485 
4486     // Subtract oop_base components.
4487     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4488       z_algr(Rdst, Rbase);
4489       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4490     } else {
4491       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4492     }
4493     if (!only32bitValid) {
4494       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4495     }
4496     bind(done);
4497 
4498   } else {  // not null
4499     // Get oop_base components.
4500     if (pow2_offset == -1) {
4501       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4502     }
4503 
4504     // Subtract oop_base components and shift.
4505     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4506       // Don't use lay instruction.
4507       if (Rdst == Rsrc) {
4508         z_algr(Rdst, Rbase);
4509       } else {
4510         lgr_if_needed(Rdst, Rbase);
4511         z_algr(Rdst, Rsrc);
4512       }
4513       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4514     } else {
4515       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4516     }
4517     if (oop_shift != 0) {   // Shift out alignment bits.
4518       z_srlg(Rdst, Rdst, oop_shift);
4519     }
4520     if (!only32bitValid) {
4521       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4522     }
4523   }
4524 #ifdef ASSERT
4525   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4526   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4527 #endif
4528   BLOCK_COMMENT("} cOop encoder general");
4529 }
4530 
4531 //-------------------------------------------------
4532 // decode compressed oop. Generally usable decoder.
4533 //-------------------------------------------------
4534 // Rsrc - contains compressed oop on entry.
4535 // Rdst - contains regular oop on exit.
4536 // Rdst and Rsrc may indicate same register.
4537 // Rdst must not be the same register as Rbase if Rbase was preloaded (before the call).
4538 // Otherwise, Rdst can be the same register as Rbase; then either Z_R0 or Z_R1 must be available as scratch.
4539 // Rbase - register to use for the base
4540 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4541 // For performance, it is good to
4542 //  - avoid Z_R0 for any of the argument registers.
4543 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4544 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
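     // For reference, the decoding is essentially
     //   oop = (narrow_oop << Universe::narrow_oop_shift()) + Universe::narrow_oop_base()
     // For a disjoint base, the addition degenerates to an OR into the upper bits.
     // Illustrative example (assumed base, not from a real VM configuration):
     //   base = 0x0000_0800_0000_0000 with shift = 3: the shifted narrow oop occupies bits 0..34 only,
     //   so "sllg/slag Rdst,Rsrc,3; oihl Rdst,0x0800" yields the same result as an add.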
4545 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4546 
4547   const address oop_base  = Universe::narrow_oop_base();
4548   const int     oop_shift = Universe::narrow_oop_shift();
4549   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4550 
4551   assert(UseCompressedOops, "must be on to call this method");
4552   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4553   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4554          "cOop encoder detected bad shift");
4555 
4556   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4557 
4558   if (oop_base != NULL) {
4559     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4560     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4561     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4562     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4563       BLOCK_COMMENT("cOop decoder disjointBase {");
4564       // We do not need to load the base. Instead, we can install the upper bits
4565       // with an OR instead of an ADD.
4566       Label done;
4567 
4568       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4569       if (maybeNULL) {  // NULL ptr must be preserved!
4570         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4571         z_bre(done);
4572       } else {
4573         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4574       }
4575       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4576         z_oihf(Rdst, oop_base_hf);
4577       } else if (oop_base_hl != 0) {
4578         z_oihl(Rdst, oop_base_hl);
4579       } else {
4580         assert(oop_base_hh != 0, "not heapbased mode");
4581         z_oihh(Rdst, oop_base_hh);
4582       }
4583       bind(done);
4584       BLOCK_COMMENT("} cOop decoder disjointBase");
4585     } else {
4586       BLOCK_COMMENT("cOop decoder general {");
4587       // There are three decode steps:
4588       //   scale oop offset (shift left)
4589       //   get base (in reg) and pow2_offset (constant)
4590       //   add base, pow2_offset, and oop offset
4591       // The following register overlap situations may exist:
4592       // Rdst == Rsrc,  Rbase any other
4593       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4594       //   Loading Rbase does not impact the scaled offset.
4595       // Rdst == Rbase, Rsrc  any other
4596       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4597       //   would destroy the scaled offset.
4598       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4599       //           use Rbase_tmp if base has to be loaded.
4600       // Rsrc == Rbase, Rdst  any other
4601       //   Only possible without preloaded Rbase.
4602       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4603       // Rsrc == Rbase, Rdst == Rbase
4604       //   Only possible without preloaded Rbase.
4605       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4606       //   Remedy: use Rbase_tmp.
4607       //
4608       Label    done;
4609       Register Rdst_tmp       = Rdst;
4610       Register Rbase_tmp      = Rbase;
4611       bool     used_R0        = false;
4612       bool     used_R1        = false;
4613       bool     base_preloaded = pow2_offset >= 0;
4614       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4615       assert(oop_shift != 0, "room for optimization");
4616 
4617       // Check if we need to use scratch registers.
4618       if (Rdst == Rbase) {
4619         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4620         if (Rdst != Rsrc) {
4621           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4622           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4623         } else {
4624           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4625         }
4626       }
4627       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4628 
4629       // Scale oop and check for NULL.
4630       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4631       if (maybeNULL) {  // NULL ptr must be preserved!
4632         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4633         z_bre(done);
4634       } else {
4635         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4636       }
4637 
4638       // Get oop_base components.
4639       if (!base_preloaded) {
4640         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4641       }
4642 
4643       // Add up all components.
4644       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4645         z_algr(Rdst_tmp, Rbase_tmp);
4646         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4647       } else {
4648         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4649       }
4650 
4651       bind(done);
4652       lgr_if_needed(Rdst, Rdst_tmp);
4653 #ifdef ASSERT
4654       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4655       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4656 #endif
4657       BLOCK_COMMENT("} cOop decoder general");
4658     }
4659   } else {
4660     BLOCK_COMMENT("cOop decoder zeroBase {");
4661     if (oop_shift == 0) {
4662       lgr_if_needed(Rdst, Rsrc);
4663     } else {
4664       z_sllg(Rdst, Rsrc, oop_shift);
4665     }
4666     BLOCK_COMMENT("} cOop decoder zeroBase");
4667   }
4668 }
4669 
4670 // ((OopHandle)result).resolve();
4671 void MacroAssembler::resolve_oop_handle(Register result) {
4672   // OopHandle::resolve is an indirection.
4673   z_lg(result, 0, result);
4674 }
4675 
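     // The chain below is conceptually
     //   mirror = method->constMethod()->constants()->pool_holder()->java_mirror()
     // followed by one more indirection to resolve the OopHandle.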
4676 void MacroAssembler::load_mirror(Register mirror, Register method) {
4677   mem2reg_opt(mirror, Address(method, Method::const_offset()));
4678   mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
4679   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4680   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4681   resolve_oop_handle(mirror);
4682 }
4683 
4684 //---------------------------------------------------------------
4685 //---  Operations on arrays.
4686 //---------------------------------------------------------------
4687 
4688 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4689 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4690 // work registers anyway.
4691 // Actually, only r0, r1, and r5 are killed.
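     // Rough strategy (sketch): len == 0 -> nothing to do; len <= 32 doublewords (256 bytes) ->
     // a single EXECUTE'd XC-to-self with variable length; otherwise MVCLE with a zero source
     // length, whose padding (pad byte 0) clears the destination.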
4692 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
4693   // src_addr is the even register of an even/odd register pair.
4694   // src_len  is the odd register of that pair.
4695 
4696   int      block_start = offset();
4697   Register tmp_reg  = src_len; // Holds target instr addr for EX.
4698   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4699   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4700 
4701   Label doXC, doMVCLE, done;
4702 
4703   BLOCK_COMMENT("Clear_Array {");
4704 
4705   // Check for zero len and convert to long.
4706   z_ltgfr(src_len, cnt_arg);      // Sign-extend the 32-bit count to 64 bit; CC is set for the zero check below.
4707   z_bre(done);                    // Nothing to do if len == 0.
4708 
4709   // Prefetch data to be cleared.
4710   if (VM_Version::has_Prefetch()) {
4711     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4712     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4713   }
4714 
4715   z_sllg(dst_len, src_len, 3);    // #bytes to clear.
4716   z_cghi(src_len, 32);            // Check for len <= 256 bytes (<=32 DW).
4717   z_brnh(doXC);                   // If so, use executed XC to clear.
4718 
4719   // MVCLE: initialize long arrays (general case).
4720   bind(doMVCLE);
4721   z_lgr(dst_addr, base_pointer_arg);
4722   clear_reg(src_len, true, false); // Src len of MVCLE is zero.
4723 
4724   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4725   z_bru(done);
4726 
4727   // XC: initialize short arrays.
4728   Label XC_template; // Instr template, never exec directly!
4729     bind(XC_template);
4730     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4731 
4732   bind(doXC);
4733     add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
4734     if (VM_Version::has_ExecuteExtensions()) {
4735       z_exrl(dst_len, XC_template);   // Execute XC with var. len.
4736     } else {
4737       z_larl(tmp_reg, XC_template);
4738       z_ex(dst_len,0,Z_R0,tmp_reg);   // Execute XC with var. len.
4739     }
4740     // z_bru(done);      // fallthru
4741 
4742   bind(done);
4743 
4744   BLOCK_COMMENT("} Clear_Array");
4745 
4746   int block_end = offset();
4747   return block_end - block_start;
4748 }
4749 
4750 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4751 // Emitter does not KILL any arguments nor work registers.
4752 // Emitter generates up to 16 XC instructions, depending on the array length.
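     // Size example: at the asserted 4 KB maximum, numXCInstr = (4096-1)/256 + 1 = 16,
     // matching the "up to 16 XC instructions" stated above.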
4753 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4754   int  block_start    = offset();
4755   int  off;
4756   int  lineSize_Bytes = AllocatePrefetchStepSize;
4757   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4758   bool doPrefetch     = VM_Version::has_Prefetch();
4759   int  XC_maxlen      = 256;
4760   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4761 
4762   BLOCK_COMMENT("Clear_Array_Const {");
4763   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4764 
4765   // Do less prefetching for very short arrays.
4766   if (numXCInstr > 0) {
4767     // Prefetch only some cache lines, then begin clearing.
4768     if (doPrefetch) {
4769       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4770         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4771       } else {
4772         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4773         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4774           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4775         }
4776       }
4777     }
4778 
4779     for (off=0; off<(numXCInstr-1); off++) {
4780       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4781 
4782       // Prefetch some cache lines in advance.
4783       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4784         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4785       }
4786     }
4787     if (off*XC_maxlen < cnt*BytesPerWord) {
4788       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4789     }
4790   }
4791   BLOCK_COMMENT("} Clear_Array_Const");
4792 
4793   int block_end = offset();
4794   return block_end - block_start;
4795 }
4796 
4797 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4798 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4799 // work registers anyway.
4800 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
4801 //
4802 // For very large arrays, exploit MVCLE H/W support.
4803 // MVCLE instruction automatically exploits H/W-optimized page mover.
4804 // - Bytes up to next page boundary are cleared with a series of XC to self.
4805 // - All full pages are cleared with the page mover H/W assist.
4806 // - Remaining bytes are again cleared by a series of XC to self.
4807 //
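     // Simple size example: cnt = 65536 doublewords -> dst_len = 524288 bytes (512 KB), cleared by a single MVCLE.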
4808 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
4809   // src_addr is the even register of an even/odd register pair.
4810   // src_len  is the odd register of that pair.
4811 
4812   int      block_start = offset();
4813   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4814   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4815 
4816   BLOCK_COMMENT("Clear_Array_Const_Big {");
4817 
4818   // Get len to clear.
4819   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4820 
4821   // Prepare other args to MVCLE.
4822   z_lgr(dst_addr, base_pointer_arg);
4823   // Indicate unused result.
4824   (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.
4825 
4826   // Clear.
4827   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4828   BLOCK_COMMENT("} Clear_Array_Const_Big");
4829 
4830   int block_end = offset();
4831   return block_end - block_start;
4832 }
4833 
4834 // Allocator.
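     // Copy strategy (sketch): len == 0 -> done; len <= 256 bytes -> one EXECUTE'd MVC with
     // variable length; otherwise MVCLE with the cache-bypass modifier (0xb0, per the comment below).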
4835 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4836                                                            Register cnt_reg,
4837                                                            Register tmp1_reg, Register tmp2_reg) {
4838   // tmp1_reg is the odd register of an even/odd register pair.
4839   // tmp2_reg is the even register of that pair.
4840 
4841   int block_start = offset();
4842   Label doMVC, doMVCLE, done, MVC_template;
4843 
4844   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4845 
4846   // Check for zero len and convert to long.
4847   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend the 32-bit count to 64 bit; CC is set for the zero check below.
4848   z_bre(done);                    // Nothing to do if len == 0.
4849 
4850   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4851 
4852   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4853   z_brnh(doMVC);                  // If so, use executed MVC to clear.
4854 
4855   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4856   // Prep dest reg pair.
4857   z_lgr(Z_R0, dst_reg);           // dst addr
4858   // Dst len already in Z_R1.
4859   // Prep src reg pair.
4860   z_lgr(tmp2_reg, src_reg);       // src addr
4861   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4862 
4863   // Do the copy.
4864   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4865   z_bru(done);                         // All done.
4866 
4867   bind(MVC_template);             // Just some data (not more than 256 bytes).
4868   z_mvc(0, 0, dst_reg, 0, src_reg);
4869 
4870   bind(doMVC);
4871 
4872   if (VM_Version::has_ExecuteExtensions()) {
4873     add2reg(Z_R1, -1);
4874   } else {
4875     add2reg(tmp1_reg, -1, Z_R1);
4876     z_larl(Z_R1, MVC_template);
4877   }
4878 
4879   if (VM_Version::has_Prefetch()) {
4880     z_pfd(1,  0,Z_R0,src_reg);
4881     z_pfd(2,  0,Z_R0,dst_reg);
4882     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4883     //    z_pfd(2,256,Z_R0,dst_reg);
4884   }
4885 
4886   if (VM_Version::has_ExecuteExtensions()) {
4887     z_exrl(Z_R1, MVC_template);
4888   } else {
4889     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4890   }
4891 
4892   bind(done);
4893 
4894   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4895 
4896   int block_end = offset();
4897   return block_end - block_start;
4898 }
4899 
4900 //------------------------------------------------------
4901 //   Special String Intrinsics. Implementation
4902 //------------------------------------------------------
4903 
4904 // Intrinsics for CompactStrings
4905 
4906 // Compress char[] to byte[].
4907 //   Restores: src, dst
4908 //   Uses:     cnt
4909 //   Kills:    tmp, Z_R0, Z_R1.
4910 //   Early clobber: result.
4911 // Note:
4912 //   cnt is signed int. Do not rely on high word!
4913 //       counts # characters, not bytes.
4914 // The result is the number of characters copied before the first incompatible character was found.
4915 // If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
4916 // by a few bytes. The result always indicates the number of copied characters.
4917 //
4918 // Note: Does not behave exactly like the package-private StringUTF16 compress Java implementation in case of failure:
4919 // - A different number of characters may have been written to the dead array (if precise is false).
4920 // - Returns a number < cnt instead of 0. (The result gets compared with cnt.)
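     // A char can be compressed iff its upper byte is zero (Latin-1 range). The checks below OR
     // groups of chars together and AND the result with a 0xff00-per-char mask, so one test covers
     // the whole group. Small example (illustrative values): {0x0041, 0x0141} fails because
     // 0x0141 & 0xff00 = 0x0100 != 0.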
4921 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
4922                                              Register tmp,    bool precise) {
4923   assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);
4924 
4925   if (precise) {
4926     BLOCK_COMMENT("encode_iso_array {");
4927   } else {
4928     BLOCK_COMMENT("string_compress {");
4929   }
4930   int  block_start = offset();
4931 
4932   Register       Rsrc  = src;
4933   Register       Rdst  = dst;
4934   Register       Rix   = tmp;
4935   Register       Rcnt  = cnt;
4936   Register       Rmask = result;  // holds incompatibility check mask until result value is stored.
4937   Label          ScalarShortcut, AllDone;
4938 
4939   z_iilf(Rmask, 0xFF00FF00);
4940   z_iihf(Rmask, 0xFF00FF00);
4941 
4942   {
4943     //---<  shortcuts for short strings (very frequent)   >---
4944     //   Strings with 4 and 8 characters were found to occur very frequently.
4945     //   Therefore, we handle them right away with minimal overhead.
4946     Label     skipShortcut, skip4Shortcut, skip8Shortcut;
4947     Register  Rout = Z_R0;
4948     z_chi(Rcnt, 4);
4949     z_brne(skip4Shortcut);                 // 4 characters are very frequent
4950       z_lg(Z_R0, 0, Rsrc);                 // Treat exactly 4 characters specially.
4951       if (VM_Version::has_DistinctOpnds()) {
4952         Rout = Z_R0;
4953         z_ngrk(Rix, Z_R0, Rmask);
4954       } else {
4955         Rout = Rix;
4956         z_lgr(Rix, Z_R0);
4957         z_ngr(Z_R0, Rmask);
4958       }
4959       z_brnz(skipShortcut);
4960       z_stcmh(Rout, 5, 0, Rdst);
4961       z_stcm(Rout,  5, 2, Rdst);
4962       z_lgfr(result, Rcnt);
4963       z_bru(AllDone);
4964     bind(skip4Shortcut);
4965 
4966     z_chi(Rcnt, 8);
4967     z_brne(skip8Shortcut);                 // There's more to do...
4968       z_lmg(Z_R0, Z_R1, 0, Rsrc);          // Treat exactly 8 characters specially.
4969       if (VM_Version::has_DistinctOpnds()) {
4970         Rout = Z_R0;
4971         z_ogrk(Rix, Z_R0, Z_R1);
4972         z_ngr(Rix, Rmask);
4973       } else {
4974         Rout = Rix;
4975         z_lgr(Rix, Z_R0);
4976         z_ogr(Z_R0, Z_R1);
4977         z_ngr(Z_R0, Rmask);
4978       }
4979       z_brnz(skipShortcut);
4980       z_stcmh(Rout, 5, 0, Rdst);
4981       z_stcm(Rout,  5, 2, Rdst);
4982       z_stcmh(Z_R1, 5, 4, Rdst);
4983       z_stcm(Z_R1,  5, 6, Rdst);
4984       z_lgfr(result, Rcnt);
4985       z_bru(AllDone);
4986 
4987     bind(skip8Shortcut);
4988     clear_reg(Z_R0, true, false);          // #characters already processed (none). Precond for scalar loop.
4989     z_brl(ScalarShortcut);                 // Just a few characters
4990 
4991     bind(skipShortcut);
4992   }
4993   clear_reg(Z_R0);                         // make sure register is properly initialized.
4994 
4995   if (VM_Version::has_VectorFacility()) {
4996     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
4997                                            // Otherwise just do nothing in vector mode.
4998                                            // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
4999     const int  log_min_vcnt = exact_log2(min_vcnt);
5000     Label      VectorLoop, VectorDone, VectorBreak;
5001 
5002     VectorRegister Vtmp1      = Z_V16;
5003     VectorRegister Vtmp2      = Z_V17;
5004     VectorRegister Vmask      = Z_V18;
5005     VectorRegister Vzero      = Z_V19;
5006     VectorRegister Vsrc_first = Z_V20;
5007     VectorRegister Vsrc_last  = Z_V23;
5008 
5009     assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
5010     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
5011     z_srak(Rix, Rcnt, log_min_vcnt);       // # vector loop iterations
5012     z_brz(VectorDone);                     // not enough data for vector loop
5013 
5014     z_vzero(Vzero);                        // all zeroes
5015     z_vgmh(Vmask, 0, 7);                   // generate 0xff00 mask for all 2-byte elements
5016     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
5017 
5018     bind(VectorLoop);
5019       z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
5020       add2reg(Rsrc, min_vcnt*2);
5021 
5022       //---<  check for incompatible character  >---
5023       z_vo(Vtmp1, Z_V20, Z_V21);
5024       z_vo(Vtmp2, Z_V22, Z_V23);
5025       z_vo(Vtmp1, Vtmp1, Vtmp2);
5026       z_vn(Vtmp1, Vtmp1, Vmask);
5027       z_vceqhs(Vtmp1, Vtmp1, Vzero);       // high half of all chars must be zero for successful compress.
5028       z_brne(VectorBreak);                 // break vector loop, incompatible character found.
5029                                            // re-process data from current iteration in break handler.
5030 
5031       //---<  pack & store characters  >---
5032       z_vpkh(Vtmp1, Z_V20, Z_V21);         // pack (src1, src2) -> tmp1
5033       z_vpkh(Vtmp2, Z_V22, Z_V23);         // pack (src3, src4) -> tmp2
5034       z_vstm(Vtmp1, Vtmp2, 0, Rdst);       // store packed string
5035       add2reg(Rdst, min_vcnt);
5036 
5037       z_brct(Rix, VectorLoop);
5038 
5039     z_bru(VectorDone);
5040 
5041     bind(VectorBreak);
5042       z_sll(Rix, log_min_vcnt);            // # chars processed so far in VectorLoop, excl. current iteration.
5043       z_sr(Z_R0, Rix);                     // correct # chars processed in total.
5044 
5045     bind(VectorDone);
5046   }
5047 
5048   {
5049     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled loop.
5050                                            // Otherwise just do nothing in unrolled loop.
5051                                            // Must be multiple of 8.
5052     const int  log_min_cnt = exact_log2(min_cnt);
5053     Label      UnrolledLoop, UnrolledDone, UnrolledBreak;
5054 
5055     if (VM_Version::has_DistinctOpnds()) {
5056       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
5057     } else {
5058       z_lr(Rix, Rcnt);
5059       z_sr(Rix, Z_R0);
5060     }
5061     z_sra(Rix, log_min_cnt);             // unrolled loop count
5062     z_brz(UnrolledDone);
5063 
5064     bind(UnrolledLoop);
5065       z_lmg(Z_R0, Z_R1, 0, Rsrc);
5066       if (precise) {
5067         z_ogr(Z_R1, Z_R0);                 // check all 8 chars for incompatibility
5068         z_ngr(Z_R1, Rmask);
5069         z_brnz(UnrolledBreak);
5070 
5071         z_lg(Z_R1, 8, Rsrc);               // reload destroyed register
5072         z_stcmh(Z_R0, 5, 0, Rdst);
5073         z_stcm(Z_R0,  5, 2, Rdst);
5074       } else {
5075         z_stcmh(Z_R0, 5, 0, Rdst);
5076         z_stcm(Z_R0,  5, 2, Rdst);
5077 
5078         z_ogr(Z_R0, Z_R1);
5079         z_ngr(Z_R0, Rmask);
5080         z_brnz(UnrolledBreak);
5081       }
5082       z_stcmh(Z_R1, 5, 4, Rdst);
5083       z_stcm(Z_R1,  5, 6, Rdst);
5084 
5085       add2reg(Rsrc, min_cnt*2);
5086       add2reg(Rdst, min_cnt);
5087       z_brct(Rix, UnrolledLoop);
5088 
5089     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop.
5090     z_nilf(Z_R0, ~(min_cnt-1));
5091     z_tmll(Rcnt, min_cnt-1);
5092     z_brnaz(ScalarShortcut);               // if all bits zero, there is nothing left to do for scalar loop.
5093                                            // Rix == 0 in all cases.
5094     z_lgfr(result, Rcnt);                  // all characters processed.
5095     z_sgfr(Rdst, Rcnt);                    // restore ptr
5096     z_sgfr(Rsrc, Rcnt);                    // restore ptr, double the element count for Rsrc restore
5097     z_sgfr(Rsrc, Rcnt);
5098     z_bru(AllDone);
5099 
5100     bind(UnrolledBreak);
5101     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop
5102     z_nilf(Z_R0, ~(min_cnt-1));
5103     z_sll(Rix, log_min_cnt);               // # chars processed so far in UnrolledLoop, excl. current iteration.
5104     z_sr(Z_R0, Rix);                       // correct # chars processed in total.
5105     if (!precise) {
5106       z_lgfr(result, Z_R0);
5107       z_aghi(result, min_cnt/2);           // min_cnt/2 characters have already been written
5108                                            // but ptrs were not updated yet.
5109       z_sgfr(Rdst, Z_R0);                  // restore ptr
5110       z_sgfr(Rsrc, Z_R0);                  // restore ptr, double the element count for Rsrc restore
5111       z_sgfr(Rsrc, Z_R0);
5112       z_bru(AllDone);
5113     }
5114     bind(UnrolledDone);
5115   }
5116 
5117   {
5118     Label     ScalarLoop, ScalarDone, ScalarBreak;
5119 
5120     bind(ScalarShortcut);
5121     z_ltgfr(result, Rcnt);
5122     z_brz(AllDone);
5123 
5124     {
5125       //---<  Special treatment for very short strings (one or two characters)  >---
5126       //   For these strings, we are sure that the above code was skipped.
5127       //   Thus, no registers were modified, register restore is not required.
5128       Label     ScalarDoit, Scalar2Char;
5129       z_chi(Rcnt, 2);
5130       z_brh(ScalarDoit);
5131       z_llh(Z_R1,  0, Z_R0, Rsrc);
5132       z_bre(Scalar2Char);
5133       z_tmll(Z_R1, 0xff00);
5134       z_lghi(result, 0);                   // cnt == 1, first char invalid, no chars successfully processed
5135       z_brnaz(AllDone);
5136       z_stc(Z_R1,  0, Z_R0, Rdst);
5137       z_lghi(result, 1);
5138       z_bru(AllDone);
5139 
5140       bind(Scalar2Char);
5141       z_llh(Z_R0,  2, Z_R0, Rsrc);
5142       z_tmll(Z_R1, 0xff00);
5143       z_lghi(result, 0);                   // cnt == 2, first char invalid, no chars successfully processed
5144       z_brnaz(AllDone);
5145       z_stc(Z_R1,  0, Z_R0, Rdst);
5146       z_tmll(Z_R0, 0xff00);
5147       z_lghi(result, 1);                   // cnt == 2, second char invalid, one char successfully processed
5148       z_brnaz(AllDone);
5149       z_stc(Z_R0,  1, Z_R0, Rdst);
5150       z_lghi(result, 2);
5151       z_bru(AllDone);
5152 
5153       bind(ScalarDoit);
5154     }
5155 
5156     if (VM_Version::has_DistinctOpnds()) {
5157       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
5158     } else {
5159       z_lr(Rix, Rcnt);
5160       z_sr(Rix, Z_R0);
5161     }
5162     z_lgfr(result, Rcnt);                  // # processed characters (if all runs ok).
5163     z_brz(ScalarDone);
5164 
5165     bind(ScalarLoop);
5166       z_llh(Z_R1, 0, Z_R0, Rsrc);
5167       z_tmll(Z_R1, 0xff00);
5168       z_brnaz(ScalarBreak);
5169       z_stc(Z_R1, 0, Z_R0, Rdst);
5170       add2reg(Rsrc, 2);
5171       add2reg(Rdst, 1);
5172       z_brct(Rix, ScalarLoop);
5173 
5174     z_bru(ScalarDone);
5175 
5176     bind(ScalarBreak);
5177     z_sr(result, Rix);
5178 
5179     bind(ScalarDone);
5180     z_sgfr(Rdst, result);                  // restore ptr
5181     z_sgfr(Rsrc, result);                  // restore ptr, double the element count for Rsrc restore
5182     z_sgfr(Rsrc, result);
5183   }
5184   bind(AllDone);
5185 
5186   if (precise) {
5187     BLOCK_COMMENT("} encode_iso_array");
5188   } else {
5189     BLOCK_COMMENT("} string_compress");
5190   }
5191   return offset() - block_start;
5192 }
5193 
5194 // Inflate byte[] to char[].
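     // This variant presumably relies on the TRANSLATE ONE TO TWO (TROT) instruction: each source
     // byte is looked up in a 256-entry table of 2-byte results (generated by
     // StubRoutines::zarch::generate_load_trot_table_addr()), assumed to map byte b to char b.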
5195 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
5196   int block_start = offset();
5197 
5198   BLOCK_COMMENT("string_inflate {");
5199 
5200   Register stop_char = Z_R0;
5201   Register table     = Z_R1;
5202   Register src_addr  = tmp;
5203 
5204   assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
5205   assert(dst->encoding()%2 == 0, "must be even reg");
5206   assert(cnt->encoding()%2 == 1, "must be odd reg");
5207   assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
5208 
5209   StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
5210   clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
5211   lgr_if_needed(src_addr, src);
5212   z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.
5213 
5214   translate_ot(dst, src_addr, /* mask = */ 0x0001);
5215 
5216   BLOCK_COMMENT("} string_inflate");
5217 
5218   return offset() - block_start;
5219 }
5220 
5221 // Inflate byte[] to char[].
5222 //   Restores: src, dst
5223 //   Uses:     cnt
5224 //   Kills:    tmp, Z_R0, Z_R1.
5225 // Note:
5226 //   cnt is signed int. Do not rely on high word!
5227 //       counts # characters, not bytes.
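     // Inflation zero-extends each byte to a 2-byte char, e.g. byte 0x41 ('A') becomes char 0x0041.
     // The vector loop below processes 32 bytes per iteration, zero-extending byte elements to
     // halfwords via VUPLHB/VUPLLB (high and low halves of each source vector register).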
5228 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
5229   assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);
5230 
5231   BLOCK_COMMENT("string_inflate {");
5232   int block_start = offset();
5233 
5234   Register   Rcnt = cnt;   // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
5235   Register   Rix  = tmp;   // loop index
5236   Register   Rsrc = src;   // addr(src array)
5237   Register   Rdst = dst;   // addr(dst array)
5238   Label      ScalarShortcut, AllDone;
5239 
5240   {
5241     //---<  shortcuts for short strings (very frequent)   >---
5242     Label   skipShortcut, skip4Shortcut;
5243     z_ltr(Rcnt, Rcnt);                     // absolutely nothing to do for strings of len == 0.
5244     z_brz(AllDone);
5245     clear_reg(Z_R0);                       // make sure registers are properly initialized.
5246     clear_reg(Z_R1);
5247     z_chi(Rcnt, 4);
5248     z_brne(skip4Shortcut);                 // 4 characters are very frequent
5249       z_icm(Z_R0, 5,    0, Rsrc);          // Treat exactly 4 characters specially.
5250       z_icm(Z_R1, 5,    2, Rsrc);
5251       z_stm(Z_R0, Z_R1, 0, Rdst);
5252       z_bru(AllDone);
5253     bind(skip4Shortcut);
5254 
5255     z_chi(Rcnt, 8);
5256     z_brh(skipShortcut);                   // There's a lot to do...
5257     z_lgfr(Z_R0, Rcnt);                    // remaining #characters (<= 8). Precond for scalar loop.
5258                                            // This does not destroy the "register cleared" state of Z_R0.
5259     z_brl(ScalarShortcut);                 // Just a few characters
5260       z_icmh(Z_R0, 5, 0, Rsrc);            // Treat exactly 8 characters specially.
5261       z_icmh(Z_R1, 5, 4, Rsrc);
5262       z_icm(Z_R0,  5, 2, Rsrc);
5263       z_icm(Z_R1,  5, 6, Rsrc);
5264       z_stmg(Z_R0, Z_R1, 0, Rdst);
5265       z_bru(AllDone);
5266     bind(skipShortcut);
5267   }
5268   clear_reg(Z_R0);                         // make sure register is properly initialized.
5269 
5270   if (VM_Version::has_VectorFacility()) {
5271     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5272                                            // Otherwise just do nothing in vector mode.
5273                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5274     const int  log_min_vcnt = exact_log2(min_vcnt);
5275     Label      VectorLoop, VectorDone;
5276 
5277     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
5278     z_srak(Rix, Rcnt, log_min_vcnt);       // calculate # vector loop iterations
5279     z_brz(VectorDone);                     // skip if none
5280 
5281     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
5282 
5283     bind(VectorLoop);
5284       z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
5285       add2reg(Rsrc, min_vcnt);
5286 
5287       z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
5288       z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5289       z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5290       z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
5291       z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
5292       add2reg(Rdst, min_vcnt*2);
5293 
5294       z_brct(Rix, VectorLoop);
5295 
5296     bind(VectorDone);
5297   }
5298 
5299   const int  min_cnt     =  8;             // Minimum #characters required to use unrolled scalar loop.
5300                                            // Otherwise just do nothing in unrolled scalar mode.
5301                                            // Must be multiple of 8.
5302   {
5303     const int  log_min_cnt = exact_log2(min_cnt);
5304     Label      UnrolledLoop, UnrolledDone;
5305 
5306 
5307     if (VM_Version::has_DistinctOpnds()) {
5308       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to process in unrolled loop
5309     } else {
5310       z_lr(Rix, Rcnt);
5311       z_sr(Rix, Z_R0);
5312     }
5313     z_sra(Rix, log_min_cnt);               // unrolled loop count
5314     z_brz(UnrolledDone);
5315 
5316     clear_reg(Z_R0);
5317     clear_reg(Z_R1);
5318 
5319     bind(UnrolledLoop);
5320       z_icmh(Z_R0, 5, 0, Rsrc);
5321       z_icmh(Z_R1, 5, 4, Rsrc);
5322       z_icm(Z_R0,  5, 2, Rsrc);
5323       z_icm(Z_R1,  5, 6, Rsrc);
5324       add2reg(Rsrc, min_cnt);
5325 
5326       z_stmg(Z_R0, Z_R1, 0, Rdst);
5327 
5328       add2reg(Rdst, min_cnt*2);
5329       z_brct(Rix, UnrolledLoop);
5330 
5331     bind(UnrolledDone);
5332     z_lgfr(Z_R0, Rcnt);                    // # chars left over after unrolled loop.
5333     z_nilf(Z_R0, min_cnt-1);
5334     z_brnz(ScalarShortcut);                // Branch if leftover chars remain; if zero, nothing is left for the scalar loop.
5335                                            // Rix == 0 in all cases.
5336     z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
5337     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5338     z_agr(Rdst, Z_R0);
5339     z_agr(Rsrc, Z_R0);                     // restore ptr.
5340     z_bru(AllDone);
5341   }
5342 
5343   {
5344     bind(ScalarShortcut);
5345     // Z_R0 must contain remaining # characters as 64-bit signed int here.
5346     //      Register contents are preserved over scalar processing (for register fixup).
5347 
5348     {
5349       Label      ScalarDefault;
5350       z_chi(Rcnt, 2);
5351       z_brh(ScalarDefault);
5352       z_llc(Z_R0,  0, Z_R0, Rsrc);     // 6 bytes
5353       z_sth(Z_R0,  0, Z_R0, Rdst);     // 4 bytes
5354       z_brl(AllDone);
5355       z_llc(Z_R0,  1, Z_R0, Rsrc);     // 6 bytes
5356       z_sth(Z_R0,  2, Z_R0, Rdst);     // 4 bytes
5357       z_bru(AllDone);
5358       bind(ScalarDefault);
5359     }
5360 
5361     Label   CodeTable;
5362     // Some comments on Rix calculation:
5363     //  - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
5364     //  - high word of both Rix and Rcnt may contain garbage
5365     //  - the final lngfr takes care of that garbage, extending the sign to high word
5366     z_sllg(Rix, Z_R0, 2);                // calculate 10*Rix = (4*Rix + Rix)*2
5367     z_ar(Rix, Z_R0);
5368     z_larl(Z_R1, CodeTable);
5369     z_sll(Rix, 1);
5370     z_lngfr(Rix, Rix);      // ix range: [0..7], after inversion & mult by 10 (llc+sth = 10 bytes per entry): [-(7*10)..(0*10)].
5371     z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);
5372 
5373     z_llc(Z_R1,  6, Z_R0, Rsrc);  // 6 bytes
5374     z_sth(Z_R1, 12, Z_R0, Rdst);  // 4 bytes
5375 
5376     z_llc(Z_R1,  5, Z_R0, Rsrc);
5377     z_sth(Z_R1, 10, Z_R0, Rdst);
5378 
5379     z_llc(Z_R1,  4, Z_R0, Rsrc);
5380     z_sth(Z_R1,  8, Z_R0, Rdst);
5381 
5382     z_llc(Z_R1,  3, Z_R0, Rsrc);
5383     z_sth(Z_R1,  6, Z_R0, Rdst);
5384 
5385     z_llc(Z_R1,  2, Z_R0, Rsrc);
5386     z_sth(Z_R1,  4, Z_R0, Rdst);
5387 
5388     z_llc(Z_R1,  1, Z_R0, Rsrc);
5389     z_sth(Z_R1,  2, Z_R0, Rdst);
5390 
5391     z_llc(Z_R1,  0, Z_R0, Rsrc);
5392     z_sth(Z_R1,  0, Z_R0, Rdst);
5393     bind(CodeTable);
5394 
5395     z_chi(Rcnt, 8);                        // no fixup for small strings. Rdst, Rsrc were not modified.
5396     z_brl(AllDone);
5397 
5398     z_sgfr(Z_R0, Rcnt);                    // # characters the ptrs have been advanced previously.
5399     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5400     z_agr(Rdst, Z_R0);
5401     z_agr(Rsrc, Z_R0);                     // restore ptr.
5402   }
5403   bind(AllDone);
5404 
5405   BLOCK_COMMENT("} string_inflate");
5406   return offset() - block_start;
5407 }
5408 
5409 // Inflate byte[] to char[], length known at compile time.
5410 //   Restores: src, dst
5411 //   Kills:    tmp, Z_R0, Z_R1.
5412 // Note:
5413 //   len is signed int. Counts # characters, not bytes.
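// Because len is a compile-time constant, the code below is fully specialized: 32-char
// vector chunks, at most one 16-char vector chunk, 8-char unrolled chunks, and a switch
// over the remaining few (<= 8) characters. src_off/dst_off accumulate pointer advancement
// that is folded into displacements; Rsrc/Rdst are only modified (and later restored) when
// a loop actually advances them.
// Worked example (illustrative, assuming the vector facility is available): len == 23
// takes one 16-char vector chunk, skips the unrolled loop (7 <= 8 chars left), and
// finishes with the 'case 7' tail; Rsrc/Rdst stay untouched (restore_inputs == false).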
5414 unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
5415   assert_different_registers(Z_R0, Z_R1, src, dst, tmp);
5416 
5417   BLOCK_COMMENT("string_inflate_const {");
5418   int block_start = offset();
5419 
5420   Register   Rix  = tmp;   // loop index
5421   Register   Rsrc = src;   // addr(src array)
5422   Register   Rdst = dst;   // addr(dst array)
5423   Label      ScalarShortcut, AllDone;
5424   int        nprocessed = 0;
5425   int        src_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5426   int        dst_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5427   bool       restore_inputs = false;
5428   bool       workreg_clear  = false;
5429 
5430   if ((len >= 32) && VM_Version::has_VectorFacility()) {
5431     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5432                                            // Otherwise just do nothing in vector mode.
5433                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5434     const int  log_min_vcnt = exact_log2(min_vcnt);
5435     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5436     nprocessed             += iterations << log_min_vcnt;
5437     Label      VectorLoop;
5438 
5439     if (iterations == 1) {
5440       z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);  // get next 32 characters (single-byte)
5441       z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5442       z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5443       z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
5444       z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
5445       z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 bytes
5446 
5447       src_off += min_vcnt;
5448       dst_off += min_vcnt*2;
5449     } else {
5450       restore_inputs = true;
5451 
5452       z_lgfi(Rix, len>>log_min_vcnt);
5453       bind(VectorLoop);
5454         z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
5455         add2reg(Rsrc, min_vcnt);
5456 
5457         z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
5458         z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5459         z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5460         z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
5461         z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
5462         add2reg(Rdst, min_vcnt*2);
5463 
5464         z_brct(Rix, VectorLoop);
5465     }
5466   }
5467 
5468   if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
5469     const int  min_vcnt     = 16;          // Minimum #characters required to use vector instructions.
5470                                            // Otherwise just do nothing in vector mode.
5471                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5472     const int  log_min_vcnt = exact_log2(min_vcnt);
5473     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5474     nprocessed             += iterations << log_min_vcnt;
5475     assert(iterations == 1, "must be!");
5476 
5477     z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);    // get next 16 characters (single-byte)
5478     z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5479     z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5480     z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes
5481 
5482     src_off += min_vcnt;
5483     dst_off += min_vcnt*2;
5484   }
5485 
5486   if ((len-nprocessed) > 8) {
5487     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled scalar loop.
5488                                            // Otherwise just do nothing in unrolled scalar mode.
5489                                            // Must be multiple of 8.
5490     const int  log_min_cnt = exact_log2(min_cnt);
5491     const int  iterations  = (len - nprocessed) >> log_min_cnt;
5492     nprocessed     += iterations << log_min_cnt;
5493 
5494     //---<  avoid loop overhead/ptr increment for small # iterations  >---
5495     if (iterations <= 2) {
5496       clear_reg(Z_R0);
5497       clear_reg(Z_R1);
5498       workreg_clear = true;
5499 
5500       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5501       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5502       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5503       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5504       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5505 
5506       src_off += min_cnt;
5507       dst_off += min_cnt*2;
5508     }
5509 
5510     if (iterations == 2) {
5511       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5512       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5513       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5514       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5515       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5516 
5517       src_off += min_cnt;
5518       dst_off += min_cnt*2;
5519     }
5520 
5521     if (iterations > 2) {
5522       Label      UnrolledLoop;
5523       restore_inputs  = true;
5524 
5525       clear_reg(Z_R0);
5526       clear_reg(Z_R1);
5527       workreg_clear = true;
5528 
5529       z_lgfi(Rix, iterations);
5530       bind(UnrolledLoop);
5531         z_icmh(Z_R0, 5, 0, Rsrc);
5532         z_icmh(Z_R1, 5, 4, Rsrc);
5533         z_icm(Z_R0,  5, 2, Rsrc);
5534         z_icm(Z_R1,  5, 6, Rsrc);
5535         add2reg(Rsrc, min_cnt);
5536 
5537         z_stmg(Z_R0, Z_R1, 0, Rdst);
5538         add2reg(Rdst, min_cnt*2);
5539 
5540         z_brct(Rix, UnrolledLoop);
5541     }
5542   }
5543 
5544   if ((len-nprocessed) > 0) {
5545     switch (len-nprocessed) {
5546       case 8:
5547         if (!workreg_clear) {
5548           clear_reg(Z_R0);
5549           clear_reg(Z_R1);
5550         }
5551         z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5552         z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5553         z_icm(Z_R0,  5, 2+src_off, Rsrc);
5554         z_icm(Z_R1,  5, 6+src_off, Rsrc);
5555         z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5556         break;
5557       case 7:
5558         if (!workreg_clear) {
5559           clear_reg(Z_R0);
5560           clear_reg(Z_R1);
5561         }
5562         clear_reg(Rix);
5563         z_icm(Z_R0,  5, 0+src_off, Rsrc);
5564         z_icm(Z_R1,  5, 2+src_off, Rsrc);
5565         z_icm(Rix,   5, 4+src_off, Rsrc);
5566         z_stm(Z_R0,  Z_R1, 0+dst_off, Rdst);
5567         z_llc(Z_R0,  6+src_off, Z_R0, Rsrc);
5568         z_st(Rix,    8+dst_off, Z_R0, Rdst);
5569         z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
5570         break;
5571       case 6:
5572         if (!workreg_clear) {
5573           clear_reg(Z_R0);
5574           clear_reg(Z_R1);
5575         }
5576         clear_reg(Rix);
5577         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5578         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5579         z_icm(Rix,  5, 4+src_off, Rsrc);
5580         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5581         z_st(Rix,   8+dst_off, Z_R0, Rdst);
5582         break;
5583       case 5:
5584         if (!workreg_clear) {
5585           clear_reg(Z_R0);
5586           clear_reg(Z_R1);
5587         }
5588         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5589         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5590         z_llc(Rix,  4+src_off, Z_R0, Rsrc);
5591         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5592         z_sth(Rix,  8+dst_off, Z_R0, Rdst);
5593         break;
5594       case 4:
5595         if (!workreg_clear) {
5596           clear_reg(Z_R0);
5597           clear_reg(Z_R1);
5598         }
5599         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5600         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5601         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5602         break;
5603       case 3:
5604         if (!workreg_clear) {
5605           clear_reg(Z_R0);
5606         }
5607         z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
5608         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5609         z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
5610         z_st(Z_R0,  0+dst_off, Rdst);
5611         break;
5612       case 2:
5613         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5614         z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
5615         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5616         z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
5617         break;
5618       case 1:
5619         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5620         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5621         break;
5622       default:
5623         guarantee(false, "Impossible");
5624         break;
5625     }
5626     src_off   +=  len-nprocessed;
5627     dst_off   += (len-nprocessed)*2;
5628     nprocessed = len;
5629   }
5630 
5631   //---< restore modified input registers  >---
5632   if ((nprocessed > 0) && restore_inputs) {
5633     z_agfi(Rsrc, -(nprocessed-src_off));
5634     if (nprocessed < 1000000000) { // avoid int overflow
5635       z_agfi(Rdst, -(nprocessed*2-dst_off));
5636     } else {
5637       z_agfi(Rdst, -(nprocessed-dst_off));
5638       z_agfi(Rdst, -nprocessed);
5639     }
5640   }
5641 
5642   BLOCK_COMMENT("} string_inflate_const");
5643   return offset() - block_start;
5644 }
5645 
5646 // Kills src.
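// Sets result to 1 if any of the cnt bytes at src has its most significant bit set
// (i.e. is negative when read as a signed byte), and to 0 otherwise.
// Reference sketch (illustrative only):
//   for (int i = 0; i < cnt; i++) { if (src[i] & 0x80) return 1; }
//   return 0;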
5647 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
5648                                            Register odd_reg, Register even_reg, Register tmp) {
5649   int block_start = offset();
5650   Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
5651   const Register addr = src, mask = tmp;
5652 
5653   BLOCK_COMMENT("has_negatives {");
5654 
5655   z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
5656   z_llilf(mask, 0x80808080);
5657   z_lhi(result, 1);        // Assume true.
5658   // Last possible addr for fast loop.
5659   z_lay(odd_reg, -16, Z_R1, src);
5660   z_chi(cnt, 16);
5661   z_brl(Lslow);
5662 
5663   // ind1: index, even_reg: index increment, odd_reg: index limit
5664   z_iihf(mask, 0x80808080);
5665   z_lghi(even_reg, 16);
5666 
5667   bind(Lloop1); // 16 bytes per iteration.
5668   z_lg(Z_R0, Address(addr));
5669   z_lg(Z_R1, Address(addr, 8));
5670   z_ogr(Z_R0, Z_R1);
5671   z_ngr(Z_R0, mask);
5672   z_brne(Ldone);           // If found return 1.
5673   z_brxlg(addr, even_reg, Lloop1);
5674 
5675   bind(Lslow);
5676   z_aghi(odd_reg, 16-1);   // Last possible addr for slow loop.
5677   z_lghi(even_reg, 1);
5678   z_cgr(addr, odd_reg);
5679   z_brh(Lnotfound);
5680 
5681   bind(Lloop2); // 1 byte per iteration.
5682   z_cli(Address(addr), 0x80);
5683   z_brnl(Ldone);           // If found return 1.
5684   z_brxlg(addr, even_reg, Lloop2);
5685 
5686   bind(Lnotfound);
5687   z_lhi(result, 0);
5688 
5689   bind(Ldone);
5690 
5691   BLOCK_COMMENT("} has_negatives");
5692 
5693   return offset() - block_start;
5694 }
5695 
5696 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
5697 unsigned int MacroAssembler::string_compare(Register str1, Register str2,
5698                                             Register cnt1, Register cnt2,
5699                                             Register odd_reg, Register even_reg, Register result, int ae) {
5700   int block_start = offset();
5701 
5702   assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
5703   assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
5704 
5705   // If strings are equal up to min length, return the length difference.
5706   const Register diff = result, // Pre-set result with length difference.
5707                  min  = cnt1,   // min number of bytes
5708                  tmp  = cnt2;
5709 
5710   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
5711   // we interchange str1 and str2 in the UL case and negate the result.
5712   // This way, str1 is always latin1 encoded, except in the UU case.
5713   // In addition, we need to zero-extend (sign extension is equivalent, as the sign is 0) when using a 64-bit register.
5714   const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
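  // Worked example of the result contract (illustrative, LL encoding): "abc" vs "abd"
  // yields 'c' - 'd' = -1 (difference of the first mismatching chars); "ab" vs "abcd"
  // matches over the common prefix, so the pre-computed length difference 2 - 4 = -2
  // is returned.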
5715 
5716   BLOCK_COMMENT("string_compare {");
5717 
5718   if (used_as_LU) {
5719     z_srl(cnt2, 1);
5720   }
5721 
5722   // See if the lengths are different, and calculate min in cnt1.
5723   // Save diff in case we need it for a tie-breaker.
5724 
5725   // diff = cnt1 - cnt2
5726   if (VM_Version::has_DistinctOpnds()) {
5727     z_srk(diff, cnt1, cnt2);
5728   } else {
5729     z_lr(diff, cnt1);
5730     z_sr(diff, cnt2);
5731   }
5732   if (str1 != str2) {
5733     if (VM_Version::has_LoadStoreConditional()) {
5734       z_locr(min, cnt2, Assembler::bcondHigh);
5735     } else {
5736       Label Lskip;
5737       z_brl(Lskip);    // min ok if cnt1 < cnt2
5738       z_lr(min, cnt2); // min = cnt2
5739       bind(Lskip);
5740     }
5741   }
5742 
5743   if (ae == StrIntrinsicNode::UU) {
5744     z_sra(diff, 1);
5745   }
5746   if (str1 != str2) {
5747     Label Ldone;
5748     if (used_as_LU) {
5749       // Loop which searches the first difference character by character.
5750       Label Lloop;
5751       const Register ind1 = Z_R1,
5752                      ind2 = min;
5753       int stride1 = 1, stride2 = 2; // See comment above.
5754 
5755       // ind1: index, even_reg: index increment, odd_reg: index limit
5756       z_llilf(ind1, (unsigned int)(-stride1));
5757       z_lhi(even_reg, stride1);
5758       add2reg(odd_reg, -stride1, min);
5759       clear_reg(ind2); // kills min
5760 
5761       bind(Lloop);
5762       z_brxh(ind1, even_reg, Ldone);
5763       z_llc(tmp, Address(str1, ind1));
5764       z_llh(Z_R0, Address(str2, ind2));
5765       z_ahi(ind2, stride2);
5766       z_sr(tmp, Z_R0);
5767       z_bre(Lloop);
5768 
5769       z_lr(result, tmp);
5770 
5771     } else {
5772       // Use clcle in fast loop (only for same encoding).
5773       z_lgr(Z_R0, str1);
5774       z_lgr(even_reg, str2);
5775       z_llgfr(Z_R1, min);
5776       z_llgfr(odd_reg, min);
5777 
5778       if (ae == StrIntrinsicNode::LL) {
5779         compare_long_ext(Z_R0, even_reg, 0);
5780       } else {
5781         compare_long_uni(Z_R0, even_reg, 0);
5782       }
5783       z_bre(Ldone);
5784       z_lgr(Z_R1, Z_R0);
5785       if (ae == StrIntrinsicNode::LL) {
5786         z_llc(Z_R0, Address(even_reg));
5787         z_llc(result, Address(Z_R1));
5788       } else {
5789         z_llh(Z_R0, Address(even_reg));
5790         z_llh(result, Address(Z_R1));
5791       }
5792       z_sr(result, Z_R0);
5793     }
5794 
5795     // Otherwise, return the difference between the first mismatched chars.
5796     bind(Ldone);
5797   }
5798 
5799   if (ae == StrIntrinsicNode::UL) {
5800     z_lcr(result, result); // Negate result (see note above).
5801   }
5802 
5803   BLOCK_COMMENT("} string_compare");
5804 
5805   return offset() - block_start;
5806 }
5807 
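// Compare two arrays (is_array_equ == true) or two plain memory regions for equality.
// Reference sketch for the array case (illustrative only):
//   result = (ary1 == ary2) ||
//            (ary1 != NULL && ary2 != NULL &&
//             length(ary1) == length(ary2) &&
//             memcmp(data(ary1), data(ary2), length_in_bytes) == 0) ? 1 : 0;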
5808 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
5809                                           Register odd_reg, Register even_reg, Register result, bool is_byte) {
5810   int block_start = offset();
5811 
5812   BLOCK_COMMENT("array_equals {");
5813 
5814   assert_different_registers(ary1, limit, odd_reg, even_reg);
5815   assert_different_registers(ary2, limit, odd_reg, even_reg);
5816 
5817   Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
5818   int base_offset = 0;
5819 
5820   if (ary1 != ary2) {
5821     if (is_array_equ) {
5822       base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
5823 
5824       // Return true if the same array.
5825       compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
5826 
5827       // Return false if one of them is NULL.
5828       compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5829       compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5830 
5831       // Load the lengths of arrays.
5832       z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));
5833 
5834       // Return false if the two arrays are not equal length.
5835       z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
5836       z_brne(Ldone_false);
5837 
5838       // string len in bytes (right operand)
5839       if (!is_byte) {
5840         z_chi(odd_reg, 128);
5841         z_sll(odd_reg, 1); // preserves flags
5842         z_brh(Lclcle);
5843       } else {
5844         compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5845       }
5846     } else {
5847       z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
5848       compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5849     }
5850 
5851 
5852     // Use clc instruction for up to 256 bytes.
5853     {
5854       Register str1_reg = ary1,
5855           str2_reg = ary2;
5856       if (is_array_equ) {
5857         str1_reg = Z_R1;
5858         str2_reg = even_reg;
5859         add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
5860         add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
5861       }
5862       z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
5863       z_brl(Ldone_true);
5864       // Note: We could jump to the template if equal.
5865 
5866       assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5867       z_exrl(odd_reg, CLC_template);
5868       z_bre(Ldone_true);
5869       // fall through
5870 
5871       bind(Ldone_false);
5872       clear_reg(result);
5873       z_bru(Ldone);
5874 
5875       bind(CLC_template);
5876       z_clc(0, 0, str1_reg, 0, str2_reg);
5877     }
5878 
5879     // Use clcle instruction.
5880     {
5881       bind(Lclcle);
5882       add2reg(even_reg, base_offset, ary2); // string addr (right operand)
5883       add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)
5884 
5885       z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
5886       if (is_byte) {
5887         compare_long_ext(Z_R0, even_reg, 0);
5888       } else {
5889         compare_long_uni(Z_R0, even_reg, 0);
5890       }
5891       z_lghi(result, 0); // Preserve flags.
5892       z_brne(Ldone);
5893     }
5894   }
5895   // fall through
5896 
5897   bind(Ldone_true);
5898   z_lghi(result, 1); // All characters are equal.
5899   bind(Ldone);
5900 
5901   BLOCK_COMMENT("} array_equals");
5902 
5903   return offset() - block_start;
5904 }
5905 
5906 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
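// Sets result to the character index of the first occurrence of needle in haystack,
// or to -1 if there is no match. Reference sketch for matching encodings (illustrative
// only; in the UL case each needle byte is widened before comparing):
//   for (int i = 0; i + needle_len <= hay_len; i++) {
//     if (memcmp(&haystack[i], &needle[0], needle_bytes) == 0) return i;
//   }
//   return -1;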
5907 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
5908                                             Register needle, Register needlecnt, int needlecntval,
5909                                             Register odd_reg, Register even_reg, int ae) {
5910   int block_start = offset();
5911 
5912   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
5913   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
5914   const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
5915   const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
5916   Label L_needle1, L_Found, L_NotFound;
5917 
5918   BLOCK_COMMENT("string_indexof {");
5919 
5920   if (needle == haystack) {
5921     z_lhi(result, 0);
5922   } else {
5923 
5924   // Load first character of needle (R0 used by search_string instructions).
5925   if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }
5926 
5927   // Compute last haystack addr to use if no match gets found.
5928   if (needlecnt != noreg) { // variable needlecnt
5929     z_ahi(needlecnt, -1); // Remaining characters after first one.
5930     z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
5931     if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
5932   } else { // constant needlecnt
5933     assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
5934     // Compute index succeeding last element to compare.
5935     if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
5936   }
5937 
5938   z_llgfr(haycnt, haycnt); // Clear high half.
5939   z_lgr(result, haystack); // Final result will be computed from needle start pointer.
5940   if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
5941   z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).
5942 
5943   if (h_csize != n_csize) {
5944     assert(ae == StrIntrinsicNode::UL, "Invalid encoding");
5945 
5946     if (needlecnt != noreg || needlecntval != 1) {
5947       if (needlecnt != noreg) {
5948         compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
5949       }
5950 
5951       // Main Loop: UL version (now we have at least 2 characters).
5952       Label L_OuterLoop, L_InnerLoop, L_Skip;
5953       bind(L_OuterLoop); // Search for 1st 2 characters.
5954       z_lgr(Z_R1, haycnt);
5955       MacroAssembler::search_string_uni(Z_R1, result);
5956       z_brc(Assembler::bcondNotFound, L_NotFound);
5957       z_lgr(result, Z_R1);
5958 
5959       z_lghi(Z_R1, n_csize);
5960       z_lghi(even_reg, h_csize);
5961       bind(L_InnerLoop);
5962       z_llgc(odd_reg, Address(needle, Z_R1));
5963       z_ch(odd_reg, Address(result, even_reg));
5964       z_brne(L_Skip);
5965       if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
5966       z_brnl(L_Found);
5967       z_aghi(Z_R1, n_csize);
5968       z_aghi(even_reg, h_csize);
5969       z_bru(L_InnerLoop);
5970 
5971       bind(L_Skip);
5972       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5973       z_bru(L_OuterLoop);
5974     }
5975 
5976   } else {
5977     const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
5978     Label L_clcle;
5979 
5980     if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
5981       if (needlecnt != noreg) {
5982         compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
5983         z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
5984         z_brl(L_needle1);
5985       }
5986 
5987       // Main Loop: clc version (now we have at least 2 characters).
5988       Label L_OuterLoop, CLC_template;
5989       bind(L_OuterLoop); // Search for 1st 2 characters.
5990       z_lgr(Z_R1, haycnt);
5991       if (h_csize == 1) {
5992         MacroAssembler::search_string(Z_R1, result);
5993       } else {
5994         MacroAssembler::search_string_uni(Z_R1, result);
5995       }
5996       z_brc(Assembler::bcondNotFound, L_NotFound);
5997       z_lgr(result, Z_R1);
5998 
5999       if (needlecnt != noreg) {
6000         assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
6001         z_exrl(needlecnt, CLC_template);
6002       } else {
6003         z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle);
6004       }
6005       z_bre(L_Found);
6006       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
6007       z_bru(L_OuterLoop);
6008 
6009       if (needlecnt != noreg) {
6010         bind(CLC_template);
6011         z_clc(h_csize, 0, Z_R1, n_csize, needle);
6012       }
6013     }
6014 
6015     if (needlecnt != noreg || needle_bytes > 256) {
6016       bind(L_clcle);
6017 
6018       // Main Loop: clcle version (now we have at least 256 bytes).
6019       Label L_OuterLoop, CLC_template;
6020       bind(L_OuterLoop); // Search for 1st 2 characters.
6021       z_lgr(Z_R1, haycnt);
6022       if (h_csize == 1) {
6023         MacroAssembler::search_string(Z_R1, result);
6024       } else {
6025         MacroAssembler::search_string_uni(Z_R1, result);
6026       }
6027       z_brc(Assembler::bcondNotFound, L_NotFound);
6028 
6029       add2reg(Z_R0, n_csize, needle);
6030       add2reg(even_reg, h_csize, Z_R1);
6031       z_lgr(result, Z_R1);
6032       if (needlecnt != noreg) {
6033         z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
6034         z_llgfr(odd_reg, needlecnt);
6035       } else {
6036         load_const_optimized(Z_R1, needle_bytes);
6037         if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
6038       }
6039       if (h_csize == 1) {
6040         compare_long_ext(Z_R0, even_reg, 0);
6041       } else {
6042         compare_long_uni(Z_R0, even_reg, 0);
6043       }
6044       z_bre(L_Found);
6045 
6046       if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
6047       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
6048       z_bru(L_OuterLoop);
6049     }
6050   }
6051 
6052   if (needlecnt != noreg || needlecntval == 1) {
6053     bind(L_needle1);
6054 
6055     // Single needle character version.
6056     if (h_csize == 1) {
6057       MacroAssembler::search_string(haycnt, result);
6058     } else {
6059       MacroAssembler::search_string_uni(haycnt, result);
6060     }
6061     z_lgr(result, haycnt);
6062     z_brc(Assembler::bcondFound, L_Found);
6063   }
6064 
6065   bind(L_NotFound);
6066   add2reg(result, -1, haystack); // Return -1.
6067 
6068   bind(L_Found); // Return index (or -1 in fallthrough case).
6069   z_sgr(result, haystack);
6070   if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
6071   }
6072   BLOCK_COMMENT("} string_indexof");
6073 
6074   return offset() - block_start;
6075 }
6076 
6077 // early clobber: result
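// Sets result to the index of the first occurrence of the search character in haystack
// (needleChar if needle == noreg, otherwise the value passed in needle), or to -1 if
// the character does not occur. Reference sketch (illustrative only):
//   for (int i = 0; i < haycnt; i++) { if (haystack[i] == ch) return i; }
//   return -1;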
6078 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
6079                                                  Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
6080   int block_start = offset();
6081 
6082   BLOCK_COMMENT("string_indexof_char {");
6083 
6084   if (needle == haystack) {
6085     z_lhi(result, 0);
6086   } else {
6087 
6088   Label Ldone;
6089 
6090   z_llgfr(odd_reg, haycnt);  // Preset loop ctr/searchrange end.
6091   if (needle == noreg) {
6092     load_const_optimized(Z_R0, (unsigned long)needleChar);
6093   } else {
6094     if (is_byte) {
6095       z_llgcr(Z_R0, needle); // First (and only) needle char.
6096     } else {
6097       z_llghr(Z_R0, needle); // First (and only) needle char.
6098     }
6099   }
6100 
6101   if (!is_byte) {
6102     z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
6103   }
6104 
6105   z_lgr(even_reg, haystack); // haystack addr
6106   z_agr(odd_reg, haystack);  // First char after range end.
6107   z_lghi(result, -1);
6108 
6109   if (is_byte) {
6110     MacroAssembler::search_string(odd_reg, even_reg);
6111   } else {
6112     MacroAssembler::search_string_uni(odd_reg, even_reg);
6113   }
6114   z_brc(Assembler::bcondNotFound, Ldone);
6115   if (is_byte) {
6116     if (VM_Version::has_DistinctOpnds()) {
6117       z_sgrk(result, odd_reg, haystack);
6118     } else {
6119       z_sgr(odd_reg, haystack);
6120       z_lgr(result, odd_reg);
6121     }
6122   } else {
6123     z_slgr(odd_reg, haystack);
6124     z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
6125   }
6126 
6127   bind(Ldone);
6128   }
6129   BLOCK_COMMENT("} string_indexof_char");
6130 
6131   return offset() - block_start;
6132 }
6133 
6134 
6135 //-------------------------------------------------
6136 //   Constants (scalar and oop) in constant pool
6137 //-------------------------------------------------
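// Constants are materialized by storing them into the constant pool (TOC) and emitting
// a PC-relative load at the use site. Illustrative usage sketch (caller-side handling
// is an assumption, not taken from this file; Rtoc parameter omitted for brevity):
//   AddressLiteral val((address)my_runtime_table);       // 'my_runtime_table' is hypothetical
//   if (!load_const_from_toc(Z_R1, val)) {
//     // Returned false: no constant pool entry could be created; the caller has to
//     // bail out or materialize the constant inline instead.
//   }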
6138 
6139 // Add a non-relocated constant to the CP.
6140 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
6141   long    value  = val.value();
6142   address tocPos = long_constant(value);
6143 
6144   if (tocPos != NULL) {
6145     int tocOffset = (int)(tocPos - code()->consts()->start());
6146     return tocOffset;
6147   }
6148   // long_constant() returned NULL, so no constant pool entry has been created.
6149   // In that case, we return a "fatal" offset, in case the subsequently
6150   // generated access code is ever executed.
6151   return -1;
6152 }
6153 
6154 // Returns the TOC offset where the address is stored.
6155 // Add a relocated constant to the CP.
6156 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
6157   // Use RelocationHolder::none for the constant pool entry.
6158   // Otherwise we will end up with a failing NativeCall::verify(x),
6159   // where x is the address of the constant pool entry.
6160   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
6161 
6162   if (tocPos != NULL) {
6163     int              tocOffset = (int)(tocPos - code()->consts()->start());
6164     RelocationHolder rsp = oop.rspec();
6165     Relocation      *rel = rsp.reloc();
6166 
6167     // Store toc_offset in relocation, used by call_far_patchable.
6168     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
6169       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
6170     }
6171     // Relocate at the load's pc.
6172     relocate(rsp);
6173 
6174     return tocOffset;
6175   }
6176   // address_constant() returned NULL, so no constant pool entry has been created.
6177   // In that case, we return a "fatal" offset, in case the subsequently
6178   // generated access code is ever executed.
6179   return -1;
6180 }
6181 
6182 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
6183   int     tocOffset = store_const_in_toc(a);
6184   if (tocOffset == -1) return false;
6185   address tocPos    = tocOffset + code()->consts()->start();
6186   assert((address)code()->consts()->start() != NULL, "Please add CP address");
6187 
6188   load_long_pcrelative(dst, tocPos);
6189   return true;
6190 }
6191 
6192 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
6193   int     tocOffset = store_oop_in_toc(a);
6194   if (tocOffset == -1) return false;
6195   address tocPos    = tocOffset + code()->consts()->start();
6196   assert((address)code()->consts()->start() != NULL, "Please add CP address");
6197 
6198   load_addr_pcrelative(dst, tocPos);
6199   return true;
6200 }
6201 
6202 // If the instruction sequence at the given pc is a load_const_from_toc
6203 // sequence, return the value currently stored at the referenced position
6204 // in the TOC.
6205 intptr_t MacroAssembler::get_const_from_toc(address pc) {
6206 
6207   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
6208 
6209   long    offset  = get_load_const_from_toc_offset(pc);
6210   address dataLoc = NULL;
6211   if (is_load_const_from_toc_pcrelative(pc)) {
6212     dataLoc = pc + offset;
6213   } else {
6214     CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
6215     assert(cb && cb->is_nmethod(), "sanity");
6216     nmethod* nm = (nmethod*)cb;
6217     dataLoc = nm->ctable_begin() + offset;
6218   }
6219   return *(intptr_t *)dataLoc;
6220 }
6221 
6222 // If the instruction sequence at the given pc is a load_const_from_toc
6223 // sequence, copy the passed-in new_data value into the referenced
6224 // position in the TOC.
6225 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
6226   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
6227 
6228   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
6229   address dataLoc = NULL;
6230   if (is_load_const_from_toc_pcrelative(pc)) {
6231     dataLoc = pc+offset;
6232   } else {
6233     nmethod* nm = CodeCache::find_nmethod(pc);
6234     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
6235     dataLoc = nm->ctable_begin() + offset;
6236   }
6237   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
6238     *(unsigned long *)dataLoc = new_data;
6239   }
6240 }
6241 
6242 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
6243 // site. Verify by calling is_load_const_from_toc() before!!
6244 // Offset is +/- 2**32 -> use long.
6245 long MacroAssembler::get_load_const_from_toc_offset(address a) {
6246   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
6247   //  expected code sequence:
6248   //    z_lgrl(t, simm32);    len = 6
6249   unsigned long inst;
6250   unsigned int  len = get_instruction(a, &inst);
6251   return get_pcrel_offset(inst);
6252 }
6253 
6254 //**********************************************************************************
6255 //  inspection of generated instruction sequences for a particular pattern
6256 //**********************************************************************************
6257 
6258 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
6259 #ifdef ASSERT
6260   unsigned long inst;
6261   unsigned int  len = get_instruction(a+2, &inst);
6262   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
6263     const int range = 128;
6264     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
6265     VM_Version::z_SIGSEGV();
6266   }
6267 #endif
6268   // expected code sequence:
6269   //   z_lgrl(t, relAddr32);    len = 6
6270   //TODO: verify accessed data is in CP, if possible.
6271   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
6272 }
6273 
6274 bool MacroAssembler::is_load_const_from_toc_call(address a) {
6275   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
6276 }
6277 
6278 bool MacroAssembler::is_load_const_call(address a) {
6279   return is_load_const(a) && is_call_byregister(a + load_const_size());
6280 }
6281 
6282 //-------------------------------------------------
6283 //   Emitters for some really CISC instructions
6284 //-------------------------------------------------
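// All emitters below share one pattern: the underlying instruction may complete only
// partially and end with CC == 3, so it is wrapped in a retry loop that re-issues it
// until it reports full completion. Pattern sketch (mirrors the emitters that follow):
//   Label retry;
//   bind(retry);
//   Assembler::z_mvcle(dst, src, pad);                                   // may end with CC == 3
//   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);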
6285 
6286 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
6287   assert(dst->encoding()%2==0, "must be an even/odd register pair");
6288   assert(src->encoding()%2==0, "must be an even/odd register pair");
6289   assert(pad<256, "must be a padding BYTE");
6290 
6291   Label retry;
6292   bind(retry);
6293   Assembler::z_mvcle(dst, src, pad);
6294   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6295 }
6296 
6297 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
6298   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6299   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6300   assert(pad<256, "must be a padding BYTE");
6301 
6302   Label retry;
6303   bind(retry);
6304   Assembler::z_clcle(left, right, pad, Z_R0);
6305   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6306 }
6307 
6308 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
6309   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6310   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6311   assert(pad<=0xfff, "must be a padding HALFWORD");
6312   assert(VM_Version::has_ETF2(), "instruction must be available");
6313 
6314   Label retry;
6315   bind(retry);
6316   Assembler::z_clclu(left, right, pad, Z_R0);
6317   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6318 }
6319 
6320 void MacroAssembler::search_string(Register end, Register start) {
6321   assert(end->encoding() != 0, "end address must not be in R0");
6322   assert(start->encoding() != 0, "start address must not be in R0");
6323 
6324   Label retry;
6325   bind(retry);
6326   Assembler::z_srst(end, start);
6327   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6328 }
6329 
6330 void MacroAssembler::search_string_uni(Register end, Register start) {
6331   assert(end->encoding() != 0, "end address must not be in R0");
6332   assert(start->encoding() != 0, "start address must not be in R0");
6333   assert(VM_Version::has_ETF3(), "instruction must be available");
6334 
6335   Label retry;
6336   bind(retry);
6337   Assembler::z_srstu(end, start);
6338   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6339 }
6340 
6341 void MacroAssembler::kmac(Register srcBuff) {
6342   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6343   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6344 
6345   Label retry;
6346   bind(retry);
6347   Assembler::z_kmac(Z_R0, srcBuff);
6348   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6349 }
6350 
6351 void MacroAssembler::kimd(Register srcBuff) {
6352   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6353   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6354 
6355   Label retry;
6356   bind(retry);
6357   Assembler::z_kimd(Z_R0, srcBuff);
6358   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6359 }
6360 
6361 void MacroAssembler::klmd(Register srcBuff) {
6362   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6363   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6364 
6365   Label retry;
6366   bind(retry);
6367   Assembler::z_klmd(Z_R0, srcBuff);
6368   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6369 }
6370 
6371 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
6372   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6373   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6374   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6375   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6376   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6377 
6378   Label retry;
6379   bind(retry);
6380   Assembler::z_km(dstBuff, srcBuff);
6381   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6382 }
6383 
6384 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
6385   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6386   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6387   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6388   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6389   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6390 
6391   Label retry;
6392   bind(retry);
6393   Assembler::z_kmc(dstBuff, srcBuff);
6394   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6395 }
6396 
6397 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
6398   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6399 
6400   Label retry;
6401   bind(retry);
6402   Assembler::z_cksm(crcBuff, srcBuff);
6403   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6404 }
6405 
6406 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
6407   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6408   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6409 
6410   Label retry;
6411   bind(retry);
6412   Assembler::z_troo(r1, r2, m3);
6413   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6414 }
6415 
6416 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
6417   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6418   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6419 
6420   Label retry;
6421   bind(retry);
6422   Assembler::z_trot(r1, r2, m3);
6423   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6424 }
6425 
6426 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
6427   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6428   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6429 
6430   Label retry;
6431   bind(retry);
6432   Assembler::z_trto(r1, r2, m3);
6433   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6434 }
6435 
6436 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
6437   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6438   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6439 
6440   Label retry;
6441   bind(retry);
6442   Assembler::z_trtt(r1, r2, m3);
6443   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6444 }
6445 
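// Emit a safepoint poll: branch to slow_path if a safepoint is pending.
// Reference sketch of the emitted check (illustrative only):
//   if (SafepointSynchronize::state() != SafepointSynchronize::_not_synchronized) goto slow_path;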
6446 void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) {
6447   if (scratch == noreg) scratch = Z_R1;
6448   address Astate = SafepointSynchronize::address_of_state();
6449   BLOCK_COMMENT("safepoint check:");
6450 
6451   if (may_relocate) {
6452     ptrdiff_t total_distance = Astate - this->pc();
6453     if (RelAddr::is_in_range_of_RelAddr32(total_distance)) {
6454       RelocationHolder rspec = external_word_Relocation::spec(Astate);
6455       (this)->relocate(rspec, relocInfo::pcrel_addr_format);
6456       load_absolute_address(scratch, Astate);
6457     } else {
6458       load_const_optimized(scratch, Astate);
6459     }
6460   } else {
6461     load_absolute_address(scratch, Astate);
6462   }
6463   z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized);
6464   z_brne(slow_path);
6465 }
6466 
6467 
6468 void MacroAssembler::generate_type_profiling(const Register Rdata,
6469                                              const Register Rreceiver_klass,
6470                                              const Register Rwanted_receiver_klass,
6471                                              const Register Rmatching_row,
6472                                              bool is_virtual_call) {
6473   const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
6474                        in_bytes(ReceiverTypeData::receiver_offset(0));
6475   const int num_rows = ReceiverTypeData::row_limit();
6476   NearLabel found_free_row;
6477   NearLabel do_increment;
6478   NearLabel found_no_slot;
6479 
6480   BLOCK_COMMENT("type profiling {");
6481 
6482   // search for:
6483   //    a) The type given in Rwanted_receiver_klass.
6484   //    b) The *first* empty row.
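  // Overall reference sketch of the emitted logic (illustrative only; 'rows' and
  // 'total_count' are descriptive names, not taken from this file):
  //   for (row = 0; row < num_rows; row++)
  //     if (rows[row].receiver == receiver_klass) { rows[row].count++; return; }
  //   for (row = 0; row < num_rows; row++)
  //     if (rows[row].receiver == 0) { rows[row].receiver = receiver_klass; rows[row].count++; return; }
  //   if (is_virtual_call) total_count++;   // no match and no free row: polymorphic case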
6485 
6486   // First search for a) only, ignoring empty rows (b) along the way.
6487   // This is possible because
6488   //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
6489   // is never true (receiver_class can't be zero).
6490   for (int row_num = 0; row_num < num_rows; row_num++) {
6491     // Row_offset should be a well-behaved positive number. The generated code relies
6492     // on that wrt constant code size. Add2reg can handle all row_offset values, but
6493     // will have to vary generated code size.
6494     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
6495     assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
6496 
6497     // Is Rwanted_receiver_klass in this row?
6498     if (VM_Version::has_CompareBranch()) {
6499       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
6500       // Rmatching_row = Rdata + row_offset;
6501       add2reg(Rmatching_row, row_offset, Rdata);
6502       // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
6503       compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
6504     } else {
6505       add2reg(Rmatching_row, row_offset, Rdata);
6506       z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
6507       z_bre(do_increment);
6508     }
6509   }
6510 
6511   // Now that we did not find a match, let's search for b).
6512 
6513   // We could save the first calculation of Rmatching_row if we searched for a) in reverse order.
6514   // We would then end up here with Rmatching_row containing the value for row_num == 0.
6515   // We would not see much benefit, if any at all, because the CPU can schedule
6516   // two instructions together with a branch anyway.
6517   for (int row_num = 0; row_num < num_rows; row_num++) {
6518     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
6519 
6520     // Has this row a zero receiver_klass, i.e. is it empty?
6521     if (VM_Version::has_CompareBranch()) {
6522       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
6523       // Rmatching_row = Rdata + row_offset
6524       add2reg(Rmatching_row, row_offset, Rdata);
6525       // if (*row_recv == (intptr_t) 0) goto found_free_row
6526       compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
6527     } else {
6528       add2reg(Rmatching_row, row_offset, Rdata);
6529       load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
6530       z_bre(found_free_row);  // zero -> Found a free row.
6531     }
6532   }
6533 
6534   // No match, no empty row found.
6535   // Increment total counter to indicate polymorphic case.
6536   if (is_virtual_call) {
6537     add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
6538   }
6539   z_bru(found_no_slot);
6540 
6541   // Here we found an empty row, but we have not found Rwanted_receiver_klass.
6542   // Rmatching_row holds the address to the first empty row.
6543   bind(found_free_row);
6544   // Store receiver_klass into empty slot.
6545   z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
6546 
6547   // Increment the counter of Rmatching_row.
6548   bind(do_increment);
6549   ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
6550   add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
6551 
6552   bind(found_no_slot);
6553 
6554   BLOCK_COMMENT("} type profiling");
6555 }
6556 
6557 //---------------------------------------
6558 // Helpers for Intrinsic Emitters
6559 //---------------------------------------
6560 
6561 /**
6562  * uint32_t crc;
6563  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6564  */
6565 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
6566   assert_different_registers(crc, table, tmp);
6567   assert_different_registers(val, table);
6568   if (crc == val) {      // Must rotate first to use the unmodified value.
6569     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6570     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6571   } else {
6572     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6573     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6574   }
6575   z_x(crc, Address(table, tmp, 0));
6576 }
6577 
6578 /**
6579  * uint32_t crc;
6580  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6581  */
6582 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
6583   fold_byte_crc32(crc, crc, table, tmp);
6584 }
6585 
6586 /**
6587  * Emits code to update CRC-32 with a byte value according to constants in table.
6588  *
6589  * @param [in,out]crc Register containing the crc.
6590  * @param [in]val     Register containing the byte to fold into the CRC.
6591  * @param [in]table   Register containing the table of crc constants.
6592  *
6593  * uint32_t crc;
6594  * val = crc_table[(val ^ crc) & 0xFF];
6595  * crc = val ^ (crc >> 8);
6596  */
6597 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
6598   z_xr(val, crc);
6599   fold_byte_crc32(crc, val, table, val);
6600 }
6601 
6602 
6603 /**
6604  * @param crc   register containing existing CRC (32-bit)
6605  * @param buf   register pointing to input byte buffer (byte*)
6606  * @param len   register containing number of bytes
6607  * @param table register pointing to CRC table
6608  */
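// Reference sketch of the byte-wise update performed by the loop below (zlib-style,
// illustrative only):
//   while (len-- > 0) { crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8); }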
6609 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
6610   assert_different_registers(crc, buf, len, table, data);
6611 
6612   Label L_mainLoop, L_done;
6613   const int mainLoop_stepping = 1;
6614 
6615   // Process all bytes in a single-byte loop.
6616   z_ltr(len, len);
6617   z_brnh(L_done);
6618 
6619   bind(L_mainLoop);
6620     z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6621     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
6622     update_byte_crc32(crc, data, table);
6623     z_brct(len, L_mainLoop);                // Iterate.
6624 
6625   bind(L_done);
6626 }
6627 
6628 /**
6629  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
6630  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
6631  *
6632  */
6633 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
6634                                         Register t0,  Register t1,  Register t2,    Register t3) {
6635   // This is what we implement (the DOBIG4 part):
6636   //
6637   // #define DOBIG4 c ^= *++buf4; \
6638   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
6639   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
6640   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
6641   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
6642   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
6643   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
6644   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
6645   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
6646 
6647   // XOR crc with next four bytes of buffer.
6648   lgr_if_needed(t0, crc);
6649   z_x(t0, Address(buf, bufDisp));
6650   if (bufInc != 0) {
6651     add2reg(buf, bufInc);
6652   }
6653 
6654   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
6655   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
6656   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
6657   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
6658   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
6659 
6660   // XOR indexed table values to calculate updated crc.
6661   z_ly(t2, Address(table, t2, (intptr_t)ix1));
6662   z_ly(t0, Address(table, t0, (intptr_t)ix3));
6663   z_xy(t2, Address(table, t3, (intptr_t)ix0));
6664   z_xy(t0, Address(table, t1, (intptr_t)ix2));
6665   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
6666   lgr_if_needed(crc, t0);
6667 }
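
// Illustrative C sketch of the big-endian DOBIG4 step emitted above, assuming
// the usual zlib layout of crc_table as 8 columns of 256 4-byte entries
// (columns 4..7 hold the big-endian tables). The function name is made up for
// illustration; the rotate_then_insert() calls produce exactly these byte
// indices, each pre-shifted left by 2 so they serve as byte offsets into the
// 4-byte-entry columns.
//
//   static uint32_t update_1word_crc32_ref(uint32_t c, uint32_t next4,
//                                          const uint32_t crc_table[8][256]) {
//     c ^= next4;  // next four (big-endian) buffer bytes
//     return crc_table[4][ c        & 0xff] ^
//            crc_table[5][(c >>  8) & 0xff] ^
//            crc_table[6][(c >> 16) & 0xff] ^
//            crc_table[7][ c >> 24        ];
//   }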
6668 
6669 /**
6670  * @param crc   register containing existing CRC (32-bit)
6671  * @param buf   register pointing to input byte buffer (byte*)
6672  * @param len   register containing number of bytes
6673  * @param table register pointing to CRC table
6674  *
6675  * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6676  */
6677 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
6678                                         Register t0,  Register t1,  Register t2,  Register t3,
6679                                         bool invertCRC) {
6680   assert_different_registers(crc, buf, len, table);
6681 
6682   Label L_mainLoop, L_tail;
6683   Register  data = t0;
6684   Register  ctr  = Z_R0;
6685   const int mainLoop_stepping = 8;
6686   const int tailLoop_stepping = 1;
6687   const int log_stepping      = exact_log2(mainLoop_stepping);
6688 
6689   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6690   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6691   // The situation itself is detected and handled correctly by the conditional branches
6692   // following aghi(len, -stepping) and aghi(len, +stepping).
6693 
6694   if (invertCRC) {
6695     not_(crc, noreg, false);           // 1s complement of crc
6696   }
6697 
6698 #if 0
6699   {
6700     // Pre-mainLoop alignment did not show any positive effect on performance.
6701     // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.
6702 
6703     z_cghi(len, mainLoop_stepping);    // Alignment is useless for short data streams.
6704     z_brnh(L_tail);
6705 
6706     // Align buf to word (4-byte) boundary.
6707     z_lcr(ctr, buf);
6708     rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
6709     z_sgfr(len, ctr);                  // Remaining len after alignment.
6710 
6711     update_byteLoop_crc32(crc, buf, ctr, table, data);
6712   }
6713 #endif
6714 
6715   // Check for short (<mainLoop_stepping bytes) buffer.
6716   z_srag(ctr, len, log_stepping);
6717   z_brnh(L_tail);
6718 
6719   z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6720   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6721 
6722   BIND(L_mainLoop);
6723     update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
6724     update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
6725     z_brct(ctr, L_mainLoop); // Iterate.
6726 
6727   z_lrvr(crc, crc);          // Restore the original byte order.
6728 
6729   // Process last few (<8) bytes of buffer.
6730   BIND(L_tail);
6731   update_byteLoop_crc32(crc, buf, len, table, data);
6732 
6733   if (invertCRC) {
6734     not_(crc, noreg, false);           // 1s complement of crc
6735   }
6736 }
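
// Rough C outline (a sketch, not the compiled stub) of the control flow that
// kernel_crc32_2word() generates; update_1word()/update_byte() stand in for
// the helpers above. The z_lrvr byte-order reversal around the main loop has
// no direct C analogue here.
//
//   uint32_t kernel_crc32_2word_ref(uint32_t crc, const uint8_t* buf,
//                                   uint64_t len, bool invertCRC) {
//     if (invertCRC) crc = ~crc;
//     uint64_t blocks = len >> 3;               // 8 bytes per main-loop pass
//     len &= 7;                                 // remainder goes to the tail loop
//     for (; blocks > 0; blocks--, buf += 8) {
//       crc = update_1word(crc, buf);           // bytes 0..3
//       crc = update_1word(crc, buf + 4);       // bytes 4..7
//     }
//     for (; len > 0; len--) crc = update_byte(crc, *buf++);
//     if (invertCRC) crc = ~crc;
//     return crc;
//   }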
6737 
6738 /**
6739  * @param crc   register containing existing CRC (32-bit)
6740  * @param buf   register pointing to input byte buffer (byte*)
6741  * @param len   register containing number of bytes
6742  * @param table register pointing to CRC table
6743  *
6744  * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6745  */
6746 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6747                                         Register t0,  Register t1,  Register t2,  Register t3,
6748                                         bool invertCRC) {
6749   assert_different_registers(crc, buf, len, table);
6750 
6751   Label L_mainLoop, L_tail;
6752   Register  data = t0;
6753   Register  ctr  = Z_R0;
6754   const int mainLoop_stepping = 4;
6755   const int log_stepping      = exact_log2(mainLoop_stepping);
6756 
6757   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6758   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6759   // The situation itself is detected and handled correctly by the conditional branches
6760   // following aghi(len, -stepping) and aghi(len, +stepping).
6761 
6762   if (invertCRC) {
6763     not_(crc, noreg, false);           // 1s complement of crc
6764   }
6765 
6766   // Check for short (<4 bytes) buffer.
6767   z_srag(ctr, len, log_stepping);
6768   z_brnh(L_tail);
6769 
6770   z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6771   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6772 
6773   BIND(L_mainLoop);
6774     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6775     z_brct(ctr, L_mainLoop); // Iterate.
6776 
6777   z_lrvr(crc, crc);          // Restore the original byte order.
6778 
6779   // Process last few (<4) bytes of buffer.
6780   BIND(L_tail);
6781   update_byteLoop_crc32(crc, buf, len, table, data);
6782 
6783   if (invertCRC) {
6784     not_(crc, noreg, false);           // 1s complement of crc
6785   }
6786 }
6787 
6788 /**
6789  * @param crc   register containing existing CRC (32-bit)
6790  * @param buf   register pointing to input byte buffer (byte*)
6791  * @param len   register containing number of bytes
6792  * @param table register pointing to CRC table
6793  */
6794 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
6795                                         Register t0,  Register t1,  Register t2,  Register t3,
6796                                         bool invertCRC) {
6797   assert_different_registers(crc, buf, len, table);
6798   Register data = t0;
6799 
6800   if (invertCRC) {
6801     not_(crc, noreg, false);           // 1s complement of crc
6802   }
6803 
6804   update_byteLoop_crc32(crc, buf, len, table, data);
6805 
6806   if (invertCRC) {
6807     not_(crc, noreg, false);           // 1s complement of crc
6808   }
6809 }
6810 
6811 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
6812                                              bool invertCRC) {
6813   assert_different_registers(crc, buf, len, table, tmp);
6814 
6815   if (invertCRC) {
6816     not_(crc, noreg, false);           // 1s complement of crc
6817   }
6818 
6819   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6820   update_byte_crc32(crc, tmp, table);
6821 
6822   if (invertCRC) {
6823     not_(crc, noreg, false);           // 1s complement of crc
6824   }
6825 }
6826 
6827 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
6828                                                 bool invertCRC) {
6829   assert_different_registers(crc, val, table);
6830 
6831   if (invertCRC) {
6832     not_(crc, noreg, false);           // 1s complement of crc
6833   }
6834 
6835   update_byte_crc32(crc, val, table);
6836 
6837   if (invertCRC) {
6838     not_(crc, noreg, false);           // 1s complement of crc
6839   }
6840 }
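
// Note on invertCRC: the CRC-32 value kept by callers (e.g. java.util.zip)
// is stored in complemented form, so kernels that receive that external value
// undo and re-apply the complement around the table-driven update.
// Conceptually (illustrative sketch only):
//
//   crc = ~crc;                   // enter internal (uncomplemented) form
//   crc = update(crc, data);      // one of the update kernels above
//   crc = ~crc;                   // return to the externally visible form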
6841 
6842 //
6843 // Code for BigInteger::multiplyToLen() intrinsic.
6844 //
6845 
6846 // dest_lo += src1 + src2
6847 // dest_hi += carry1 + carry2
6848 // Z_R7 is destroyed !
6849 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
6850                                      Register src1, Register src2) {
6851   clear_reg(Z_R7);
6852   z_algr(dest_lo, src1);
6853   z_alcgr(dest_hi, Z_R7);
6854   z_algr(dest_lo, src2);
6855   z_alcgr(dest_hi, Z_R7);
6856 }
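
// Equivalent C sketch (illustrative) of the accumulation above, treating
// dest_hi:dest_lo as one unsigned 128-bit value:
//
//   static void add2_with_carry_ref(uint64_t* dest_hi, uint64_t* dest_lo,
//                                   uint64_t src1, uint64_t src2) {
//     unsigned __int128 sum = ((unsigned __int128)*dest_hi << 64) | *dest_lo;
//     sum += src1;
//     sum += src2;
//     *dest_hi = (uint64_t)(sum >> 64);
//     *dest_lo = (uint64_t)sum;
//   }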
6857 
6858 // Multiply 64 bit by 64 bit first loop.
6859 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
6860                                            Register x_xstart,
6861                                            Register y, Register y_idx,
6862                                            Register z,
6863                                            Register carry,
6864                                            Register product,
6865                                            Register idx, Register kdx) {
6866   // jlong carry, x[], y[], z[];
6867   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6868   //   huge_128 product = y[idx] * x[xstart] + carry;
6869   //   z[kdx] = (jlong)product;
6870   //   carry  = (jlong)(product >>> 64);
6871   // }
6872   // z[xstart] = carry;
6873 
6874   Label L_first_loop, L_first_loop_exit;
6875   Label L_one_x, L_one_y, L_multiply;
6876 
6877   z_aghi(xstart, -1);
6878   z_brl(L_one_x);   // Special case: length of x is 1.
6879 
6880   // Load next two integers of x.
6881   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6882   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6883 
6884 
6885   bind(L_first_loop);
6886 
6887   z_aghi(idx, -1);
6888   z_brl(L_first_loop_exit);
6889   z_aghi(idx, -1);
6890   z_brl(L_one_y);
6891 
6892   // Load next two integers of y.
6893   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
6894   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
6895 
6896 
6897   bind(L_multiply);
6898 
6899   Register multiplicand = product->successor();
6900   Register product_low = multiplicand;
6901 
6902   lgr_if_needed(multiplicand, x_xstart);
6903   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
6904   clear_reg(Z_R7);
6905   z_algr(product_low, carry); // Add carry to result.
6906   z_alcgr(product, Z_R7);     // Add carry of the last addition.
6907   add2reg(kdx, -2);
6908 
6909   // Store result.
6910   z_sllg(Z_R7, kdx, LogBytesPerInt);
6911   reg2mem_opt(product_low, Address(z, Z_R7, 0));
6912   lgr_if_needed(carry, product);
6913   z_bru(L_first_loop);
6914 
6915 
6916   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
6917 
6918   clear_reg(y_idx);
6919   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
6920   z_bru(L_multiply);
6921 
6922 
6923   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
6924 
6925   clear_reg(x_xstart);
6926   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6927   z_bru(L_first_loop);
6928 
6929   bind(L_first_loop_exit);
6930 }
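
// For reference (illustrative): z_mlgr(product, y_idx) multiplies the odd
// register of the product pair (loaded with x[xstart] above) by y_idx and
// leaves the 128-bit result in the even/odd pair: high half in 'product',
// low half in its successor. One main-loop iteration therefore corresponds
// roughly to this sketch, matching the jlong pseudocode in the comment above:
//
//   unsigned __int128 p = (unsigned __int128)x_xstart * y_idx + carry;
//   z[kdx] = (uint64_t)p;          // low 64 bits, i.e. two 32-bit z slots
//   carry  = (uint64_t)(p >> 64);  // high 64 bits feed the next iteration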
6931 
6932 // Multiply 64 bit by 64 bit and add 128 bit.
6933 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
6934                                             Register z,
6935                                             Register yz_idx, Register idx,
6936                                             Register carry, Register product,
6937                                             int offset) {
6938   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
6939   // z[kdx] = (jlong)product;
6940 
6941   Register multiplicand = product->successor();
6942   Register product_low = multiplicand;
6943 
6944   z_sllg(Z_R7, idx, LogBytesPerInt);
6945   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
6946 
6947   lgr_if_needed(multiplicand, x_xstart);
6948   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6949   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
6950 
6951   add2_with_carry(product, product_low, carry, yz_idx);
6952 
6953   z_sllg(Z_R7, idx, LogBytesPerInt);
6954   reg2mem_opt(product_low, Address(z, Z_R7, offset));
6955 
6956 }
6957 
6958 // Multiply 128 bit by 128 bit. Unrolled inner loop.
6959 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
6960                                              Register y, Register z,
6961                                              Register yz_idx, Register idx,
6962                                              Register jdx,
6963                                              Register carry, Register product,
6964                                              Register carry2) {
6965   // jlong carry, x[], y[], z[];
6966   // int kdx = ystart+1;
6967   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
6968   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
6969   //   z[kdx+idx+1] = (jlong)product;
6970   //   jlong carry2 = (jlong)(product >>> 64);
6971   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
6972   //   z[kdx+idx] = (jlong)product;
6973   //   carry = (jlong)(product >>> 64);
6974   // }
6975   // idx += 2;
6976   // if (idx > 0) {
6977   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
6978   //   z[kdx+idx] = (jlong)product;
6979   //   carry = (jlong)(product >>> 64);
6980   // }
6981 
6982   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
6983 
6984   // scale the index
6985   lgr_if_needed(jdx, idx);
6986   and_imm(jdx, 0xfffffffffffffffcL);
6987   rshift(jdx, 2);
6988 
6989 
6990   bind(L_third_loop);
6991 
6992   z_aghi(jdx, -1);
6993   z_brl(L_third_loop_exit);
6994   add2reg(idx, -4);
6995 
6996   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
6997   lgr_if_needed(carry2, product);
6998 
6999   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7000   lgr_if_needed(carry, product);
7001   z_bru(L_third_loop);
7002 
7003 
7004   bind(L_third_loop_exit);  // Handle any left-over operand parts.
7005 
7006   and_imm(idx, 0x3);
7007   z_brz(L_post_third_loop_done);
7008 
7009   Label L_check_1;
7010 
7011   z_aghi(idx, -2);
7012   z_brl(L_check_1);
7013 
7014   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7015   lgr_if_needed(carry, product);
7016 
7017 
7018   bind(L_check_1);
7019 
7020   add2reg(idx, 0x2);
7021   and_imm(idx, 0x1);
7022   z_aghi(idx, -1);
7023   z_brl(L_post_third_loop_done);
7024 
7025   Register   multiplicand = product->successor();
7026   Register   product_low = multiplicand;
7027 
7028   z_sllg(Z_R7, idx, LogBytesPerInt);
7029   clear_reg(yz_idx);
7030   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
7031   lgr_if_needed(multiplicand, x_xstart);
7032   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
7033   clear_reg(yz_idx);
7034   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
7035 
7036   add2_with_carry(product, product_low, yz_idx, carry);
7037 
7038   z_sllg(Z_R7, idx, LogBytesPerInt);
7039   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
7040   rshift(product_low, 32);
7041 
7042   lshift(product, 32);
7043   z_ogr(product_low, product);
7044   lgr_if_needed(carry, product_low);
7045 
7046   bind(L_post_third_loop_done);
7047 }
7048 
7049 void MacroAssembler::multiply_to_len(Register x, Register xlen,
7050                                      Register y, Register ylen,
7051                                      Register z,
7052                                      Register tmp1, Register tmp2,
7053                                      Register tmp3, Register tmp4,
7054                                      Register tmp5) {
7055   ShortBranchVerifier sbv(this);
7056 
7057   assert_different_registers(x, xlen, y, ylen, z,
7058                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
7059   assert_different_registers(x, xlen, y, ylen, z,
7060                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
7061 
7062   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
7063 
7064   // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
7065   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
7066 
7067   const Register idx = tmp1;
7068   const Register kdx = tmp2;
7069   const Register xstart = tmp3;
7070 
7071   const Register y_idx = tmp4;
7072   const Register carry = tmp5;
7073   const Register product  = Z_R0_scratch;
7074   const Register x_xstart = Z_R8;
7075 
7076   // First Loop.
7077   //
7078   //   final static long LONG_MASK = 0xffffffffL;
7079   //   int xstart = xlen - 1;
7080   //   int ystart = ylen - 1;
7081   //   long carry = 0;
7082   //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7083   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7084   //     z[kdx] = (int)product;
7085   //     carry = product >>> 32;
7086   //   }
7087   //   z[xstart] = (int)carry;
7088   //
7089 
7090   lgr_if_needed(idx, ylen);  // idx = ylen
7091   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
7092   clear_reg(carry);          // carry = 0
7093 
7094   Label L_done;
7095 
7096   lgr_if_needed(xstart, xlen);
7097   z_aghi(xstart, -1);
7098   z_brl(L_done);
7099 
7100   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7101 
7102   NearLabel L_second_loop;
7103   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
7104 
7105   NearLabel L_carry;
7106   z_aghi(kdx, -1);
7107   z_brz(L_carry);
7108 
7109   // Store lower 32 bits of carry.
7110   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
7111   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7112   rshift(carry, 32);
7113   z_aghi(kdx, -1);
7114 
7115 
7116   bind(L_carry);
7117 
7118   // Store upper 32 bits of carry.
7119   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
7120   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7121 
7122   // Second and third (nested) loops.
7123   //
7124   // for (int i = xstart-1; i >= 0; i--) { // Second loop
7125   //   carry = 0;
7126   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7127   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7128   //                    (z[k] & LONG_MASK) + carry;
7129   //     z[k] = (int)product;
7130   //     carry = product >>> 32;
7131   //   }
7132   //   z[i] = (int)carry;
7133   // }
7134   //
7135   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7136 
7137   const Register jdx = tmp1;
7138 
7139   bind(L_second_loop);
7140 
7141   clear_reg(carry);           // carry = 0;
7142   lgr_if_needed(jdx, ylen);   // j = ystart+1
7143 
7144   z_aghi(xstart, -1);         // i = xstart-1;
7145   z_brl(L_done);
7146 
7147   // Use free slots in the current stackframe instead of push/pop.
7148   Address zsave(Z_SP, _z_abi(carg_1));
7149   reg2mem_opt(z, zsave);
7150 
7151 
7152   Label L_last_x;
7153 
7154   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
7155   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
7156   z_aghi(xstart, -1);                           // i = xstart-1;
7157   z_brl(L_last_x);
7158 
7159   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
7160   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
7161 
7162 
7163   Label L_third_loop_prologue;
7164 
7165   bind(L_third_loop_prologue);
7166 
7167   Address xsave(Z_SP, _z_abi(carg_2));
7168   Address xlensave(Z_SP, _z_abi(carg_3));
7169   Address ylensave(Z_SP, _z_abi(carg_4));
7170 
7171   reg2mem_opt(x, xsave);
7172   reg2mem_opt(xstart, xlensave);
7173   reg2mem_opt(ylen, ylensave);
7174 
7175 
7176   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7177 
7178   mem2reg_opt(z, zsave);
7179   mem2reg_opt(x, xsave);
7180   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
7181   mem2reg_opt(ylen, ylensave);
7182 
7183   add2reg(tmp3, 1, xlen);
7184   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
7185   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7186   z_aghi(tmp3, -1);
7187   z_brl(L_done);
7188 
7189   rshift(carry, 32);
7190   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
7191   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7192   z_bru(L_second_loop);
7193 
7194   // The following infrequent code is placed outside the loops.
7195   bind(L_last_x);
7196 
7197   clear_reg(x_xstart);
7198   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
7199   z_bru(L_third_loop_prologue);
7200 
7201   bind(L_done);
7202 
7203   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
7204 }
7205 
7206 #ifndef PRODUCT
7207 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
7208 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
7209   Label ok;
7210   if (check_equal) {
7211     z_bre(ok);
7212   } else {
7213     z_brne(ok);
7214   }
7215   stop(msg, id);
7216   bind(ok);
7217 }
7218 
7219 // Assert if CC indicates "low".
7220 void MacroAssembler::asm_assert_low(const char *msg, int id) {
7221   Label ok;
7222   z_brnl(ok);
7223   stop(msg, id);
7224   bind(ok);
7225 }
7226 
7227 // Assert if CC indicates "high".
7228 void MacroAssembler::asm_assert_high(const char *msg, int id) {
7229   Label ok;
7230   z_brnh(ok);
7231   stop(msg, id);
7232   bind(ok);
7233 }
7234 
7235 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
7236 // generate non-relocatable code.
7237 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
7238   Label ok;
7239   if (check_equal) { z_bre(ok); }
7240   else             { z_brne(ok); }
7241   stop_static(msg, id);
7242   bind(ok);
7243 }
7244 
7245 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
7246                                           Register mem_base, const char* msg, int id) {
7247   switch (size) {
7248     case 4:
7249       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
7250       break;
7251     case 8:
7252       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
7253       break;
7254     default:
7255       ShouldNotReachHere();
7256   }
7257   if (allow_relocation) { asm_assert(check_equal, msg, id); }
7258   else                  { asm_assert_static(check_equal, msg, id); }
7259 }
7260 
7261 // Check the condition
7262 //   expected_size == FP - SP
7263 // after transformation:
7264 //   expected_size - FP + SP == 0
7265 // Destroys Register expected_size if no tmp register is passed.
7266 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
7267   if (tmp == noreg) {
7268     tmp = expected_size;
7269   } else {
7270     if (tmp != expected_size) {
7271       z_lgr(tmp, expected_size);
7272     }
7273   }
7274   z_algr(tmp, Z_SP);
7275   z_slg(tmp, 0, Z_R0, Z_SP);
7276   asm_assert_eq(msg, id);
7277 }
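
// Background note (an assumption about the frame layout): the doubleword at
// 0(Z_SP) is the back chain, i.e. the caller's SP, which serves as FP here.
// The sequence above therefore computes
//   tmp = expected_size + SP - FP
// and asm_assert_eq() stops unless the result is zero, matching the
// condition expected_size == FP - SP.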
7278 #endif // !PRODUCT
7279 
7280 void MacroAssembler::verify_thread() {
7281   if (VerifyThread) {
7282     unimplemented("", 117);
7283   }
7284 }
7285 
7286 // Plausibility check for oops.
7287 void MacroAssembler::verify_oop(Register oop, const char* msg) {
7288   if (!VerifyOops) return;
7289 
7290   BLOCK_COMMENT("verify_oop {");
7291   Register tmp = Z_R0;
7292   unsigned int nbytes_save = 5*BytesPerWord;
7293   address entry = StubRoutines::verify_oop_subroutine_entry_address();
7294 
7295   save_return_pc();
7296   push_frame_abi160(nbytes_save);
7297   z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
7298 
7299   z_lgr(Z_ARG2, oop);
7300   load_const(Z_ARG1, (address) msg);
7301   load_const(Z_R1, entry);
7302   z_lg(Z_R1, 0, Z_R1);
7303   call_c(Z_R1);
7304 
7305   z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
7306   pop_frame();
7307   restore_return_pc();
7308 
7309   BLOCK_COMMENT("} verify_oop ");
7310 }
7311 
7312 const char* MacroAssembler::stop_types[] = {
7313   "stop",
7314   "untested",
7315   "unimplemented",
7316   "shouldnotreachhere"
7317 };
7318 
7319 static void stop_on_request(const char* tp, const char* msg) {
7320   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
7321   guarantee(false, "Z assembly code requires stop: %s", msg);
7322 }
7323 
7324 void MacroAssembler::stop(int type, const char* msg, int id) {
7325   BLOCK_COMMENT(err_msg("stop: %s {", msg));
7326 
7327   // Setup arguments.
7328   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
7329   load_const(Z_ARG2, (void*) msg);
7330   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
7331   save_return_pc();  // Saves return pc Z_R14.
7332   push_frame_abi160(0);
7333   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7334   // The plain disassembler does not recognize illtrap. It instead displays
7335   // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
7336   // the proper beginning of the next instruction.
7337   z_illtrap(); // Illegal instruction.
7338   z_illtrap(); // Illegal instruction.
7339 
7340   BLOCK_COMMENT(" } stop");
7341 }
7342 
7343 // Special version of stop() for code size reduction.
7344 // Reuses the previously generated call sequence, if any.
7345 // Generates the call sequence on its own, if necessary.
7346 // Note: This code will work only in non-relocatable code!
7347 //       The relative address of the data elements (arg1, arg2) must not change.
7348 //       The reentry point must not move relative to its users. This prerequisite
7349 //       should be given for "hand-written" code, if all chain calls are in the same code blob.
7350 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
7351 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
7352   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
7353 
7354   // Setup arguments.
7355   if (allow_relocation) {
7356     // Relocatable version (for comparison purposes). Remove after some time.
7357     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
7358     load_const(Z_ARG2, (void*) msg);
7359   } else {
7360     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
7361     load_absolute_address(Z_ARG2, (address)msg);
7362   }
7363   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
7364     BLOCK_COMMENT("branch to reentry point:");
7365     z_brc(bcondAlways, reentry);
7366   } else {
7367     BLOCK_COMMENT("reentry point:");
7368     reentry = pc();      // Re-entry point for subsequent stop calls.
7369     save_return_pc();    // Saves return pc Z_R14.
7370     push_frame_abi160(0);
7371     if (allow_relocation) {
7372       reentry = NULL;    // Prevent reentry if code relocation is allowed.
7373       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7374     } else {
7375       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7376     }
7377     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
7378   }
7379   BLOCK_COMMENT(" } stop_chain");
7380 
7381   return reentry;
7382 }
7383 
7384 // Special version of stop() for code size reduction.
7385 // Assumes constant relative addresses for data and runtime call.
7386 void MacroAssembler::stop_static(int type, const char* msg, int id) {
7387   stop_chain(NULL, type, msg, id, false);
7388 }
7389 
7390 void MacroAssembler::stop_subroutine() {
7391   unimplemented("stop_subroutine", 710);
7392 }
7393 
7394 // Prints msg to stdout from within generated code.
7395 void MacroAssembler::warn(const char* msg) {
7396   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
7397   load_absolute_address(Z_R1, (address) warning);
7398   load_absolute_address(Z_ARG1, (address) msg);
7399   (void) call(Z_R1);
7400   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
7401 }
7402 
7403 #ifndef PRODUCT
7404 
7405 // Write pattern 0x0101010101010101 in region [low-before, high+after].
7406 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
7407   if (!ZapEmptyStackFields) return;
7408   BLOCK_COMMENT("zap memory region {");
7409   load_const_optimized(val, 0x0101010101010101);
7410   int size = before + after;
7411   if (low == high && size < 5 && size > 0) {
7412     int offset = -before*BytesPerWord;
7413     for (int i = 0; i < size; ++i) {
7414       z_stg(val, Address(low, offset));
7415       offset += BytesPerWord;
7416     }
7417   } else {
7418     add2reg(addr, -before*BytesPerWord, low);
7419     if (after) {
7420 #ifdef ASSERT
7421       jlong check = after * BytesPerWord;
7422       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
7423 #endif
7424       add2reg(high, after * BytesPerWord);
7425     }
7426     NearLabel loop;
7427     bind(loop);
7428     z_stg(val, Address(addr));
7429     add2reg(addr, 8);
7430     compare64_and_branch(addr, high, bcondNotHigh, loop);
7431     if (after) {
7432       add2reg(high, -after * BytesPerWord);
7433     }
7434   }
7435   BLOCK_COMMENT("} zap memory region");
7436 }
7437 #endif // !PRODUCT
7438 
7439 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
7440   _masm = masm;
7441   _masm->load_absolute_address(_rscratch, (address)flag_addr);
7442   _masm->load_and_test_int(_rscratch, Address(_rscratch));
7443   if (value) {
7444     _masm->z_brne(_label); // Skip if true, i.e. != 0.
7445   } else {
7446     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
7447   }
7448 }
7449 
7450 SkipIfEqual::~SkipIfEqual() {
7451   _masm->bind(_label);
7452 }
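
// Typical (illustrative) use of the RAII helper above: the constructor emits
// the flag test plus a forward branch, and the destructor binds the skip
// label, so everything emitted inside the scope is bypassed at runtime when
// the flag equals 'value'. Flag and scratch register names are placeholders.
//
//   {
//     SkipIfEqual skip(_masm, &SomeDiagnosticFlag, false, Z_R1_scratch);
//     // ... code emitted here runs only when SomeDiagnosticFlag is true ...
//   }  // ~SkipIfEqual() binds the label; execution continues here either way.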