1 /*
   2  * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "memory/universe.hpp"
  36 #include "oops/klass.inline.hpp"
  37 #include "opto/compile.hpp"
  38 #include "opto/intrinsicnode.hpp"
  39 #include "opto/matcher.hpp"
  40 #include "prims/methodHandles.hpp"
  41 #include "registerSaver_s390.hpp"
  42 #include "runtime/biasedLocking.hpp"
  43 #include "runtime/icache.hpp"
  44 #include "runtime/interfaceSupport.hpp"
  45 #include "runtime/objectMonitor.hpp"
  46 #include "runtime/os.hpp"
  47 #include "runtime/safepoint.hpp"
  48 #include "runtime/safepointMechanism.hpp"
  49 #include "runtime/sharedRuntime.hpp"
  50 #include "runtime/stubRoutines.hpp"
  51 #include "utilities/events.hpp"
  52 #include "utilities/macros.hpp"
  53 #if INCLUDE_ALL_GCS
  54 #include "gc/g1/g1BarrierSet.hpp"
  55 #include "gc/g1/g1CardTable.hpp"
  56 #include "gc/g1/g1CollectedHeap.inline.hpp"
  57 #include "gc/g1/heapRegion.hpp"
  58 #endif
  59 
  60 #include <ucontext.h>
  61 
  62 #define BLOCK_COMMENT(str) block_comment(str)
  63 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  64 
  65 // Move 32-bit register if destination and source are different.
  66 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  67   if (rs != rd) { z_lr(rd, rs); }
  68 }
  69 
  70 // Move register if destination and source are different.
  71 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  72   if (rs != rd) { z_lgr(rd, rs); }
  73 }
  74 
  75 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  76 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  77   if (rs != rd) { z_llgfr(rd, rs); }
  78 }
  79 
  80 // Move float register if destination and source are different.
  81 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  82   if (rs != rd) { z_ldr(rd, rs); }
  83 }
  84 
  85 // Move integer register if destination and source are different.
  86 // It is assumed that shorter-than-int types are already
  87 // appropriately sign-extended.
  88 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  89                                         BasicType src_type) {
  90   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  91   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  92 
  93   if (dst_type == src_type) {
  94     lgr_if_needed(dst, src); // Just move all 64 bits.
  95     return;
  96   }
  97 
  98   switch (dst_type) {
  99     // Do not support these types for now.
 100     //  case T_BOOLEAN:
 101     case T_BYTE:  // signed byte
 102       switch (src_type) {
 103         case T_INT:
 104           z_lgbr(dst, src);
 105           break;
 106         default:
 107           ShouldNotReachHere();
 108       }
 109       return;
 110 
 111     case T_CHAR:
 112     case T_SHORT:
 113       switch (src_type) {
 114         case T_INT:
 115           if (dst_type == T_CHAR) {
 116             z_llghr(dst, src);
 117           } else {
 118             z_lghr(dst, src);
 119           }
 120           break;
 121         default:
 122           ShouldNotReachHere();
 123       }
 124       return;
 125 
 126     case T_INT:
 127       switch (src_type) {
 128         case T_BOOLEAN:
 129         case T_BYTE:
 130         case T_CHAR:
 131         case T_SHORT:
 132         case T_INT:
 133         case T_LONG:
 134         case T_OBJECT:
 135         case T_ARRAY:
 136         case T_VOID:
 137         case T_ADDRESS:
 138           lr_if_needed(dst, src);
 139           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 140           return;
 141 
 142         default:
 143           assert(false, "non-integer src type");
 144           return;
 145       }
 146     case T_LONG:
 147       switch (src_type) {
 148         case T_BOOLEAN:
 149         case T_BYTE:
 150         case T_CHAR:
 151         case T_SHORT:
 152         case T_INT:
 153           z_lgfr(dst, src); // sign extension
 154           return;
 155 
 156         case T_LONG:
 157         case T_OBJECT:
 158         case T_ARRAY:
 159         case T_VOID:
 160         case T_ADDRESS:
 161           lgr_if_needed(dst, src);
 162           return;
 163 
 164         default:
 165           assert(false, "non-integer src type");
 166           return;
 167       }
 168       return;
 169     case T_OBJECT:
 170     case T_ARRAY:
 171     case T_VOID:
 172     case T_ADDRESS:
 173       switch (src_type) {
 174         // These types don't make sense to be converted to pointers:
 175         //      case T_BOOLEAN:
 176         //      case T_BYTE:
 177         //      case T_CHAR:
 178         //      case T_SHORT:
 179 
 180         case T_INT:
 181           z_llgfr(dst, src); // zero extension
 182           return;
 183 
 184         case T_LONG:
 185         case T_OBJECT:
 186         case T_ARRAY:
 187         case T_VOID:
 188         case T_ADDRESS:
 189           lgr_if_needed(dst, src);
 190           return;
 191 
 192         default:
 193           assert(false, "non-integer src type");
 194           return;
 195       }
 196       return;
 197     default:
 198       assert(false, "non-integer dst type");
 199       return;
 200   }
 201 }
 202 
 203 // Move float register if destination and source are different.
 204 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 205                                          FloatRegister src, BasicType src_type) {
 206   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 207   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 208   if (dst_type == src_type) {
 209     ldr_if_needed(dst, src); // Just move all 64 bits.
 210   } else {
 211     switch (dst_type) {
 212       case T_FLOAT:
 213         assert(src_type == T_DOUBLE, "invalid float type combination");
 214         z_ledbr(dst, src);
 215         return;
 216       case T_DOUBLE:
 217         assert(src_type == T_FLOAT, "invalid float type combination");
 218         z_ldebr(dst, src);
 219         return;
 220       default:
 221         assert(false, "non-float dst type");
 222         return;
 223     }
 224   }
 225 }
 226 
 227 // Optimized emitter for reg to mem operations.
 228 // Uses modern instructions if running on modern hardware, classic instructions
 229 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 230 // Data register (reg) cannot be used as work register.
 231 //
 232 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 233 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 234 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 235                                   int64_t       disp,
 236                                   Register      index,
 237                                   Register      base,
 238                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 239                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 240                                   Register      scratch) {
 241   index = (index == noreg) ? Z_R0 : index;
 242   if (Displacement::is_shortDisp(disp)) {
 243     (this->*classic)(reg, disp, index, base);
 244   } else {
 245     if (Displacement::is_validDisp(disp)) {
 246       (this->*modern)(reg, disp, index, base);
 247     } else {
 248       if (scratch != Z_R0 && scratch != Z_R1) {
 249         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 250       } else {
 251         if (scratch != Z_R0) {   // scratch == Z_R1
 252           if ((scratch == index) || (index == base)) {
 253             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 254           } else {
 255             add2reg(scratch, disp, base);
 256             (this->*classic)(reg, 0, index, scratch);
 257             if (base == scratch) {
 258               add2reg(base, -disp);  // Restore base.
 259             }
 260           }
 261         } else {   // scratch == Z_R0
 262           z_lgr(scratch, base);
 263           add2reg(base, disp);
 264           (this->*classic)(reg, 0, index, base);
 265           z_lgr(base, scratch);      // Restore base.
 266         }
 267       }
 268     }
 269   }
 270 }
 271 
 272 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 273   if (is_double) {
 274     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 275   } else {
 276     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 277   }
 278 }
 279 
 280 // Optimized emitter for mem to reg operations.
 281 // Uses modern instructions if running on modern hardware, classic instructions
 282 // otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
 284 //
 285 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 286 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 287 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 288                                   int64_t       disp,
 289                                   Register      index,
 290                                   Register      base,
 291                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 292                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 293                                   Register      scratch) {
 294   index = (index == noreg) ? Z_R0 : index;
 295   if (Displacement::is_shortDisp(disp)) {
 296     (this->*classic)(reg, disp, index, base);
 297   } else {
 298     if (Displacement::is_validDisp(disp)) {
 299       (this->*modern)(reg, disp, index, base);
 300     } else {
 301       if (scratch != Z_R0 && scratch != Z_R1) {
 302         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 303       } else {
 304         if (scratch != Z_R0) {   // scratch == Z_R1
 305           if ((scratch == index) || (index == base)) {
 306             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 307           } else {
 308             add2reg(scratch, disp, base);
 309             (this->*classic)(reg, 0, index, scratch);
 310             if (base == scratch) {
 311               add2reg(base, -disp);  // Restore base.
 312             }
 313           }
 314         } else {   // scratch == Z_R0
 315           z_lgr(scratch, base);
 316           add2reg(base, disp);
 317           (this->*classic)(reg, 0, index, base);
 318           z_lgr(base, scratch);      // Restore base.
 319         }
 320       }
 321     }
 322   }
 323 }
 324 
 325 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 326   if (is_double) {
 327     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 328   } else {
 329     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 330   }
 331 }
 332 
 333 // Optimized emitter for reg to mem operations.
 334 // Uses modern instructions if running on modern hardware, classic instructions
 335 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 336 // Data register (reg) cannot be used as work register.
 337 //
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 341 void MacroAssembler::reg2mem_opt(Register reg,
 342                                  int64_t  disp,
 343                                  Register index,
 344                                  Register base,
 345                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 346                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 347                                  Register scratch) {
 348   index = (index == noreg) ? Z_R0 : index;
 349   if (Displacement::is_shortDisp(disp)) {
 350     (this->*classic)(reg, disp, index, base);
 351   } else {
 352     if (Displacement::is_validDisp(disp)) {
 353       (this->*modern)(reg, disp, index, base);
 354     } else {
 355       if (scratch != Z_R0 && scratch != Z_R1) {
 356         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 357       } else {
 358         if (scratch != Z_R0) {   // scratch == Z_R1
 359           if ((scratch == index) || (index == base)) {
 360             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 361           } else {
 362             add2reg(scratch, disp, base);
 363             (this->*classic)(reg, 0, index, scratch);
 364             if (base == scratch) {
 365               add2reg(base, -disp);  // Restore base.
 366             }
 367           }
 368         } else {   // scratch == Z_R0
 369           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 370             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 371           } else {
 372             z_lgr(scratch, base);
 373             add2reg(base, disp);
 374             (this->*classic)(reg, 0, index, base);
 375             z_lgr(base, scratch);    // Restore base.
 376           }
 377         }
 378       }
 379     }
 380   }
 381 }
 382 
 383 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 384   int store_offset = offset();
 385   if (is_double) {
 386     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 387   } else {
 388     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 389   }
 390   return store_offset;
 391 }
 392 
 393 // Optimized emitter for mem to reg operations.
 394 // Uses modern instructions if running on modern hardware, classic instructions
 395 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 396 // Data register (reg) will be used as work register where possible.
 397 void MacroAssembler::mem2reg_opt(Register reg,
 398                                  int64_t  disp,
 399                                  Register index,
 400                                  Register base,
 401                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 402                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 403   index = (index == noreg) ? Z_R0 : index;
 404   if (Displacement::is_shortDisp(disp)) {
 405     (this->*classic)(reg, disp, index, base);
 406   } else {
 407     if (Displacement::is_validDisp(disp)) {
 408       (this->*modern)(reg, disp, index, base);
 409     } else {
 410       if ((reg == index) && (reg == base)) {
 411         z_sllg(reg, reg, 1);
 412         add2reg(reg, disp);
 413         (this->*classic)(reg, 0, noreg, reg);
 414       } else if ((reg == index) && (reg != Z_R0)) {
 415         add2reg(reg, disp);
 416         (this->*classic)(reg, 0, reg, base);
 417       } else if (reg == base) {
 418         add2reg(reg, disp);
 419         (this->*classic)(reg, 0, index, reg);
 420       } else if (reg != Z_R0) {
 421         add2reg(reg, disp, base);
 422         (this->*classic)(reg, 0, index, reg);
 423       } else { // reg == Z_R0 && reg != base here
 424         add2reg(base, disp);
 425         (this->*classic)(reg, 0, index, base);
 426         add2reg(base, -disp);
 427       }
 428     }
 429   }
 430 }
 431 
 432 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 433   if (is_double) {
 434     z_lg(reg, a);
 435   } else {
 436     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 437   }
 438 }
 439 
 440 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 441   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 442 }
 443 
 444 void MacroAssembler::and_imm(Register r, long mask,
 445                              Register tmp /* = Z_R0 */,
 446                              bool wide    /* = false */) {
 447   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 448 
 449   if (!wide) {
 450     z_nilf(r, mask);
 451     return;
 452   }
 453 
 454   assert(r != tmp, " need a different temporary register !");
 455   load_const_optimized(tmp, mask);
 456   z_ngr(r, tmp);
 457 }
 458 
 459 // Calculate the 1's complement.
 460 // Note: The condition code is neither preserved nor correctly set by this code!!!
// Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as an optimization hint for 32-bit results.
 463 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 464 
 465   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 466     z_xilf(r1, -1);
 467     if (wide) {
 468       z_xihf(r1, -1);
 469     }
 470   } else { // Distinct src and dst registers.
 471     if (VM_Version::has_DistinctOpnds()) {
 472       load_const_optimized(r1, -1);
 473       z_xgrk(r1, r2, r1);
 474     } else {
 475       if (wide) {
 476         z_lgr(r1, r2);
 477         z_xilf(r1, -1);
 478         z_xihf(r1, -1);
 479       } else {
 480         z_lr(r1, r2);
 481         z_xilf(r1, -1);
 482       }
 483     }
 484   }
 485 }
 486 
 487 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 488   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 489   assert(rBitPos <= 63,      "63   is rightmost bit position");
 490   assert(lBitPos <= rBitPos, "inverted selection interval");
 491   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 492 }
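
// Illustrative examples (derived from the formula above; bit positions follow
// z/Architecture numbering, i.e. bit 0 is the leftmost, most significant bit):
//   create_mask(48, 63) == 0x000000000000ffffUL   (rightmost 16 bits selected)
//   create_mask( 0,  7) == 0xff00000000000000UL   (leftmost byte selected)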
 493 
 494 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 495 // Rotate src, then mask register contents such that only bits in range survive.
 496 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 497 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 498 // The caller must ensure that the selected range only contains bits with defined value.
 499 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 500                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 501   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 502   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 503   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 504   //  Pre-determine which parts of dst will be zero after shift/rotate.
 505   bool llZero  =  sll4rll && (nRotate >= 16);
 506   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 507   bool lfZero  = llZero && lhZero;
 508   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 509   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 510   bool hfZero  = hlZero && hhZero;
 511 
 512   // rotate then mask src operand.
 513   // if oneBits == true,  all bits outside selected range are 1s.
 514   // if oneBits == false, all bits outside selected range are 0s.
 515   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 516     if (dst32bit) {
 517       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 518     } else {
 519       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 520       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 521       else              { z_rllg(dst, src,  nRotate); }
 522     }
 523   } else {
 524     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 525     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 526     else              { z_rllg(dst, src,  nRotate); }
 527   }
 528 
 529   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 530   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 531   unsigned int   range_mask_l  = (unsigned int)range_mask;
 532   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 533   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 534   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 535   unsigned short range_mask_ll = (unsigned short)range_mask;
 536   // Works for z9 and newer H/W.
 537   if (oneBits) {
 538     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 539     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 540   } else {
 541     // All bits outside range become 0s
 542     if (((~range_mask_l) != 0) &&              !lfZero) {
 543       z_nilf(dst, range_mask_l);
 544     }
 545     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 546       z_nihf(dst, range_mask_h);
 547     }
 548   }
 549 }
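
// Illustrative example, traced from the emitter above (assuming a 64-bit destination,
// i.e. dst32bit == false):
//   rotate_then_mask(dst, src, 48, 63, 0, false, false, false)
// copies src to dst (shift amount 0) and clears everything but the low 16 bits,
// emitting SLLG dst,src,0 followed by NILF dst,0xffff and NIHF dst,0x0.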
 550 
 551 // Rotate src, then insert selected range from rotated src into dst.
 552 // Clear dst before, if requested.
 553 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 554                                         int nRotate, bool clear_dst) {
 555   // This version does not depend on src being zero-extended int2long.
 556   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 557   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 558 }
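
// Illustrative example: rotate_then_insert(dst, src, 48, 63, 0, true) inserts
// bits 48..63 of src (no rotation) into dst and clears the remaining bits,
// which amounts to dst = src & 0xffff (a zero-extending 16-bit copy).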
 559 
 560 // Rotate src, then and selected range from rotated src into dst.
 561 // Set condition code only if so requested. Otherwise it is unpredictable.
 562 // See performance note in macroAssembler_s390.hpp for important information.
 563 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 564                                      int nRotate, bool test_only) {
 565   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 566   // This version does not depend on src being zero-extended int2long.
 567   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 569 }
 570 
 571 // Rotate src, then or selected range from rotated src into dst.
 572 // Set condition code only if so requested. Otherwise it is unpredictable.
 573 // See performance note in macroAssembler_s390.hpp for important information.
 574 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 575                                     int nRotate, bool test_only) {
 576   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 577   // This version does not depend on src being zero-extended int2long.
 578   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 580 }
 581 
 582 // Rotate src, then xor selected range from rotated src into dst.
 583 // Set condition code only if so requested. Otherwise it is unpredictable.
 584 // See performance note in macroAssembler_s390.hpp for important information.
 585 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 586                                      int nRotate, bool test_only) {
 587   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
 589   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 590   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 591 }
 592 
 593 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 594   if (inc.is_register()) {
 595     z_agr(r1, inc.as_register());
 596   } else { // constant
 597     intptr_t imm = inc.as_constant();
 598     add2reg(r1, imm);
 599   }
 600 }
 601 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
 602 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 603 // calculation and is thus rather slow.
 604 //
 605 // There is no handling for special cases, e.g. cval==0 or cval==1.
 606 //
 607 // Returns len of generated code block.
 608 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 609   int block_start = offset();
 610 
 611   bool sign_flip = cval < 0;
 612   cval = sign_flip ? -cval : cval;
 613 
 614   BLOCK_COMMENT("Reg64*Con16 {");
 615 
 616   int bit1 = cval & -cval;
 617   if (bit1 == cval) {
 618     z_sllg(rval, rval, exact_log2(bit1));
 619     if (sign_flip) { z_lcgr(rval, rval); }
 620   } else {
 621     int bit2 = (cval-bit1) & -(cval-bit1);
 622     if ((bit1+bit2) == cval) {
 623       z_sllg(work, rval, exact_log2(bit1));
 624       z_sllg(rval, rval, exact_log2(bit2));
 625       z_agr(rval, work);
 626       if (sign_flip) { z_lcgr(rval, rval); }
 627     } else {
 628       if (sign_flip) { z_mghi(rval, -cval); }
 629       else           { z_mghi(rval,  cval); }
 630     }
 631   }
 632   BLOCK_COMMENT("} Reg64*Con16");
 633 
 634   int block_end = offset();
 635   return block_end - block_start;
 636 }
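
// Illustrative examples for mul_reg64_const16 (traced from the emitter above):
//   cval == 8 (single bit set):  SLLG rval,rval,3
//   cval == 6 (two bits set):    SLLG work,rval,1; SLLG rval,rval,2; AGR rval,work
//   cval == 7 (otherwise):       MGHI rval,7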
 637 
 638 // Generic operation r1 := r2 + imm.
 639 //
 640 // Should produce the best code for each supported CPU version.
 641 // r2 == noreg yields r1 := r1 + imm
 642 // imm == 0 emits either no instruction or r1 := r2 !
 643 // NOTES: 1) Don't use this function where fixed sized
 644 //           instruction sequences are required!!!
 645 //        2) Don't use this function if condition code
 646 //           setting is required!
 647 //        3) Despite being declared as int64_t, the parameter imm
 648 //           must be a simm_32 value (= signed 32-bit integer).
 649 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 650   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 651 
 652   if (r2 == noreg) { r2 = r1; }
 653 
 654   // Handle special case imm == 0.
 655   if (imm == 0) {
 656     lgr_if_needed(r1, r2);
 657     // Nothing else to do.
 658     return;
 659   }
 660 
 661   if (!PreferLAoverADD || (r2 == Z_R0)) {
 662     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 663 
 664     // Can we encode imm in 16 bits signed?
 665     if (Immediate::is_simm16(imm)) {
 666       if (r1 == r2) {
 667         z_aghi(r1, imm);
 668         return;
 669       }
 670       if (distinctOpnds) {
 671         z_aghik(r1, r2, imm);
 672         return;
 673       }
 674       z_lgr(r1, r2);
 675       z_aghi(r1, imm);
 676       return;
 677     }
 678   } else {
 679     // Can we encode imm in 12 bits unsigned?
 680     if (Displacement::is_shortDisp(imm)) {
 681       z_la(r1, imm, r2);
 682       return;
 683     }
 684     // Can we encode imm in 20 bits signed?
 685     if (Displacement::is_validDisp(imm)) {
 686       // Always use LAY instruction, so we don't need the tmp register.
 687       z_lay(r1, imm, r2);
 688       return;
 689     }
 690 
 691   }
 692 
 693   // Can handle it (all possible values) with long immediates.
 694   lgr_if_needed(r1, r2);
 695   z_agfi(r1, imm);
 696 }
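
// Illustrative examples for add2reg (the exact encoding depends on the
// PreferLAoverADD flag and on the available CPU facilities):
//   add2reg(r1, 16, r2) may emit LA r1,16(r2)   (PreferLAoverADD, r2 != Z_R0)
//   add2reg(r1, 16)     may emit AGHI r1,16     (!PreferLAoverADD)
//   add2reg(r1, 0, r2)  emits at most LGR r1,r2, or nothing at all if r1 == r2.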
 697 
 698 // Generic operation r := b + x + d
 699 //
 700 // Addition of several operands with address generation semantics - sort of:
 701 //  - no restriction on the registers. Any register will do for any operand.
 702 //  - x == noreg: operand will be disregarded.
 703 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 704 //  - x == Z_R0:  just disregard
 705 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 706 //
 707 // The same restrictions as on add2reg() are valid!!!
 708 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 709   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 710 
 711   if (x == noreg) { x = Z_R0; }
 712   if (b == noreg) { b = r; }
 713 
 714   // Handle special case x == R0.
 715   if (x == Z_R0) {
 716     // Can simply add the immediate value to the base register.
 717     add2reg(r, d, b);
 718     return;
 719   }
 720 
 721   if (!PreferLAoverADD || (b == Z_R0)) {
 722     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 723     // Handle special case d == 0.
 724     if (d == 0) {
 725       if (b == x)        { z_sllg(r, b, 1); return; }
 726       if (r == x)        { z_agr(r, b);     return; }
 727       if (r == b)        { z_agr(r, x);     return; }
 728       if (distinctOpnds) { z_agrk(r, x, b); return; }
 729       z_lgr(r, b);
 730       z_agr(r, x);
 731     } else {
 732       if (x == b)             { z_sllg(r, x, 1); }
 733       else if (r == x)        { z_agr(r, b); }
 734       else if (r == b)        { z_agr(r, x); }
 735       else if (distinctOpnds) { z_agrk(r, x, b); }
 736       else {
 737         z_lgr(r, b);
 738         z_agr(r, x);
 739       }
 740       add2reg(r, d);
 741     }
 742   } else {
 743     // Can we encode imm in 12 bits unsigned?
 744     if (Displacement::is_shortDisp(d)) {
 745       z_la(r, d, x, b);
 746       return;
 747     }
 748     // Can we encode imm in 20 bits signed?
 749     if (Displacement::is_validDisp(d)) {
 750       z_lay(r, d, x, b);
 751       return;
 752     }
 753     z_la(r, 0, x, b);
 754     add2reg(r, d);
 755   }
 756 }
 757 
 758 // Generic emitter (32bit) for direct memory increment.
 759 // For optimal code, do not specify Z_R0 as temp register.
 760 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 761   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 762     z_asi(a, imm);
 763   } else {
 764     z_lgf(tmp, a);
 765     add2reg(tmp, imm);
 766     z_st(tmp, a);
 767   }
 768 }
 769 
 770 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 771   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 772     z_agsi(a, imm);
 773   } else {
 774     z_lg(tmp, a);
 775     add2reg(tmp, imm);
 776     z_stg(tmp, a);
 777   }
 778 }
 779 
 780 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 781   switch (size_in_bytes) {
 782     case  8: z_lg(dst, src); break;
 783     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 784     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 785     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 786     default: ShouldNotReachHere();
 787   }
 788 }
 789 
 790 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 791   switch (size_in_bytes) {
 792     case  8: z_stg(src, dst); break;
 793     case  4: z_st(src, dst); break;
 794     case  2: z_sth(src, dst); break;
 795     case  1: z_stc(src, dst); break;
 796     default: ShouldNotReachHere();
 797   }
 798 }
 799 
 800 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
 801 // a high-order summand in register tmp.
 802 //
 803 // return value: <  0: No split required, si20 actually has property uimm12.
 804 //               >= 0: Split performed. Use return value as uimm12 displacement and
 805 //                     tmp as index register.
 806 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 807   assert(Immediate::is_simm20(si20_offset), "sanity");
 808   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 809   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 810   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 811          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 812   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 813 
 814   Register work = accumulate? Z_R0 : tmp;
 815 
 816   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 817     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 818     z_slag(work, work, 12);
 819   } else {                      // Len of code = 0..10.
 820     if (ll_off == 0) { return -1; }
 821     // ll_off has 8 significant bits (at most) plus sign.
 822     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 823       z_llilh(work, ll_off >> 16);
 824       if (ll_off < 0) {                  // Sign-extension required.
 825         z_lgfr(work, work);
 826       }
 827     } else {
 828       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 829         z_llill(work, ll_off);
 830       } else {                           // Non-zero bits in both halfbytes.
 831         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 832         z_slag(work, work, 12);
 833       }
 834     }
 835   }
 836   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 837   return lg_off;
 838 }
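
// Illustrative example for split_largeoffset with fixed_codelen == true:
//   si20_offset == 0x12345  ->  lg_off == 0x345, ll_off == 0x12000
//   emitted code: LGHI work,0x12; SLAG work,work,12 (plus ALGR tmp,work if accumulate)
//   return value: 0x345, to be used as uimm12 displacement with tmp as index register.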
 839 
 840 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 841   if (Displacement::is_validDisp(si20)) {
 842     z_ley(t, si20, a);
 843   } else {
 844     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 845     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 846     // pool loads).
 847     bool accumulate    = true;
 848     bool fixed_codelen = true;
 849     Register work;
 850 
 851     if (fixed_codelen) {
 852       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 853     } else {
 854       accumulate = (a == tmp);
 855     }
 856     work = tmp;
 857 
 858     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 859     if (disp12 < 0) {
 860       z_le(t, si20, work);
 861     } else {
 862       if (accumulate) {
 863         z_le(t, disp12, work);
 864       } else {
 865         z_le(t, disp12, work, a);
 866       }
 867     }
 868   }
 869 }
 870 
 871 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 872   if (Displacement::is_validDisp(si20)) {
 873     z_ldy(t, si20, a);
 874   } else {
 875     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 876     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 877     // pool loads).
 878     bool accumulate    = true;
 879     bool fixed_codelen = true;
 880     Register work;
 881 
 882     if (fixed_codelen) {
 883       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 884     } else {
 885       accumulate = (a == tmp);
 886     }
 887     work = tmp;
 888 
 889     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 890     if (disp12 < 0) {
 891       z_ld(t, si20, work);
 892     } else {
 893       if (accumulate) {
 894         z_ld(t, disp12, work);
 895       } else {
 896         z_ld(t, disp12, work, a);
 897       }
 898     }
 899   }
 900 }
 901 
 902 // PCrelative TOC access.
 903 // Returns distance (in bytes) from current position to start of consts section.
 904 // Returns 0 (zero) if no consts section exists or if it has size zero.
 905 long MacroAssembler::toc_distance() {
 906   CodeSection* cs = code()->consts();
 907   return (long)((cs != NULL) ? cs->start()-pc() : 0);
 908 }
 909 
// The implementations on x86/sparc assume that the constant and instruction sections
// are adjacent, but this does not hold here. Two special situations may occur that we
// must be able to handle:
//   1. The const section may be located apart from the inst section.
//   2. The const section may be empty.
// In both cases, we use the const section's start address to compute the "TOC".
// This seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// The PC-relative offset could be +/-2**32 -> use long for disp.
// Furthermore, it makes no sense to have special code for
// adjacent const and inst sections.
 922 void MacroAssembler::load_toc(Register Rtoc) {
 923   // Simply use distance from start of const section (should be patched in the end).
 924   long disp = toc_distance();
 925 
 926   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 927   relocate(rspec);
 928   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 929 }
 930 
 931 // PCrelative TOC access.
 932 // Load from anywhere pcrelative (with relocation of load instr)
 933 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 934   address          pc             = this->pc();
 935   ptrdiff_t        total_distance = dataLocation - pc;
 936   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 937 
 938   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 939   assert(total_distance != 0, "sanity");
 940 
 941   // Some extra safety net.
 942   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 943     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 944   }
 945 
 946   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 947   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 948 }
 949 
 950 
 951 // PCrelative TOC access.
 952 // Load from anywhere pcrelative (with relocation of load instr)
 953 // loaded addr has to be relocated when added to constant pool.
 954 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 955   address          pc             = this->pc();
 956   ptrdiff_t        total_distance = addrLocation - pc;
 957   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 958 
 959   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 960 
 961   // Some extra safety net.
 962   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 964   }
 965 
 966   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 967   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 968 }
 969 
 970 // Generic operation: load a value from memory and test.
 971 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 972 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 973   z_lb(dst, a);
 974   z_ltr(dst, dst);
 975 }
 976 
 977 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 978   int64_t disp = a.disp20();
 979   if (Displacement::is_shortDisp(disp)) {
 980     z_lh(dst, a);
 981   } else if (Displacement::is_longDisp(disp)) {
 982     z_lhy(dst, a);
 983   } else {
 984     guarantee(false, "displacement out of range");
 985   }
 986   z_ltr(dst, dst);
 987 }
 988 
 989 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 990   z_lt(dst, a);
 991 }
 992 
 993 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 994   z_ltgf(dst, a);
 995 }
 996 
 997 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 998   z_ltg(dst, a);
 999 }
1000 
1001 // Test a bit in memory.
1002 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
1003   assert(a.index() == noreg, "no index reg allowed in testbit");
1004   if (bit <= 7) {
1005     z_tm(a.disp() + 3, a.base(), 1 << bit);
1006   } else if (bit <= 15) {
1007     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
1008   } else if (bit <= 23) {
1009     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
1010   } else if (bit <= 31) {
1011     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
1012   } else {
1013     ShouldNotReachHere();
1014   }
1015 }
1016 
1017 // Test a bit in a register. Result is reflected in CC.
1018 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1019   if (bitPos < 16) {
1020     z_tmll(r, 1U<<bitPos);
1021   } else if (bitPos < 32) {
1022     z_tmlh(r, 1U<<(bitPos-16));
1023   } else if (bitPos < 48) {
1024     z_tmhl(r, 1U<<(bitPos-32));
1025   } else if (bitPos < 64) {
1026     z_tmhh(r, 1U<<(bitPos-48));
1027   } else {
1028     ShouldNotReachHere();
1029   }
1030 }
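
// Note: bitPos counts from the least significant bit (bitPos 0 = rightmost bit).
// Illustrative examples: testbit(r, 0) emits TMLL r,0x0001; testbit(r, 63) emits TMHH r,0x8000.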
1031 
1032 void MacroAssembler::prefetch_read(Address a) {
1033   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1034 }
1035 void MacroAssembler::prefetch_update(Address a) {
1036   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1037 }
1038 
1039 // Clear a register, i.e. load const zero into reg.
1040 // Return len (in bytes) of generated instruction(s).
1041 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1042 // set_cc:    Use instruction that sets the condition code, if true.
1043 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1044   unsigned int start_off = offset();
1045   if (whole_reg) {
1046     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1047   } else {  // Only 32bit register.
1048     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1049   }
1050   return offset() - start_off;
1051 }
1052 
1053 #ifdef ASSERT
1054 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1055   switch (pattern_len) {
1056     case 1:
      pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
      // Fall through to replicate the pattern further.
    case 2:
      pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
      // Fall through to replicate the pattern further.
    case 4:
      pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
      // Fall through to replicate the pattern further.
1062     case 8:
1063       return load_const_optimized_rtn_len(r, pattern, true);
1064       break;
1065     default:
1066       guarantee(false, "preset_reg: bad len");
1067   }
1068   return 0;
1069 }
1070 #endif
1071 
// addr: Address descriptor of memory to clear. The index register will not be used!
1073 // size: Number of bytes to clear.
1074 //    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
1075 //    !!! Use store_const() instead                  !!!
1076 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1077   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1078 
1079   if (size == 1) {
1080     z_mvi(addr, 0);
1081     return;
1082   }
1083 
1084   switch (size) {
1085     case 2: z_mvhhi(addr, 0);
1086       return;
1087     case 4: z_mvhi(addr, 0);
1088       return;
1089     case 8: z_mvghi(addr, 0);
1090       return;
1091     default: ; // Fallthru to xc.
1092   }
1093 
1094   z_xc(addr, size, addr);
1095 }
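
// Illustrative behavior of clear_mem: sizes 1, 2, 4, and 8 are cleared with a single
// MVI/MVHHI/MVHI/MVGHI storing an immediate zero; any other size (up to 256 bytes)
// falls through to an XC of the area with itself, which is not an atomic store.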
1096 
1097 void MacroAssembler::align(int modulus) {
1098   while (offset() % modulus != 0) z_nop();
1099 }
1100 
// Special version for non-relocatable code if required alignment
1102 // is larger than CodeEntryAlignment.
1103 void MacroAssembler::align_address(int modulus) {
1104   while ((uintptr_t)pc() % modulus != 0) z_nop();
1105 }
1106 
1107 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1108                                          Register temp_reg,
1109                                          int64_t extra_slot_offset) {
1110   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1111   // which issues an unnecessary add instruction.
1112   int stackElementSize = Interpreter::stackElementSize;
1113   int64_t offset = extra_slot_offset * stackElementSize;
1114   const Register argbase = Z_esp;
1115   if (arg_slot.is_constant()) {
1116     offset += arg_slot.as_constant() * stackElementSize;
1117     return Address(argbase, offset);
1118   }
1119   // else
1120   assert(temp_reg != noreg, "must specify");
1121   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1122   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1123   return Address(argbase, temp_reg, offset);
1124 }
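
// Illustrative example for argument_address: a constant arg_slot of 2 with
// extra_slot_offset == 0 simply yields Address(Z_esp, 2 * Interpreter::stackElementSize)
// without emitting any code; a register arg_slot is scaled into temp_reg via SLLG first.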
1125 
1126 
1127 //===================================================================
1128 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1129 //===================================================================
//===           P A T C H A B L E   C O N S T A N T S             ===
1131 //===================================================================
1132 
1133 
1134 //---------------------------------------------------
1135 //  Load (patchable) constant into register
1136 //---------------------------------------------------
1137 
1138 
1139 // Load absolute address (and try to optimize).
1140 //   Note: This method is usable only for position-fixed code,
1141 //         referring to a position-fixed target location.
1142 //         If not so, relocations and patching must be used.
1143 void MacroAssembler::load_absolute_address(Register d, address addr) {
1144   assert(addr != NULL, "should not happen");
1145   BLOCK_COMMENT("load_absolute_address:");
1146   if (addr == NULL) {
1147     z_larl(d, pc()); // Dummy emit for size calc.
1148     return;
1149   }
1150 
1151   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1152     z_larl(d, addr);
1153     return;
1154   }
1155 
1156   load_const_optimized(d, (long)addr);
1157 }
1158 
1159 // Load a 64bit constant.
1160 // Patchable code sequence, but not atomically patchable.
1161 // Make sure to keep code size constant -> no value-dependent optimizations.
1162 // Do not kill condition code.
1163 void MacroAssembler::load_const(Register t, long x) {
1164   Assembler::z_iihf(t, (int)(x >> 32));
1165   Assembler::z_iilf(t, (int)(x & 0xffffffff));
1166 }
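
// Illustrative example: load_const(t, 0x123456789abcdef0L) emits
//   IIHF t,0x12345678
//   IILF t,0x9abcdef0
// i.e. always 12 bytes of code, independent of the value of x.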
1167 
1168 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1169 // Patchable code sequence, but not atomically patchable.
1170 // Make sure to keep code size constant -> no value-dependent optimizations.
1171 // Do not kill condition code.
1172 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1173   if (sign_extend) { Assembler::z_lgfi(t, x); }
1174   else             { Assembler::z_llilf(t, x); }
1175 }
1176 
1177 // Load narrow oop constant, no decompression.
1178 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1179   assert(UseCompressedOops, "must be on to call this method");
1180   load_const_32to64(t, a, false /*sign_extend*/);
1181 }
1182 
1183 // Load narrow klass constant, compression required.
1184 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1185   assert(UseCompressedClassPointers, "must be on to call this method");
1186   narrowKlass encoded_k = Klass::encode_klass(k);
1187   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1188 }
1189 
1190 //------------------------------------------------------
1191 //  Compare (patchable) constant with register.
1192 //------------------------------------------------------
1193 
1194 // Compare narrow oop in reg with narrow oop constant, no decompression.
1195 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1196   assert(UseCompressedOops, "must be on to call this method");
1197 
1198   Assembler::z_clfi(oop1, oop2);
1199 }
1200 
1201 // Compare narrow oop in reg with narrow oop constant, no decompression.
1202 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1203   assert(UseCompressedClassPointers, "must be on to call this method");
1204   narrowKlass encoded_k = Klass::encode_klass(klass2);
1205 
1206   Assembler::z_clfi(klass1, encoded_k);
1207 }
1208 
1209 //----------------------------------------------------------
1210 //  Check which kind of load_constant we have here.
1211 //----------------------------------------------------------
1212 
1213 // Detection of CPU version dependent load_const sequence.
1214 // The detection is valid only for code sequences generated by load_const,
1215 // not load_const_optimized.
1216 bool MacroAssembler::is_load_const(address a) {
1217   unsigned long inst1, inst2;
1218   unsigned int  len1,  len2;
1219 
1220   len1 = get_instruction(a, &inst1);
1221   len2 = get_instruction(a + len1, &inst2);
1222 
1223   return is_z_iihf(inst1) && is_z_iilf(inst2);
1224 }
1225 
1226 // Detection of CPU version dependent load_const_32to64 sequence.
1227 // Mostly used for narrow oops and narrow Klass pointers.
1228 // The detection is valid only for code sequences generated by load_const_32to64.
1229 bool MacroAssembler::is_load_const_32to64(address pos) {
  unsigned long inst1;
  unsigned int  len1;
1232 
1233   len1 = get_instruction(pos, &inst1);
1234   return is_z_llilf(inst1);
1235 }
1236 
1237 // Detection of compare_immediate_narrow sequence.
1238 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1239 bool MacroAssembler::is_compare_immediate32(address pos) {
1240   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1241 }
1242 
1243 // Detection of compare_immediate_narrow sequence.
1244 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1245 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1246   return is_compare_immediate32(pos);
}
1248 
1249 // Detection of compare_immediate_narrow sequence.
1250 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1251 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1252   return is_compare_immediate32(pos);
1253 }
1254 
1255 //-----------------------------------
1256 //  patch the load_constant
1257 //-----------------------------------
1258 
// CPU-version dependent patching of load_const.
1260 void MacroAssembler::patch_const(address a, long x) {
1261   assert(is_load_const(a), "not a load of a constant");
1262   set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
1263   set_imm32((address)(a + 6), (int)(x & 0xffffffff));
1264 }
1265 
1266 // Patching the value of CPU version dependent load_const_32to64 sequence.
1267 // The passed ptr MUST be in compressed format!
1268 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1269   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1270 
1271   set_imm32(pos, np);
1272   return 6;
1273 }
1274 
1275 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1276 // The passed ptr MUST be in compressed format!
1277 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1278   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1279 
1280   set_imm32(pos, np);
1281   return 6;
1282 }
1283 
1284 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1285 // The passed ptr must NOT be in compressed format!
1286 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1287   assert(UseCompressedOops, "Can only patch compressed oops");
1288 
1289   narrowOop no = oopDesc::encode_heap_oop(o);
1290   return patch_load_const_32to64(pos, no);
1291 }
1292 
1293 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1294 // The passed ptr must NOT be in compressed format!
1295 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1296   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1297 
1298   narrowKlass nk = Klass::encode_klass(k);
1299   return patch_load_const_32to64(pos, nk);
1300 }
1301 
1302 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1303 // The passed ptr must NOT be in compressed format!
1304 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1305   assert(UseCompressedOops, "Can only patch compressed oops");
1306 
1307   narrowOop no = oopDesc::encode_heap_oop(o);
1308   return patch_compare_immediate_32(pos, no);
1309 }
1310 
1311 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1312 // The passed ptr must NOT be in compressed format!
1313 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1314   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1315 
1316   narrowKlass nk = Klass::encode_klass(k);
1317   return patch_compare_immediate_32(pos, nk);
1318 }
1319 
1320 //------------------------------------------------------------------------
1321 //  Extract the constant from a load_constant instruction stream.
1322 //------------------------------------------------------------------------
1323 
1324 // Get constant from a load_const sequence.
1325 long MacroAssembler::get_const(address a) {
1326   assert(is_load_const(a), "not a load of a constant");
1327   unsigned long x;
1328   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1329   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1330   return (long) x;
1331 }
1332 
1333 //--------------------------------------
1334 //  Store a constant in memory.
1335 //--------------------------------------
1336 
1337 // General emitter to move a constant to memory.
1338 // The store is atomic.
1339 //  o Address must be given in RS format (no index register)
1340 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1341 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1342 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1343 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1344 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
1345 int MacroAssembler::store_const(const Address &dest, long imm,
1346                                 unsigned int lm, unsigned int lc,
1347                                 Register scratch) {
1348   int64_t  disp = dest.disp();
1349   Register base = dest.base();
1350   assert(!dest.has_index(), "not supported");
1351   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1352   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1353   assert(lm>=lc, "memory slot too small");
1354   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1355   assert(Displacement::is_validDisp(disp), "displacement out of range");
1356 
1357   bool is_shortDisp = Displacement::is_shortDisp(disp);
1358   int store_offset = -1;
1359 
1360   // For target len == 1 it's easy.
1361   if (lm == 1) {
1362     store_offset = offset();
1363     if (is_shortDisp) {
1364       z_mvi(disp, base, imm);
1365       return store_offset;
1366     } else {
1367       z_mviy(disp, base, imm);
1368       return store_offset;
1369     }
1370   }
1371 
1372   // All the "good stuff" takes an unsigned displacement.
1373   if (is_shortDisp) {
1374     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1375 
1376     store_offset = offset();
1377     switch (lm) {
1378       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1379         z_mvhhi(disp, base, imm);
1380         return store_offset;
1381       case 4:
1382         if (Immediate::is_simm16(imm)) {
1383           z_mvhi(disp, base, imm);
1384           return store_offset;
1385         }
1386         break;
1387       case 8:
1388         if (Immediate::is_simm16(imm)) {
1389           z_mvghi(disp, base, imm);
1390           return store_offset;
1391         }
1392         break;
1393       default:
1394         ShouldNotReachHere();
1395         break;
1396     }
1397   }
1398 
1399   //  Can't optimize, so load value and store it.
1400   guarantee(scratch != noreg, "need a scratch register here!");
1401   if (imm != 0) {
1402     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1403   } else {
1404     // Leave CC alone!!
1405     (void) clear_reg(scratch, true, false); // Indicate unused result.
1406   }
1407 
1408   store_offset = offset();
1409   if (is_shortDisp) {
1410     switch (lm) {
1411       case 2:
1412         z_sth(scratch, disp, Z_R0, base);
1413         return store_offset;
1414       case 4:
1415         z_st(scratch, disp, Z_R0, base);
1416         return store_offset;
1417       case 8:
1418         z_stg(scratch, disp, Z_R0, base);
1419         return store_offset;
1420       default:
1421         ShouldNotReachHere();
1422         break;
1423     }
1424   } else {
1425     switch (lm) {
1426       case 2:
1427         z_sthy(scratch, disp, Z_R0, base);
1428         return store_offset;
1429       case 4:
1430         z_sty(scratch, disp, Z_R0, base);
1431         return store_offset;
1432       case 8:
1433         z_stg(scratch, disp, Z_R0, base);
1434         return store_offset;
1435       default:
1436         ShouldNotReachHere();
1437         break;
1438     }
1439   }
1440   return -1; // should not reach here
1441 }
1442 
1443 //===================================================================
1444 //===       N O T   P A T C H A B L E   C O N S T A N T S         ===
1445 //===================================================================
1446 
1447 // Load constant x into register t with a fast instruction sequence
1448 // depending on the bits in x. Preserves CC under all circumstances.
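     // The return value is the length in bytes of the emitted (or, for emit == false,
     // the would-be emitted) sequence: 4 or 6 bytes for a single instruction, up to 12
     // bytes (e.g. IIHF followed by IILF) in the general 64-bit case.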
1449 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1450   if (x == 0) {
1451     int len;
1452     if (emit) {
1453       len = clear_reg(t, true, false);
1454     } else {
1455       len = 4;
1456     }
1457     return len;
1458   }
1459 
1460   if (Immediate::is_simm16(x)) {
1461     if (emit) { z_lghi(t, x); }
1462     return 4;
1463   }
1464 
1465   // 64 bit value: | part1 | part2 | part3 | part4 |
1466   // At least one part is not zero!
1467   int part1 = ((x >> 32) & 0xffff0000) >> 16;
1468   int part2 = (x >> 32) & 0x0000ffff;
1469   int part3 = (x & 0xffff0000) >> 16;
1470   int part4 = (x & 0x0000ffff);
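       // Example: x = 0x0123456789abcdef -> part1 = 0x0123, part2 = 0x4567, part3 = 0x89ab, part4 = 0xcdef.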
1471 
1472   // Lower word only (unsigned).
1473   if ((part1 == 0) && (part2 == 0)) {
1474     if (part3 == 0) {
1475       if (emit) z_llill(t, part4);
1476       return 4;
1477     }
1478     if (part4 == 0) {
1479       if (emit) z_llilh(t, part3);
1480       return 4;
1481     }
1482     if (emit) z_llilf(t, (int)(x & 0xffffffff));
1483     return 6;
1484   }
1485 
1486   // Upper word only.
1487   if ((part3 == 0) && (part4 == 0)) {
1488     if (part1 == 0) {
1489       if (emit) z_llihl(t, part2);
1490       return 4;
1491     }
1492     if (part2 == 0) {
1493       if (emit) z_llihh(t, part1);
1494       return 4;
1495     }
1496     if (emit) z_llihf(t, (int)(x >> 32));
1497     return 6;
1498   }
1499 
1500   // Lower word only (signed).
1501   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1502     if (emit) z_lgfi(t, (int)(x & 0xffffffff));
1503     return 6;
1504   }
1505 
1506   int len = 0;
1507 
1508   if ((part1 == 0) || (part2 == 0)) {
1509     if (part1 == 0) {
1510       if (emit) z_llihl(t, part2);
1511       len += 4;
1512     } else {
1513       if (emit) z_llihh(t, part1);
1514       len += 4;
1515     }
1516   } else {
1517     if (emit) z_llihf(t, (int)(x >> 32));
1518     len += 6;
1519   }
1520 
1521   if ((part3 == 0) || (part4 == 0)) {
1522     if (part3 == 0) {
1523       if (emit) z_iill(t, part4);
1524       len += 4;
1525     } else {
1526       if (emit) z_iilh(t, part3);
1527       len += 4;
1528     }
1529   } else {
1530     if (emit) z_iilf(t, (int)(x & 0xffffffff));
1531     len += 6;
1532   }
1533   return len;
1534 }
1535 
1536 //=====================================================================
1537 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1538 //=====================================================================
1539 
1540 // Note: In the worst case, one of the scratch registers is destroyed!!!
1541 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1542   // Right operand is constant.
1543   if (x2.is_constant()) {
1544     jlong value = x2.as_constant();
1545     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1546     return;
1547   }
1548 
1549   // Right operand is in register.
1550   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1551 }
1552 
1553 // Note: In the worst case, one of the scratch registers is destroyed!!!
1554 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1555   // Right operand is constant.
1556   if (x2.is_constant()) {
1557     jlong value = x2.as_constant();
1558     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1559     return;
1560   }
1561 
1562   // Right operand is in register.
1563   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1564 }
1565 
1566 // Note: In the worst case, one of the scratch registers is destroyed!!!
1567 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1568   // Right operand is constant.
1569   if (x2.is_constant()) {
1570     jlong value = x2.as_constant();
1571     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1572     return;
1573   }
1574 
1575   // Right operand is in register.
1576   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1577 }
1578 
1579 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1580   // Right operand is constant.
1581   if (x2.is_constant()) {
1582     jlong value = x2.as_constant();
1583     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1584     return;
1585   }
1586 
1587   // Right operand is in register.
1588   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1589 }
1590 
1591 // Generate an optimal branch to the branch target.
1592 // Optimal means that a relative branch (brc or brcl) is used if the
1593 // branch distance is short enough. Loading the target address into a
1594 // register and branching via reg is used as fallback only.
1595 //
1596 // Used registers:
1597 //   Z_R1 - work reg. Holds branch target address.
1598 //          Used in fallback case only.
1599 //
1600 // This version of branch_optimized is good for cases where the target address is known
1601 // and constant, i.e. is never changed (no relocation, no patching).
1602 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1603   address branch_origin = pc();
1604 
1605   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1606     z_brc(cond, branch_addr);
1607   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1608     z_brcl(cond, branch_addr);
1609   } else {
1610     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1611     z_bcr(cond, Z_R1);
1612   }
1613 }
1614 
1615 // This version of branch_optimized is good for cases where the target address
1616 // is potentially not yet known at the time the code is emitted.
1617 //
1618 // One very common case is a branch to an unbound label which is handled here.
1619 // The caller might know (or hope) that the branch distance is short enough
1620 // to be encoded in a 16-bit relative address. In that case, the caller will pass a
1621 // NearLabel branch_target.
1622 // Care must be taken with unbound labels. Each call to target(label) creates
1623 // an entry in the patch queue for that label to patch all references of the label
1624 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1625 // an assertion fires at patch time.
1626 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1627   if (branch_target.is_bound()) {
1628     address branch_addr = target(branch_target);
1629     branch_optimized(cond, branch_addr);
1630   } else if (branch_target.is_near()) {
1631     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1632   } else {
1633     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1634   }
1635 }
1636 
1637 // Generate an optimal compare and branch to the branch target.
1638 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1639 // branch distance is short enough. Loading the target address into a
1640 // register and branching via reg is used as fallback only.
1641 //
1642 // Input:
1643 //   r1 - left compare operand
1644 //   r2 - right compare operand
1645 void MacroAssembler::compare_and_branch_optimized(Register r1,
1646                                                   Register r2,
1647                                                   Assembler::branch_condition cond,
1648                                                   address  branch_addr,
1649                                                   bool     len64,
1650                                                   bool     has_sign) {
1651   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
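       // casenum encoding: 0 = 32-bit signed, 1 = 32-bit unsigned, 2 = 64-bit signed, 3 = 64-bit unsigned compare.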
1652 
1653   address branch_origin = pc();
1654   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1655     switch (casenum) {
1656       case 0: z_crj( r1, r2, cond, branch_addr); break;
1657       case 1: z_clrj (r1, r2, cond, branch_addr); break;
1658       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1659       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1660       default: ShouldNotReachHere(); break;
1661     }
1662   } else {
1663     switch (casenum) {
1664       case 0: z_cr( r1, r2); break;
1665       case 1: z_clr(r1, r2); break;
1666       case 2: z_cgr(r1, r2); break;
1667       case 3: z_clgr(r1, r2); break;
1668       default: ShouldNotReachHere(); break;
1669     }
1670     branch_optimized(cond, branch_addr);
1671   }
1672 }
1673 
1674 // Generate an optimal compare and branch to the branch target.
1675 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1676 // branch distance is short enough. Loading the target address into a
1677 // register and branching via reg is used as fallback only.
1678 //
1679 // Input:
1680 //   r1 - left compare operand (in register)
1681 //   x2 - right compare operand (immediate)
1682 void MacroAssembler::compare_and_branch_optimized(Register r1,
1683                                                   jlong    x2,
1684                                                   Assembler::branch_condition cond,
1685                                                   Label&   branch_target,
1686                                                   bool     len64,
1687                                                   bool     has_sign) {
1688   address      branch_origin = pc();
1689   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1690   bool         is_RelAddr16  = branch_target.is_near() ||
1691                                (branch_target.is_bound() &&
1692                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1693   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
1694 
1695   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1696     switch (casenum) {
1697       case 0: z_cij( r1, x2, cond, branch_target); break;
1698       case 1: z_clij(r1, x2, cond, branch_target); break;
1699       case 2: z_cgij(r1, x2, cond, branch_target); break;
1700       case 3: z_clgij(r1, x2, cond, branch_target); break;
1701       default: ShouldNotReachHere(); break;
1702     }
1703     return;
1704   }
1705 
1706   if (x2 == 0) {
1707     switch (casenum) {
1708       case 0: z_ltr(r1, r1); break;
1709       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1710       case 2: z_ltgr(r1, r1); break;
1711       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1712       default: ShouldNotReachHere(); break;
1713     }
1714   } else {
1715     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1716       switch (casenum) {
1717         case 0: z_chi(r1, x2); break;
1718         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1719         case 2: z_cghi(r1, x2); break;
1720         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1721         default: break;
1722       }
1723     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1724       switch (casenum) {
1725         case 0: z_cfi( r1, x2); break;
1726         case 1: z_clfi(r1, x2); break;
1727         case 2: z_cgfi(r1, x2); break;
1728         case 3: z_clgfi(r1, x2); break;
1729         default: ShouldNotReachHere(); break;
1730       }
1731     } else {
1732       // No instruction with immediate operand possible, so load into register.
1733       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1734       load_const_optimized(scratch, x2);
1735       switch (casenum) {
1736         case 0: z_cr( r1, scratch); break;
1737         case 1: z_clr(r1, scratch); break;
1738         case 2: z_cgr(r1, scratch); break;
1739         case 3: z_clgr(r1, scratch); break;
1740         default: ShouldNotReachHere(); break;
1741       }
1742     }
1743   }
1744   branch_optimized(cond, branch_target);
1745 }
1746 
1747 // Generate an optimal compare and branch to the branch target.
1748 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1749 // branch distance is short enough. Loading the target address into a
1750 // register and branching via reg is used as fallback only.
1751 //
1752 // Input:
1753 //   r1 - left compare operand
1754 //   r2 - right compare operand
1755 void MacroAssembler::compare_and_branch_optimized(Register r1,
1756                                                   Register r2,
1757                                                   Assembler::branch_condition cond,
1758                                                   Label&   branch_target,
1759                                                   bool     len64,
1760                                                   bool     has_sign) {
1761   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1762 
1763   if (branch_target.is_bound()) {
1764     address branch_addr = target(branch_target);
1765     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1766   } else {
1767     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1768       switch (casenum) {
1769         case 0: z_crj(  r1, r2, cond, branch_target); break;
1770         case 1: z_clrj( r1, r2, cond, branch_target); break;
1771         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1772         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1773         default: ShouldNotReachHere(); break;
1774       }
1775     } else {
1776       switch (casenum) {
1777         case 0: z_cr( r1, r2); break;
1778         case 1: z_clr(r1, r2); break;
1779         case 2: z_cgr(r1, r2); break;
1780         case 3: z_clgr(r1, r2); break;
1781         default: ShouldNotReachHere(); break;
1782       }
1783       branch_optimized(cond, branch_target);
1784     }
1785   }
1786 }
1787 
1788 //===========================================================================
1789 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1790 //===========================================================================
1791 
1792 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1793   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1794   int index = oop_recorder()->allocate_metadata_index(obj);
1795   RelocationHolder rspec = metadata_Relocation::spec(index);
1796   return AddressLiteral((address)obj, rspec);
1797 }
1798 
1799 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1800   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1801   int index = oop_recorder()->find_index(obj);
1802   RelocationHolder rspec = metadata_Relocation::spec(index);
1803   return AddressLiteral((address)obj, rspec);
1804 }
1805 
1806 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1807   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1808   int oop_index = oop_recorder()->allocate_oop_index(obj);
1809   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1810 }
1811 
1812 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1813   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1814   int oop_index = oop_recorder()->find_index(obj);
1815   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1816 }
1817 
1818 // NOTE: destroys r
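     // Computes r = (r != 0 ? 1 : 0): r OR (-r) has its most significant (sign) bit set
     // iff r was nonzero; the logical shift right by 31 extracts exactly that bit.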
1819 void MacroAssembler::c2bool(Register r, Register t) {
1820   z_lcr(t, r);   // t = -r
1821   z_or(r, t);    // r = -r OR r
1822   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1823 }
1824 
1825 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1826                                                       Register tmp,
1827                                                       int offset) {
1828   intptr_t value = *delayed_value_addr;
1829   if (value != 0) {
1830     return RegisterOrConstant(value + offset);
1831   }
1832 
1833   BLOCK_COMMENT("delayed_value {");
1834   // Load indirectly to solve generation ordering problem.
1835   load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1836   z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1837 
1838 #ifdef ASSERT
1839   NearLabel L;
1840   compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1841   z_illtrap();
1842   bind(L);
1843 #endif
1844 
1845   if (offset != 0) {
1846     z_agfi(tmp, offset);               // tmp = tmp + offset;
1847   }
1848 
1849   BLOCK_COMMENT("} delayed_value");
1850   return RegisterOrConstant(tmp);
1851 }
1852 
1853 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1854 // and return the resulting instruction.
1855 // Dest_pos and inst_pos are 32 bit only. These parms can only designate
1856 // relative positions.
1857 // Use correct argument types. Do not pre-calculate distance.
1858 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1859   int c = 0;
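       // c records which instruction pattern matched; it is used only in the error message below.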
1860   unsigned long patched_inst = 0;
1861   if (is_call_pcrelative_short(inst) ||
1862       is_branch_pcrelative_short(inst) ||
1863       is_branchoncount_pcrelative_short(inst) ||
1864       is_branchonindex32_pcrelative_short(inst)) {
1865     c = 1;
1866     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1867     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1868     patched_inst = (inst & ~m) | v;
1869   } else if (is_compareandbranch_pcrelative_short(inst)) {
1870     c = 2;
1871     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1872     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1873     patched_inst = (inst & ~m) | v;
1874   } else if (is_branchonindex64_pcrelative_short(inst)) {
1875     c = 3;
1876     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1877     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1878     patched_inst = (inst & ~m) | v;
1879   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1880     c = 4;
1881     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1882     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1883     patched_inst = (inst & ~m) | v;
1884   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1885     c = 5;
1886     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1887     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1888     patched_inst = (inst & ~m) | v;
1889   } else {
1890     print_dbg_msg(tty, inst, "not a relative branch", 0);
1891     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1892     ShouldNotReachHere();
1893   }
1894 
1895   long new_off = get_pcrel_offset(patched_inst);
1896   if (new_off != (dest_pos-inst_pos)) {
1897     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1898     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1899     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1900 #ifdef LUCY_DBG
1901     VM_Version::z_SIGSEGV();
1902 #endif
1903     ShouldNotReachHere();
1904   }
1905   return patched_inst;
1906 }
1907 
1908 // Only called when binding labels (share/vm/asm/assembler.cpp)
1909 // Pass arguments as intended. Do not pre-calculate distance.
1910 void MacroAssembler::pd_patch_instruction(address branch, address target) {
1911   unsigned long stub_inst;
1912   int           inst_len = get_instruction(branch, &stub_inst);
1913 
1914   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1915 }
1916 
1917 
1918 // Extract relative address (aka offset).
1919 // inv_simm16 works for 4-byte instructions only.
1920 // compare and branch instructions are 6-byte and have a 16bit offset "in the middle".
1921 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1922 
1923   if (MacroAssembler::is_pcrelative_short(inst)) {
1924     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1925       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1926     } else {
1927       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1928     }
1929   }
1930 
1931   if (MacroAssembler::is_pcrelative_long(inst)) {
1932     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1933   }
1934 
1935   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1936 #ifdef LUCY_DBG
1937   VM_Version::z_SIGSEGV();
1938 #else
1939   ShouldNotReachHere();
1940 #endif
1941   return -1;
1942 }
1943 
1944 long MacroAssembler::get_pcrel_offset(address pc) {
1945   unsigned long inst;
1946   unsigned int  len = get_instruction(pc, &inst);
1947 
1948 #ifdef ASSERT
1949   long offset;
1950   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1951     offset = get_pcrel_offset(inst);
1952   } else {
1953     offset = -1;
1954   }
1955 
1956   if (offset == -1) {
1957     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1958 #ifdef LUCY_DBG
1959     VM_Version::z_SIGSEGV();
1960 #else
1961     ShouldNotReachHere();
1962 #endif
1963   }
1964   return offset;
1965 #else
1966   return get_pcrel_offset(inst);
1967 #endif // ASSERT
1968 }
1969 
1970 // Get target address from pc-relative instructions.
1971 address MacroAssembler::get_target_addr_pcrel(address pc) {
1972   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1973   return pc + get_pcrel_offset(pc);
1974 }
1975 
1976 // Patch pc relative load address.
1977 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1978   unsigned long inst;
1979   // Offset is +/- 2**32 -> use long.
1980   ptrdiff_t distance = con - pc;
1981 
1982   get_instruction(pc, &inst);
1983 
1984   if (is_pcrelative_short(inst)) {
1985     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1986 
1987     // Some extra safety net.
1988     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1989       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1990       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1991       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1992     }
1993     return;
1994   }
1995 
1996   if (is_pcrelative_long(inst)) {
1997     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1998 
1999     // Some extra safety net.
2000     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
2001       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
2002       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
2003       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
2004     }
2005     return;
2006   }
2007 
2008   guarantee(false, "not a pcrelative instruction to patch!");
2009 }
2010 
2011 // "Current PC" here means the address just behind the basr instruction.
2012 address MacroAssembler::get_PC(Register result) {
2013   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2014   return pc();
2015 }
2016 
2017 // Get current PC + offset.
2018 // Offset given in bytes, must be even!
2019 // "Current PC" here means the address of the larl instruction plus the given offset.
2020 address MacroAssembler::get_PC(Register result, int64_t offset) {
2021   address here = pc();
2022   z_larl(result, offset/2); // Save target instruction address in result.
2023   return here + offset;
2024 }
2025 
2026 void MacroAssembler::instr_size(Register size, Register pc) {
2027   // Extract 2 most significant bits of current instruction.
2028   z_llgc(size, Address(pc));
2029   z_srl(size, 6);
2030   // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
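       // Worked examples (z/Architecture encodes the instruction length in the two most
       // significant bits of the first opcode byte): 0x07 (code 0) -> (0+3)&6 = 2 bytes,
       // 0xA7 (code 2) -> (2+3)&6 = 4 bytes, 0xE3 (code 3) -> (3+3)&6 = 6 bytes.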
2031   z_ahi(size, 3);
2032   z_nill(size, 6);
2033 }
2034 
2035 // Resize_frame with SP(new) = SP(old) - [offset].
2036 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2037 {
2038   assert_different_registers(offset, fp, Z_SP);
2039   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2040 
2041   z_sgr(Z_SP, offset);
2042   z_stg(fp, _z_abi(callers_sp), Z_SP);
2043 }
2044 
2045 // Resize_frame with SP(new) = [newSP] + offset.
2046 //   This emitter is useful if we already have calculated a pointer
2047 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2048 //   but need some additional space, e.g. for spilling.
2049 //   newSP    is the pre-calculated pointer. It must not be modified.
2050 //   fp       holds, or is filled with, the frame pointer.
2051 //   offset   is the additional increment which is added to addr to form the new SP.
2052 //            Note: specify a negative value to reserve more space!
2053 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2054 //                    It does not guarantee that fp contains the frame pointer at the end.
2055 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2056   assert_different_registers(newSP, fp, Z_SP);
2057 
2058   if (load_fp) {
2059     z_lg(fp, _z_abi(callers_sp), Z_SP);
2060   }
2061 
2062   add2reg(Z_SP, offset, newSP);
2063   z_stg(fp, _z_abi(callers_sp), Z_SP);
2064 }
2065 
2066 // Resize_frame with SP(new) = [newSP].
2067 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2068 //                    It does not guarantee that fp contains the frame pointer at the end.
2069 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2070   assert_different_registers(newSP, fp, Z_SP);
2071 
2072   if (load_fp) {
2073     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2074   }
2075 
2076   z_lgr(Z_SP, newSP);
2077   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2078     z_stg(fp, _z_abi(callers_sp), newSP);
2079   } else {
2080     z_stg(fp, _z_abi(callers_sp), Z_SP);
2081   }
2082 }
2083 
2084 // Resize_frame with SP(new) = SP(old) + offset.
2085 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2086   assert_different_registers(fp, Z_SP);
2087 
2088   if (load_fp) {
2089     z_lg(fp, _z_abi(callers_sp), Z_SP);
2090   }
2091   add64(Z_SP, offset);
2092   z_stg(fp, _z_abi(callers_sp), Z_SP);
2093 }
2094 
2095 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2096 #ifdef ASSERT
2097   assert_different_registers(bytes, old_sp, Z_SP);
2098   if (!copy_sp) {
2099     z_cgr(old_sp, Z_SP);
2100     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2101   }
2102 #endif
2103   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2104   if (bytes_with_inverted_sign) {
2105     z_agr(Z_SP, bytes);
2106   } else {
2107     z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2108   }
2109   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2110 }
2111 
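     // Push a frame of size `bytes', rounded up to the stack frame alignment.
     // The previous SP is saved via `scratch'. Returns the frame size actually pushed.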
2112 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2113   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2114   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2115   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2116 
2117   // We must not write outside the current stack bounds (given by Z_SP).
2118   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2119   // We rely on Z_R0 by default to be available as scratch.
2120   z_lgr(scratch, Z_SP);
2121   add2reg(Z_SP, -offset);
2122   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2123 #ifdef ASSERT
2124   // Just make sure nobody uses the value in the default scratch register.
2125   // When another register is used, the caller might rely on it containing the frame pointer.
2126   if (scratch == Z_R0) {
2127     z_iihf(scratch, 0xbaadbabe);
2128     z_iilf(scratch, 0xdeadbeef);
2129   }
2130 #endif
2131   return offset;
2132 }
2133 
2134 // Push a frame of size `bytes' plus abi160 on top.
2135 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2136   BLOCK_COMMENT("push_frame_abi160 {");
2137   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2138   BLOCK_COMMENT("} push_frame_abi160");
2139   return res;
2140 }
2141 
2142 // Pop current C frame.
2143 void MacroAssembler::pop_frame() {
2144   BLOCK_COMMENT("pop_frame:");
2145   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2146 }
2147 
2148 // Pop current C frame and restore return PC register (Z_R14).
2149 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2150   BLOCK_COMMENT("pop_frame_restore_retPC:");
2151   int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2152   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2153   if (Displacement::is_validDisp(retPC_offset)) {
2154     z_lg(Z_R14, retPC_offset, Z_SP);
2155     add2reg(Z_SP, frame_size_in_bytes);
2156   } else {
2157     add2reg(Z_SP, frame_size_in_bytes);
2158     restore_return_pc();
2159   }
2160 }
2161 
2162 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2163   if (allow_relocation) {
2164     call_c(entry_point);
2165   } else {
2166     call_c_static(entry_point);
2167   }
2168 }
2169 
2170 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2171   bool allow_relocation = true;
2172   call_VM_leaf_base(entry_point, allow_relocation);
2173 }
2174 
2175 void MacroAssembler::call_VM_base(Register oop_result,
2176                                   Register last_java_sp,
2177                                   address  entry_point,
2178                                   bool     allow_relocation,
2179                                   bool     check_exceptions) { // Defaults to true.
2180   // Allow_relocation indicates, if true, that the generated code shall
2181   // be fit for code relocation or referenced data relocation. In other
2182   // words: all addresses must be considered variable. PC-relative addressing
2183   // is not possible then.
2184   // On the other hand, if (allow_relocation == false), addresses and offsets
2185   // may be considered stable, enabling us to take advantage of some PC-relative
2186   // addressing tweaks. These might improve performance and reduce code size.
2187 
2188   // Determine last_java_sp register.
2189   if (!last_java_sp->is_valid()) {
2190     last_java_sp = Z_SP;  // Load Z_SP as SP.
2191   }
2192 
2193   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2194 
2195   // ARG1 must hold thread address.
2196   z_lgr(Z_ARG1, Z_thread);
2197 
2198   address return_pc = NULL;
2199   if (allow_relocation) {
2200     return_pc = call_c(entry_point);
2201   } else {
2202     return_pc = call_c_static(entry_point);
2203   }
2204 
2205   reset_last_Java_frame(allow_relocation);
2206 
2207   // C++ interp handles this in the interpreter.
2208   check_and_handle_popframe(Z_thread);
2209   check_and_handle_earlyret(Z_thread);
2210 
2211   // Check for pending exceptions.
2212   if (check_exceptions) {
2213     // Check for pending exceptions (java_thread is set upon return).
2214     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2215 
2216     // This used to be a conditional jump to forward_exception. However, with
2217     // relocation the branch might not reach its target. So we branch around the
2218     // call instead; that way the exception path is always reachable.
2219 
2220     Label ok;
2221     z_bre(ok); // Bcondequal is the same as bcondZero.
2222     call_stub(StubRoutines::forward_exception_entry());
2223     bind(ok);
2224   }
2225 
2226   // Get oop result if there is one and reset the value in the thread.
2227   if (oop_result->is_valid()) {
2228     get_vm_result(oop_result);
2229   }
2230 
2231   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2232 }
2233 
2234 void MacroAssembler::call_VM_base(Register oop_result,
2235                                   Register last_java_sp,
2236                                   address  entry_point,
2237                                   bool     check_exceptions) { // Defaults to true.
2238   bool allow_relocation = true;
2239   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2240 }
2241 
2242 // VM calls without explicit last_java_sp.
2243 
2244 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2245   // Call takes possible detour via InterpreterMacroAssembler.
2246   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2247 }
2248 
2249 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2250   // Z_ARG1 is reserved for the thread.
2251   lgr_if_needed(Z_ARG2, arg_1);
2252   call_VM(oop_result, entry_point, check_exceptions);
2253 }
2254 
2255 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2256   // Z_ARG1 is reserved for the thread.
2257   lgr_if_needed(Z_ARG2, arg_1);
2258   assert(arg_2 != Z_ARG2, "smashed argument");
2259   lgr_if_needed(Z_ARG3, arg_2);
2260   call_VM(oop_result, entry_point, check_exceptions);
2261 }
2262 
2263 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2264                              Register arg_3, bool check_exceptions) {
2265   // Z_ARG1 is reserved for the thread.
2266   lgr_if_needed(Z_ARG2, arg_1);
2267   assert(arg_2 != Z_ARG2, "smashed argument");
2268   lgr_if_needed(Z_ARG3, arg_2);
2269   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2270   lgr_if_needed(Z_ARG4, arg_3);
2271   call_VM(oop_result, entry_point, check_exceptions);
2272 }
2273 
2274 // VM static calls without explicit last_java_sp.
2275 
2276 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2277   // Call takes possible detour via InterpreterMacroAssembler.
2278   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2279 }
2280 
2281 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2282                                     Register arg_3, bool check_exceptions) {
2283   // Z_ARG1 is reserved for the thread.
2284   lgr_if_needed(Z_ARG2, arg_1);
2285   assert(arg_2 != Z_ARG2, "smashed argument");
2286   lgr_if_needed(Z_ARG3, arg_2);
2287   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2288   lgr_if_needed(Z_ARG4, arg_3);
2289   call_VM_static(oop_result, entry_point, check_exceptions);
2290 }
2291 
2292 // VM calls with explicit last_java_sp.
2293 
2294 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2295   // Call takes possible detour via InterpreterMacroAssembler.
2296   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2297 }
2298 
2299 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2300    // Z_ARG1 is reserved for the thread.
2301    lgr_if_needed(Z_ARG2, arg_1);
2302    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2303 }
2304 
2305 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2306                              Register arg_2, bool check_exceptions) {
2307    // Z_ARG1 is reserved for the thread.
2308    lgr_if_needed(Z_ARG2, arg_1);
2309    assert(arg_2 != Z_ARG2, "smashed argument");
2310    lgr_if_needed(Z_ARG3, arg_2);
2311    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2312 }
2313 
2314 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2315                              Register arg_2, Register arg_3, bool check_exceptions) {
2316   // Z_ARG1 is reserved for the thread.
2317   lgr_if_needed(Z_ARG2, arg_1);
2318   assert(arg_2 != Z_ARG2, "smashed argument");
2319   lgr_if_needed(Z_ARG3, arg_2);
2320   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2321   lgr_if_needed(Z_ARG4, arg_3);
2322   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2323 }
2324 
2325 // VM leaf calls.
2326 
2327 void MacroAssembler::call_VM_leaf(address entry_point) {
2328   // Call takes possible detour via InterpreterMacroAssembler.
2329   call_VM_leaf_base(entry_point, true);
2330 }
2331 
2332 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2333   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2334   call_VM_leaf(entry_point);
2335 }
2336 
2337 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2338   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2339   assert(arg_2 != Z_ARG1, "smashed argument");
2340   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2341   call_VM_leaf(entry_point);
2342 }
2343 
2344 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2345   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2346   assert(arg_2 != Z_ARG1, "smashed argument");
2347   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2348   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2349   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2350   call_VM_leaf(entry_point);
2351 }
2352 
2353 // Static VM leaf calls.
2354 // Really static VM leaf calls are never patched.
2355 
2356 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2357   // Call takes possible detour via InterpreterMacroAssembler.
2358   call_VM_leaf_base(entry_point, false);
2359 }
2360 
2361 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2362   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2363   call_VM_leaf_static(entry_point);
2364 }
2365 
2366 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2367   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2368   assert(arg_2 != Z_ARG1, "smashed argument");
2369   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2370   call_VM_leaf_static(entry_point);
2371 }
2372 
2373 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2374   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2375   assert(arg_2 != Z_ARG1, "smashed argument");
2376   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2377   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2378   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2379   call_VM_leaf_static(entry_point);
2380 }
2381 
2382 // Don't use detour via call_c(reg).
2383 address MacroAssembler::call_c(address function_entry) {
2384   load_const(Z_R1, function_entry);
2385   return call(Z_R1);
2386 }
2387 
2388 // Variant for really static (non-relocatable) calls which are never patched.
2389 address MacroAssembler::call_c_static(address function_entry) {
2390   load_absolute_address(Z_R1, function_entry);
2391 #if 0 // def ASSERT
2392   // Verify that call site did not move.
2393   load_const_optimized(Z_R0, function_entry);
2394   z_cgr(Z_R1, Z_R0);
2395   z_brc(bcondEqual, 3);
2396   z_illtrap(0xba);
2397 #endif
2398   return call(Z_R1);
2399 }
2400 
2401 address MacroAssembler::call_c_opt(address function_entry) {
2402   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2403   _last_calls_return_pc = success ? pc() : NULL;
2404   return _last_calls_return_pc;
2405 }
2406 
2407 // Identify a call_far_patchable instruction: LARL + LG + BASR
2408 //
2409 //    nop                   ; optionally, if required for alignment
2410 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2411 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2412 //
2413 // Code pattern will eventually get patched into variant2 (see below for detection code).
2414 //
2415 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2416   address iaddr = instruction_addr;
2417 
2418   // Check for the actual load instruction.
2419   if (!is_load_const_from_toc(iaddr)) { return false; }
2420   iaddr += load_const_from_toc_size();
2421 
2422   // Check for the call (BASR) instruction, finally.
2423   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2424   return is_call_byregister(iaddr);
2425 }
2426 
2427 // Identify a call_far_patchable instruction: BRASL
2428 //
2429 // Code pattern that suits atomic patching:
2430 //    nop                       ; Optionally, if required for alignment.
2431 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2432 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2433 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2434 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2435   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2436 
2437   // Check for correct number of leading nops.
2438   address iaddr;
2439   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2440     if (!is_z_nop(iaddr)) { return false; }
2441   }
2442   assert(iaddr == call_addr, "sanity");
2443 
2444   // --> Check for call instruction.
2445   if (is_call_far_pcrelative(call_addr)) {
2446     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2447     return true;
2448   }
2449 
2450   return false;
2451 }
2452 
2453 // Emit a NOT mt-safely patchable 64 bit absolute call.
2454 // If toc_offset == -2, then the destination of the call (= target) is emitted
2455 //                      to the constant pool and a runtime_call relocation is added
2456 //                      to the code buffer.
2457 // If toc_offset != -2, target must already be in the constant pool at
2458 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2459 //                      from the runtime_call relocation).
2460 // Special handling of emitting to scratch buffer when there is no constant pool.
2461 // Slightly changed code pattern. We emit an additional nop if we would
2462 // not end emitting at a word aligned address. This is to ensure
2463 // an atomically patchable displacement in brasl instructions.
2464 //
2465 // A call_far_patchable comes in different flavors:
2466 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2467 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2468 //  - BRASL                  (relative address of call target coded in instruction)
2469 // All flavors occupy the same amount of space. Length differences are compensated
2470 // by leading nops, such that the instruction sequence always ends at the same
2471 // byte offset. This is required to keep the return offset constant.
2472 // Furthermore, the return address (the end of the instruction sequence) is forced
2473 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2474 // need to patch the call target of the BRASL flavor.
2475 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2476 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2477   // Get current pc and ensure word alignment for end of instr sequence.
2478   const address start_pc = pc();
2479   const intptr_t       start_off = offset();
2480   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2481   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
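       // The BRASL itself begins one nop (2 bytes) after start_pc, so the pc-relative
       // distance is measured from start_pc + 2.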
2482   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2483   const bool emit_relative_call  = !emit_target_to_pool &&
2484                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2485                                    ReoptimizeCallSequences &&
2486                                    !code_section()->scratch_emit();
2487 
2488   if (emit_relative_call) {
2489     // Add padding to get the same size as below.
2490     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2491     unsigned int current_padding;
2492     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2493     assert(current_padding == padding, "sanity");
2494 
2495     // relative call: len = 2(nop) + 6 (brasl)
2496     // CodeBlob resize cannot occur in this case because
2497     // this call is emitted into pre-existing space.
2498     z_nop(); // Prepend each BRASL with a nop.
2499     z_brasl(Z_R14, target);
2500   } else {
2501     // absolute call: Get address from TOC.
2502     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2503     if (emit_target_to_pool) {
2504       // When emitting the call for the first time, we do not need to use
2505       // the pc-relative version. It will be patched anyway, when the code
2506       // buffer is copied.
2507       // Relocation is not needed when !ReoptimizeCallSequences.
2508       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2509       AddressLiteral dest(target, rt);
2510       // Store_oop_in_toc() adds dest to the constant table. As a side effect, this kills
2511       // inst_mark(). Reset it if possible.
2512       bool reset_mark = (inst_mark() == pc());
2513       tocOffset = store_oop_in_toc(dest);
2514       if (reset_mark) { set_inst_mark(); }
2515       if (tocOffset == -1) {
2516         return false; // Couldn't create constant pool entry.
2517       }
2518     }
2519     assert(offset() == start_off, "emit no code before this point!");
2520 
2521     address tocPos = pc() + tocOffset;
2522     if (emit_target_to_pool) {
2523       tocPos = code()->consts()->start() + tocOffset;
2524     }
2525     load_long_pcrelative(Z_R14, tocPos);
2526     z_basr(Z_R14, Z_R14);
2527   }
2528 
2529 #ifdef ASSERT
2530   // Assert that we can identify the emitted call.
2531   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2532   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2533 
2534   if (emit_target_to_pool) {
2535     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2536            "wrong encoding of dest address");
2537   }
2538 #endif
2539   return true; // success
2540 }
2541 
2542 // Identify a call_far_patchable instruction.
2543 // For more detailed information see header comment of call_far_patchable.
2544 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2545   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2546          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2547 }
2548 
2549 // Does the call_far_patchable instruction use a pc-relative encoding
2550 // of the call destination?
2551 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2552   // Variant 2 is pc-relative.
2553   return is_call_far_patchable_variant2_at(instruction_addr);
2554 }
2555 
2556 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2557   // Prepend each BRASL with a nop.
2558   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2559 }
2560 
2561 // Set destination address of a call_far_patchable instruction.
2562 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2563   ResourceMark rm;
2564 
2565   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2566   int code_size = MacroAssembler::call_far_patchable_size();
2567   CodeBuffer buf(instruction_addr, code_size);
2568   MacroAssembler masm(&buf);
2569   masm.call_far_patchable(dest, tocOffset);
2570   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2571 }
2572 
2573 // Get dest address of a call_far_patchable instruction.
2574 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2575   // Dynamic TOC: absolute address in constant pool.
2576   // Check variant2 first, it is more frequent.
2577 
2578   // Relative address encoded in call instruction.
2579   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2580     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2581 
2582   // Absolute address in constant pool.
2583   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2584     address iaddr = instruction_addr;
2585 
2586     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2587     address tocLoc    = iaddr + tocOffset;
2588     return *(address *)(tocLoc);
2589   } else {
2590     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2591     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2592             *(unsigned long*)instruction_addr,
2593             *(unsigned long*)(instruction_addr+8),
2594             call_far_patchable_size());
2595     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2596     ShouldNotReachHere();
2597     return NULL;
2598   }
2599 }
2600 
2601 void MacroAssembler::align_call_far_patchable(address pc) {
2602   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2603 }
2604 
2605 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2606 }
2607 
2608 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2609 }
2610 
2611 // Read from the polling page.
2612 // Use TM or TMY instruction, depending on read offset.
2613 //   offset = 0: Use TM, safepoint polling.
2614 //   offset < 0: Use TMY, profiling safepoint polling.
2615 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2616   if (Immediate::is_uimm12(offset)) {
2617     z_tm(offset, polling_page_address, mask_safepoint);
2618   } else {
2619     z_tmy(offset, polling_page_address, mask_profiling);
2620   }
2621 }
2622 
2623 // Check whether z_instruction is a read access to the polling page
2624 // which was emitted by load_from_polling_page(..).
2625 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2626   unsigned long z_instruction;
2627   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2628 
2629   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2630 
2631   if (ilen == 4) {
2632     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2633 
2634     int ms = inv_mask(z_instruction,8,32);  // mask
2635     int ra = inv_reg(z_instruction,16,32);  // base register
2636     int ds = inv_uimm12(z_instruction);     // displacement
2637 
2638     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2639       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2640     }
2641 
2642   } else { /* if (ilen == 6) */
2643 
2644     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2645 
2646     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2647 
2648     int ms = inv_mask(z_instruction,8,48);  // mask
2649     int ra = inv_reg(z_instruction,16,48);  // base register
2650     int ds = inv_simm20(z_instruction);     // displacement
2651   }
2652 
2653   return true;
2654 }
2655 
2656 // Extract poll address from instruction and ucontext.
2657 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2658   assert(ucontext != NULL, "must have ucontext");
2659   ucontext_t* uc = (ucontext_t*) ucontext;
2660   unsigned long z_instruction;
2661   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2662 
2663   if (ilen == 4 && is_z_tm(z_instruction)) {
2664     int ra = inv_reg(z_instruction, 16, 32);  // base register
2665     int ds = inv_uimm12(z_instruction);       // displacement
2666     address addr = (address)uc->uc_mcontext.gregs[ra];
2667     return addr + ds;
2668   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2669     int ra = inv_reg(z_instruction, 16, 48);  // base register
2670     int ds = inv_simm20(z_instruction);       // displacement
2671     address addr = (address)uc->uc_mcontext.gregs[ra];
2672     return addr + ds;
2673   }
2674 
2675   ShouldNotReachHere();
2676   return NULL;
2677 }
2678 
2679 // Extract poll register from instruction.
2680 uint MacroAssembler::get_poll_register(address instr_loc) {
2681   unsigned long z_instruction;
2682   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2683 
2684   if (ilen == 4 && is_z_tm(z_instruction)) {
2685     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2686   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2687     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2688   }
2689 
2690   ShouldNotReachHere();
2691   return 0;
2692 }
2693 
2694 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
2695   ShouldNotCallThis();
2696   return false;
2697 }
2698 
2699 // Write the serialization page so the VM thread can do a pseudo remote membar.
2700 // We use the current thread pointer to calculate a thread-specific
2701 // offset to write to within the page. This minimizes bus traffic
2702 // due to cache line collisions.
2703 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2704   assert_different_registers(tmp1, tmp2);
2705   z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
2706   load_const_optimized(tmp1, (long) os::get_memory_serialize_page());
2707 
2708   int mask = os::get_serialize_page_mask();
2709   if (Immediate::is_uimm16(mask)) {
2710     z_nill(tmp2, mask);
2711     z_llghr(tmp2, tmp2);
2712   } else {
2713     z_nilf(tmp2, mask);
2714     z_llgfr(tmp2, tmp2);
2715   }
2716 
2717   z_release();
2718   z_st(Z_R0, 0, tmp2, tmp1);
2719 }
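
     // Rough sketch of the effect of serialize_memory() above (illustrative only):
     //   offset = f(thread) & os::get_serialize_page_mask();        // thread-specific slot within the page
     //   release barrier;
     //   *(int*)(os::get_memory_serialize_page() + offset) = (int)Z_R0;  // value irrelevant, only the store matters
     // The VM thread later write-protects the serialization page; the resulting faults
     // serialize memory with all mutator threads without a full membar on every transition.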
2720 
2721 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
2722   if (SafepointMechanism::uses_thread_local_poll()) {
2723     const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
2724     // Armed page has poll_bit set.
2725     z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
2726     z_brnaz(slow_path);
2727   } else {
2728     load_const_optimized(temp_reg, SafepointSynchronize::address_of_state());
2729     z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized);
2730     z_brne(slow_path);
2731   }
2732 }
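
     // The two polling variants above are roughly equivalent to (sketch only):
     //   thread-local poll: if (low byte of Z_thread->_polling_page has poll_bit set)  goto slow_path;
     //   global poll:       if (SafepointSynchronize::_state != _not_synchronized)     goto slow_path;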
2733 
2734 // Don't rely on register locking; always use Z_R1 as scratch register instead.
2735 void MacroAssembler::bang_stack_with_offset(int offset) {
2736   // Stack grows down, caller passes positive offset.
2737   assert(offset > 0, "must bang with positive offset");
2738   if (Displacement::is_validDisp(-offset)) {
2739     z_tmy(-offset, Z_SP, mask_stackbang);
2740   } else {
2741     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2742     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2743   }
2744 }
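
     // Conceptually, the bang above performs (sketch only):
     //   (void) *(Z_SP - offset);
     // i.e. it touches a stack page below SP so that a guard page is hit at a
     // well-defined point, rather than at an unpredictable place inside the new frame.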
2745 
2746 void MacroAssembler::reserved_stack_check(Register return_pc) {
2747   // Test if reserved zone needs to be enabled.
2748   Label no_reserved_zone_enabling;
2749   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2750   BLOCK_COMMENT("reserved_stack_check {");
2751 
2752   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2753   z_brl(no_reserved_zone_enabling);
2754 
2755   // Enable reserved zone again, throw stack overflow exception.
2756   save_return_pc();
2757   push_frame_abi160(0);
2758   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2759   pop_frame();
2760   restore_return_pc();
2761 
2762   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2763   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2764   z_br(Z_R1);
2765 
2766   should_not_reach_here();
2767 
2768   bind(no_reserved_zone_enabling);
2769   BLOCK_COMMENT("} reserved_stack_check");
2770 }
2771 
2772 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
2773 void MacroAssembler::tlab_allocate(Register obj,
2774                                    Register var_size_in_bytes,
2775                                    int con_size_in_bytes,
2776                                    Register t1,
2777                                    Label& slow_case) {
2778   assert_different_registers(obj, var_size_in_bytes, t1);
2779   Register end = t1;
2780   Register thread = Z_thread;
2781 
2782   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2783   if (var_size_in_bytes == noreg) {
2784     z_lay(end, Address(obj, con_size_in_bytes));
2785   } else {
2786     z_lay(end, Address(obj, var_size_in_bytes));
2787   }
2788   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2789   branch_optimized(bcondHigh, slow_case);
2790 
2791   // Update the tlab top pointer.
2792   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2793 
2794   // Recover var_size_in_bytes if necessary.
2795   if (var_size_in_bytes == end) {
2796     z_sgr(var_size_in_bytes, obj);
2797   }
2798 }
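
     // The code above implements the usual TLAB bump-pointer allocation, roughly (sketch only):
     //   obj = thread->tlab_top;
     //   end = obj + size;                          // size from register or constant
     //   if (end > thread->tlab_end) goto slow_case;
     //   thread->tlab_top = end;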
2799 
2800 // Emitter for interface method lookup.
2801 //   input: recv_klass, intf_klass, itable_index
2802 //   output: method_result
2803 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2804 // TODO: Temp2_reg is unused. We may use this emitter also in the itable stubs.
2805 // If the register is still not needed by then, remove it.
2806 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2807                                              Register           intf_klass,
2808                                              RegisterOrConstant itable_index,
2809                                              Register           method_result,
2810                                              Register           temp1_reg,
2811                                              Label&             no_such_interface,
2812                                              bool               return_method) {
2813 
2814   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2815   const Register itable_entry_addr = Z_R1_scratch;
2816   const Register itable_interface = Z_R0_scratch;
2817 
2818   BLOCK_COMMENT("lookup_interface_method {");
2819 
2820   // Load start of itable entries into itable_entry_addr.
2821   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2822   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2823 
2824   // Loop over all itable entries until the desired interfaceOop (intf_klass) is found.
2825   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2826 
2827   add2reg_with_index(itable_entry_addr,
2828                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2829                      recv_klass, vtable_len);
2830 
2831   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2832   Label     search;
2833 
2834   bind(search);
2835 
2836   // Handle IncompatibleClassChangeError.
2837   // If the entry is NULL then we've reached the end of the table
2838   // without finding the expected interface, so throw an exception.
2839   load_and_test_long(itable_interface, Address(itable_entry_addr));
2840   z_bre(no_such_interface);
2841 
2842   add2reg(itable_entry_addr, itable_offset_search_inc);
2843   z_cgr(itable_interface, intf_klass);
2844   z_brne(search);
2845 
2846   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2847   if (return_method) {
2848     const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2849                                       itableOffsetEntry::interface_offset_in_bytes()) -
2850                                      itable_offset_search_inc;
2851 
2852     // Compute the itableMethodEntry and get the method and entry point.
2853     // We use addressing with index and displacement, since the formula
2854     // for computing the entry's offset has a fixed and a dynamic part.
2855     // The dynamic part depends on the matched interface entry and on whether
2856     // the itable index has been passed as a register or as a constant value.
2857     int method_offset = itableMethodEntry::method_offset_in_bytes();
2858                              // Fixed part (displacement), common operand.
2859     Register itable_offset = method_result;  // Dynamic part (index register).
2860 
2861     if (itable_index.is_register()) {
2862        // Compute the method's offset in that register, for the formula, see the
2863        // else-clause below.
2864        z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
2865        z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2866     } else {
2867       // Displacement increases.
2868       method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2869 
2870       // Load the vtable offset of the interface from the itable entry.
2871       z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2872     }
2873 
2874     // Finally load the Method*.
2875     z_lg(method_result, method_offset, itable_offset, recv_klass);
2876   }
2877   BLOCK_COMMENT("} lookup_interface_method");
2878 }
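
     // Pseudo-code sketch of the itable lookup emitted above (illustrative only):
     //   entry = (address)recv_klass + Klass::vtable_start_offset()
     //           + vtable_length * vtableEntry::size_in_bytes();        // start of the itableOffsetEntries
     //   for (;;) {
     //     if (entry->interface == NULL)       goto no_such_interface;  // end of itable
     //     if (entry->interface == intf_klass) break;
     //     entry += itableOffsetEntry::size() * wordSize;
     //   }
     //   if (return_method)
     //     method_result = *(Method**)((address)recv_klass + entry->offset
     //                                 + itable_index * itableMethodEntry::size() * wordSize
     //                                 + itableMethodEntry::method_offset_in_bytes());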
2879 
2880 // Lookup for virtual method invocation.
2881 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2882                                            RegisterOrConstant vtable_index,
2883                                            Register           method_result) {
2884   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2885   assert(vtableEntry::size() * wordSize == wordSize,
2886          "else adjust the scaling in the code below");
2887 
2888   BLOCK_COMMENT("lookup_virtual_method {");
2889 
2890   const int base = in_bytes(Klass::vtable_start_offset());
2891 
2892   if (vtable_index.is_constant()) {
2893     // Load with base + disp.
2894     Address vtable_entry_addr(recv_klass,
2895                               vtable_index.as_constant() * wordSize +
2896                               base +
2897                               vtableEntry::method_offset_in_bytes());
2898 
2899     z_lg(method_result, vtable_entry_addr);
2900   } else {
2901     // Shift index properly and load with base + index + disp.
2902     Register vindex = vtable_index.as_register();
2903     Address  vtable_entry_addr(recv_klass, vindex,
2904                                base + vtableEntry::method_offset_in_bytes());
2905 
2906     z_sllg(vindex, vindex, exact_log2(wordSize));
2907     z_lg(method_result, vtable_entry_addr);
2908   }
2909   BLOCK_COMMENT("} lookup_virtual_method");
2910 }
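
     // Sketch of the address computation above (illustrative only):
     //   method_result = *(Method**)((address)recv_klass + Klass::vtable_start_offset()
     //                               + vtable_index * wordSize
     //                               + vtableEntry::method_offset_in_bytes());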
2911 
2912 // Factor out code to call ic_miss_handler.
2913 // Generate code to call the inline cache miss handler.
2914 //
2915 // In most cases, this code will be generated out-of-line.
2916 // The method parameters are intended to provide some variability.
2917 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2918 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2919 //                  Any value except 0x00 is supported.
2920 //                  = 0x00 - do not generate illtrap instructions.
2921 //                         use nops to fill unused space.
2922 //   requiredSize - required size of the generated code. If the actually
2923 //                  generated code is smaller, use padding instructions to fill up.
2924 //                  = 0 - no size requirement, no padding.
2925 //   scratch      - scratch register to hold branch target address.
2926 //
2927 //  The method returns the code offset of the bound label.
2928 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2929   intptr_t startOffset = offset();
2930 
2931   // Prevent entry at content_begin().
2932   if (trapMarker != 0) {
2933     z_illtrap(trapMarker);
2934   }
2935 
2936   // Load address of inline cache miss code into scratch register
2937   // and branch to cache miss handler.
2938   BLOCK_COMMENT("IC miss handler {");
2939   BIND(ICM);
2940   unsigned int   labelOffset = offset();
2941   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2942 
2943   load_const_optimized(scratch, icmiss);
2944   z_br(scratch);
2945 
2946   // Fill unused space.
2947   if (requiredSize > 0) {
2948     while ((offset() - startOffset) < requiredSize) {
2949       if (trapMarker == 0) {
2950         z_nop();
2951       } else {
2952         z_illtrap(trapMarker);
2953       }
2954     }
2955   }
2956   BLOCK_COMMENT("} IC miss handler");
2957   return labelOffset;
2958 }
2959 
2960 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2961   Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
2962   int      klass_offset = oopDesc::klass_offset_in_bytes();
2963   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2964     if (VM_Version::has_CompareBranch()) {
2965       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2966     } else {
2967       z_ltgr(Z_ARG1, Z_ARG1);
2968       z_bre(ic_miss);
2969     }
2970   }
2971   // Compare cached class against klass from receiver.
2972   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2973   z_brne(ic_miss);
2974 }
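
     // Sketch of the unverified entry point check above (illustrative only):
     //   if (receiver == NULL)                               goto ic_miss;  // only without implicit null checks
     //   if (receiver->klass() != cached klass in ic_reg)    goto ic_miss;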
2975 
2976 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2977                                                    Register   super_klass,
2978                                                    Register   temp1_reg,
2979                                                    Label*     L_success,
2980                                                    Label*     L_failure,
2981                                                    Label*     L_slow_path,
2982                                                    RegisterOrConstant super_check_offset) {
2983 
2984   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2985   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2986 
2987   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2988   bool need_slow_path = (must_load_sco ||
2989                          super_check_offset.constant_or_zero() == sc_offset);
2990 
2991   // Input registers must not overlap.
2992   assert_different_registers(sub_klass, super_klass, temp1_reg);
2993   if (super_check_offset.is_register()) {
2994     assert_different_registers(sub_klass, super_klass,
2995                                super_check_offset.as_register());
2996   } else if (must_load_sco) {
2997     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2998   }
2999 
3000   const Register Rsuper_check_offset = temp1_reg;
3001 
3002   NearLabel L_fallthrough;
3003   int label_nulls = 0;
3004   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
3005   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
3006   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
3007   assert(label_nulls <= 1 ||
3008          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
3009          "at most one NULL in the batch, usually");
3010 
3011   BLOCK_COMMENT("check_klass_subtype_fast_path {");
3012   // If the pointers are equal, we are done (e.g., String[] elements).
3013   // This self-check enables sharing of secondary supertype arrays among
3014   // non-primary types such as array-of-interface. Otherwise, each such
3015   // type would need its own customized SSA.
3016   // We move this check to the front of the fast path because many
3017   // type checks are in fact trivially successful in this manner,
3018   // so we get a nicely predicted branch right at the start of the check.
3019   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
3020 
3021   // Check the supertype display, which is uint.
3022   if (must_load_sco) {
3023     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
3024     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
3025   }
3026   Address super_check_addr(sub_klass, super_check_offset, 0);
3027   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
3028 
3029   // This check has worked decisively for primary supers.
3030   // Secondary supers are sought in the super_cache ('super_cache_addr').
3031   // (Secondary supers are interfaces and very deeply nested subtypes.)
3032   // This works in the same check above because of a tricky aliasing
3033   // between the super_cache and the primary super display elements.
3034   // (The 'super_check_addr' can address either, as the case requires.)
3035   // Note that the cache is updated below if it does not help us find
3036   // what we need immediately.
3037   // So if it was a primary super, we can just fail immediately.
3038   // Otherwise, it's the slow path for us (no success at this point).
3039 
3040   // Hacked jmp, which may only be used just before L_fallthrough.
3041 #define final_jmp(label)                                                \
3042   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3043   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
3044 
3045   if (super_check_offset.is_register()) {
3046     branch_optimized(Assembler::bcondEqual, *L_success);
3047     z_cfi(super_check_offset.as_register(), sc_offset);
3048     if (L_failure == &L_fallthrough) {
3049       branch_optimized(Assembler::bcondEqual, *L_slow_path);
3050     } else {
3051       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3052       final_jmp(*L_slow_path);
3053     }
3054   } else if (super_check_offset.as_constant() == sc_offset) {
3055     // Need a slow path; fast failure is impossible.
3056     if (L_slow_path == &L_fallthrough) {
3057       branch_optimized(Assembler::bcondEqual, *L_success);
3058     } else {
3059       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3060       final_jmp(*L_success);
3061     }
3062   } else {
3063     // No slow path; it's a fast decision.
3064     if (L_failure == &L_fallthrough) {
3065       branch_optimized(Assembler::bcondEqual, *L_success);
3066     } else {
3067       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3068       final_jmp(*L_success);
3069     }
3070   }
3071 
3072   bind(L_fallthrough);
3073 #undef local_brc
3074 #undef final_jmp
3075   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3076   // fallthru (to slow path)
3077 }
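
     // Sketch of the fast-path decision above (illustrative only):
     //   if (sub_klass == super_klass)                                 -> L_success
     //   sco = super_klass->super_check_offset();
     //   if (*(Klass**)((address)sub_klass + sco) == super_klass)      -> L_success
     //   else if (sco == Klass::secondary_super_cache_offset())        -> L_slow_path  // must scan secondary supers
     //   else                                                          -> L_failure    // primary display miss is decisive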
3078 
3079 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3080                                                    Register Rsuperklass,
3081                                                    Register Rarray_ptr,  // tmp
3082                                                    Register Rlength,     // tmp
3083                                                    Label* L_success,
3084                                                    Label* L_failure) {
3085   // Input registers must not overlap.
3086   // Also check for Z_R1, which is used explicitly here.
3087   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3088   NearLabel L_fallthrough, L_loop;
3089   int label_nulls = 0;
3090   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3091   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3092   assert(label_nulls <= 1, "at most one NULL in the batch");
3093 
3094   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3095   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3096 
3097   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3098   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3099 
3100   // Hacked jmp, which may only be used just before L_fallthrough.
3101 #define final_jmp(label)                                                \
3102   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3103   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3104 
3105   NearLabel loop_iterate, loop_count, match;
3106 
3107   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3108   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3109 
3110   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3111   branch_optimized(Assembler::bcondZero, *L_failure);
3112 
3113   // Entries in the secondary supers table are no longer compressed.
3114   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3115   z_bre(match);                               // Shortcut for array length = 1.
3116 
3117   // No match yet, so we must walk the array's elements.
3118   z_lngfr(Rlength, Rlength);
3119   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3120   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3121   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3122   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3123   z_bru(loop_count);
3124 
3125   BIND(loop_iterate);
3126   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3127   z_bre(match);
3128   BIND(loop_count);
3129   z_brxlg(Rlength, Z_R1, loop_iterate);
3130 
3131   // Rsuperklass not found among secondary super classes -> failure.
3132   branch_optimized(Assembler::bcondAlways, *L_failure);
3133 
3134   // Got a hit. Return success (zero result). Set cache.
3135   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3136 
3137   BIND(match);
3138 
3139   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3140 
3141   final_jmp(*L_success);
3142 
3143   // Exit to the surrounding code.
3144   BIND(L_fallthrough);
3145 #undef local_brc
3146 #undef final_jmp
3147   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3148 }
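
     // Sketch of the slow path above (illustrative only):
     //   Array<Klass*>* ss = sub_klass->secondary_supers();
     //   for (int i = 0; i < ss->length(); i++) {
     //     if (ss->at(i) == super_klass) {
     //       sub_klass->_secondary_super_cache = super_klass;  // remember the hit (the z_stg above)
     //       -> L_success
     //     }
     //   }
     //   -> L_failure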
3149 
3150 // Emitter for combining fast and slow path.
3151 void MacroAssembler::check_klass_subtype(Register sub_klass,
3152                                          Register super_klass,
3153                                          Register temp1_reg,
3154                                          Register temp2_reg,
3155                                          Label&   L_success) {
3156   NearLabel failure;
3157   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3158   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3159                                 &L_success, &failure, NULL);
3160   check_klass_subtype_slow_path(sub_klass, super_klass,
3161                                 temp1_reg, temp2_reg, &L_success, NULL);
3162   BIND(failure);
3163   BLOCK_COMMENT("} check_klass_subtype");
3164 }
3165 
3166 // Increment a counter at counter_address when the eq condition code is
3167 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3168 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3169   Label l;
3170   z_brne(l);
3171   load_const(tmp1_reg, counter_address);
3172   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3173   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3174   bind(l);
3175 }
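
     // Equivalent effect (sketch only): if the condition code is 'equal',
     //   (*(int*)counter_address)++;
     // and the condition code is 'equal' again afterwards.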
3176 
3177 // Semantics are dependent on the slow_case label:
3178 //   If the slow_case label is not NULL, failure to biased-lock the object
3179 //   transfers control to the location of the slow_case label. If the
3180 //   object could be biased-locked, control is transferred to the done label.
3181 //   The condition code is unpredictable.
3182 //
3183 //   If the slow_case label is NULL, failure to biased-lock the object results
3184 //   in a transfer of control to the done label with a condition code of not_equal.
3185 //   If the biased-lock could be successfully obtained, control is transferred to
3186 //   the done label with a condition code of equal.
3187 //   It is mandatory to act on the condition code at the done label.
3188 //
3189 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3190                                           Register  mark_reg,
3191                                           Register  temp_reg,
3192                                           Register  temp2_reg,    // May be Z_R0!
3193                                           Label    &done,
3194                                           Label    *slow_case) {
3195   assert(UseBiasedLocking, "why call this otherwise?");
3196   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3197 
3198   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3199 
3200   BLOCK_COMMENT("biased_locking_enter {");
3201 
3202   // Biased locking
3203   // See whether the lock is currently biased toward our thread and
3204   // whether the epoch is still valid.
3205   // Note that the runtime guarantees sufficient alignment of JavaThread
3206   // pointers to allow age to be placed into low bits.
3207   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3208          "biased locking makes assumptions about bit layout");
3209   z_lr(temp_reg, mark_reg);
3210   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3211   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3212   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3213 
3214   load_prototype_header(temp_reg, obj_reg);
3215   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3216 
3217   z_ogr(temp_reg, Z_thread);
3218   z_xgr(temp_reg, mark_reg);
3219   z_ngr(temp_reg, temp2_reg);
3220   if (PrintBiasedLockingStatistics) {
3221     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3222     // Restore mark_reg.
3223     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3224   }
3225   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3226 
3227   Label try_revoke_bias;
3228   Label try_rebias;
3229   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3230 
3231   //----------------------------------------------------------------------------
3232   // At this point we know that the header has the bias pattern and
3233   // that we are not the bias owner in the current epoch. We need to
3234   // figure out more details about the state of the header in order to
3235   // know what operations can be legally performed on the object's
3236   // header.
3237 
3238   // If the low three bits in the xor result aren't clear, that means
3239   // the prototype header is no longer biased and we have to revoke
3240   // the bias on this object.
3241   z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3242   z_brnaz(try_revoke_bias);
3243 
3244   // Biasing is still enabled for this data type. See whether the
3245   // epoch of the current bias is still valid, meaning that the epoch
3246   // bits of the mark word are equal to the epoch bits of the
3247   // prototype header. (Note that the prototype header's epoch bits
3248   // only change at a safepoint.) If not, attempt to rebias the object
3249   // toward the current thread. Note that we must be absolutely sure
3250   // that the current epoch is invalid in order to do this because
3251   // otherwise the manipulations it performs on the mark word are
3252   // illegal.
3253   z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3254   z_brnaz(try_rebias);
3255 
3256   //----------------------------------------------------------------------------
3257   // The epoch of the current bias is still valid but we know nothing
3258   // about the owner; it might be set or it might be clear. Try to
3259   // acquire the bias of the object using an atomic operation. If this
3260   // fails, we will go into the runtime to revoke the object's bias.
3261   // Note that we first construct the presumed unbiased header so we
3262   // don't accidentally blow away another thread's valid bias.
3263   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3264          markOopDesc::epoch_mask_in_place);
3265   z_lgr(temp_reg, Z_thread);
3266   z_llgfr(mark_reg, mark_reg);
3267   z_ogr(temp_reg, mark_reg);
3268 
3269   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3270 
3271   z_csg(mark_reg, temp_reg, 0, obj_reg);
3272 
3273   // If the biasing toward our thread failed, this means that
3274   // another thread succeeded in biasing it toward itself and we
3275   // need to revoke that bias. The revocation will occur in the
3276   // interpreter runtime in the slow case.
3277 
3278   if (PrintBiasedLockingStatistics) {
3279     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3280                          temp_reg, temp2_reg);
3281   }
3282   if (slow_case != NULL) {
3283     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3284   }
3285   branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3286 
3287   //----------------------------------------------------------------------------
3288   bind(try_rebias);
3289   // At this point we know the epoch has expired, meaning that the
3290   // current "bias owner", if any, is actually invalid. Under these
3291   // circumstances _only_, we are allowed to use the current header's
3292   // value as the comparison value when doing the cas to acquire the
3293   // bias in the current epoch. In other words, we allow transfer of
3294   // the bias from one thread to another directly in this situation.
3295 
3296   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
3297   load_prototype_header(temp_reg, obj_reg);
3298   z_llgfr(mark_reg, mark_reg);
3299 
3300   z_ogr(temp_reg, Z_thread);
3301 
3302   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3303 
3304   z_csg(mark_reg, temp_reg, 0, obj_reg);
3305 
3306   // If the biasing toward our thread failed, this means that
3307   // another thread succeeded in biasing it toward itself and we
3308   // need to revoke that bias. The revocation will occur in the
3309   // interpreter runtime in the slow case.
3310 
3311   if (PrintBiasedLockingStatistics) {
3312     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3313   }
3314   if (slow_case != NULL) {
3315     branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3316   }
3317   z_bru(done);           // Biased lock status given in condition code.
3318 
3319   //----------------------------------------------------------------------------
3320   bind(try_revoke_bias);
3321   // The prototype mark in the klass doesn't have the bias bit set any
3322   // more, indicating that objects of this data type are not supposed
3323   // to be biased any more. We are going to try to reset the mark of
3324   // this object to the prototype value and fall through to the
3325   // CAS-based locking scheme. Note that if our CAS fails, it means
3326   // that another thread raced us for the privilege of revoking the
3327   // bias of this particular object, so it's okay to continue in the
3328   // normal locking code.
3329   load_prototype_header(temp_reg, obj_reg);
3330 
3331   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3332 
3333   z_csg(mark_reg, temp_reg, 0, obj_reg);
3334 
3335   // Fall through to the normal CAS-based lock, because no matter what
3336   // the result of the above CAS, some thread must have succeeded in
3337   // removing the bias bit from the object's header.
3338   if (PrintBiasedLockingStatistics) {
3339     // z_cgr(mark_reg, temp2_reg);
3340     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3341   }
3342 
3343   bind(cas_label);
3344   BLOCK_COMMENT("} biased_locking_enter");
3345 }
3346 
3347 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3348   // Check for the biased locking unlock case, which is a no-op.
3349   // Note: we do not have to check the thread ID for two reasons.
3350   // First, the interpreter checks for IllegalMonitorStateException at
3351   // a higher level. Second, if the bias was revoked while we held the
3352   // lock, the object could not be rebiased toward another thread, so
3353   // the bias bit would be clear.
3354   BLOCK_COMMENT("biased_locking_exit {");
3355 
3356   z_lg(temp_reg, 0, mark_addr);
3357   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3358 
3359   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3360   z_bre(done);
3361   BLOCK_COMMENT("} biased_locking_exit");
3362 }
3363 
3364 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3365   Register displacedHeader = temp1;
3366   Register currentHeader = temp1;
3367   Register temp = temp2;
3368   NearLabel done, object_has_monitor;
3369 
3370   BLOCK_COMMENT("compiler_fast_lock_object {");
3371 
3372   // Load markOop from oop into mark.
3373   z_lg(displacedHeader, 0, oop);
3374 
3375   if (try_bias) {
3376     biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3377   }
3378 
3379   // Handle existing monitor.
3380   if ((EmitSync & 0x01) == 0) {
3381     // The object has an existing monitor iff (mark & monitor_value) != 0.
3382     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3383     z_lr(temp, displacedHeader);
3384     z_nill(temp, markOopDesc::monitor_value);
3385     z_brne(object_has_monitor);
3386   }
3387 
3388   // Set mark to markOop | markOopDesc::unlocked_value.
3389   z_oill(displacedHeader, markOopDesc::unlocked_value);
3390 
3391   // Load Compare Value application register.
3392 
3393   // Initialize the box (must happen before we update the object mark).
3394   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3395 
3396   // Memory Fence (in cmpxchgd)
3397   // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
3398 
3399   // If the compare-and-swap succeeded, then we found an unlocked object and we
3400   // have now locked it.
3401   z_csg(displacedHeader, box, 0, oop);
3402   assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
3403   z_bre(done);
3404 
3405   // We did not see an unlocked object so try the fast recursive case.
3406 
3407   z_sgr(currentHeader, Z_SP);
3408   load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3409 
3410   z_ngr(currentHeader, temp);
3411   //   z_brne(done);
3412   //   z_release();
3413   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3414 
3415   z_bru(done);
3416 
3417   if ((EmitSync & 0x01) == 0) {
3418     Register zero = temp;
3419     Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
3420     bind(object_has_monitor);
3421     // The object's monitor m is unlocked iff m->owner == NULL,
3422     // otherwise m->owner may contain a thread or a stack address.
3423     //
3424     // Try to CAS m->owner from NULL to current thread.
3425     z_lghi(zero, 0);
3426     // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3427     z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3428     // Store a non-null value into the box.
3429     z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3430 #ifdef ASSERT
3431       z_brne(done);
3432       // We've acquired the monitor, check some invariants.
3433       // Invariant 1: _recursions should be 0.
3434       asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3435                               "monitor->_recursions should be 0", -1);
3436       z_ltgr(zero, zero); // Set CR=EQ.
3437 #endif
3438   }
3439   bind(done);
3440 
3441   BLOCK_COMMENT("} compiler_fast_lock_object");
3442   // If locking was successful, CR should indicate 'EQ'.
3443   // The compiler or the native wrapper generates a branch to the runtime call
3444   // _complete_monitor_locking_Java.
3445 }
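
     // Sketch of the fast-lock protocol above (illustrative only):
     //   mark = obj->mark();
     //   if (mark & monitor_value) goto inflated;                     // CAS monitor->owner from NULL to Z_thread
     //   box->displaced_header = mark | unlocked_value;
     //   if (CAS(&obj->mark, mark | unlocked_value, box) succeeds)    -> locked, CC = EQ
     //   // failed CAS: check for a recursive stack lock
     //   box->displaced_header = (mark - SP) & (~(page_size - 1) | lock_mask_in_place);  // 0 iff recursive
     //   CC = EQ iff that value is 0.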
3446 
3447 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3448   Register displacedHeader = temp1;
3449   Register currentHeader = temp2;
3450   Register temp = temp1;
3451   Register monitor = temp2;
3452 
3453   Label done, object_has_monitor;
3454 
3455   BLOCK_COMMENT("compiler_fast_unlock_object {");
3456 
3457   if (try_bias) {
3458     biased_locking_exit(oop, currentHeader, done);
3459   }
3460 
3461   // Find the lock address and load the displaced header from the stack.
3462   // If the displaced header is zero, we have a recursive unlock.
3463   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3464   z_bre(done);
3465 
3466   // Handle existing monitor.
3467   if ((EmitSync & 0x02) == 0) {
3468     // The object has an existing monitor iff (mark & monitor_value) != 0.
3469     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3470     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3471     z_nill(currentHeader, markOopDesc::monitor_value);
3472     z_brne(object_has_monitor);
3473   }
3474 
3475   // Check if it is still a lightweight lock; this is true if we see
3476   // the stack address of the basicLock in the markOop of the object.
3477   // Copy box to currentHeader such that csg does not kill it.
3478   z_lgr(currentHeader, box);
3479   z_csg(currentHeader, displacedHeader, 0, oop);
3480   z_bru(done); // Csg sets CR as desired.
3481 
3482   // Handle existing monitor.
3483   if ((EmitSync & 0x02) == 0) {
3484     bind(object_has_monitor);
3485     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3486     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3487     z_brne(done);
3488     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3489     z_brne(done);
3490     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3491     z_brne(done);
3492     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3493     z_brne(done);
3494     z_release();
3495     z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3496   }
3497 
3498   bind(done);
3499 
3500   BLOCK_COMMENT("} compiler_fast_unlock_object");
3501   // flag == EQ indicates success
3502   // flag == NE indicates failure
3503 }
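
     // Sketch of the fast-unlock protocol above (illustrative only):
     //   if (box->displaced_header == 0)            -> done, CC = EQ   // recursive stack lock, nothing to undo
     //   if (!(obj->mark() & monitor_value))
     //     CAS(&obj->mark, box, displaced_header);  // restore the header; CC reflects success
     //   else
     //     // inflated: release in line only if recursions, owner, EntryList and cxq are all zero;
     //     // otherwise leave CC = NE and let the runtime perform the unlock.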
3504 
3505 // Write to card table for modification at store_addr - register is destroyed afterwards.
3506 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
3507   BarrierSet* bs = Universe::heap()->barrier_set();
3508   CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
3509   CardTable* ct = ctbs->card_table();
3510   assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
3511   assert_different_registers(store_addr, tmp);
3512   z_srlg(store_addr, store_addr, CardTable::card_shift);
3513   load_absolute_address(tmp, (address)ct->byte_map_base());
3514   z_agr(store_addr, tmp);
3515   z_mvi(0, store_addr, 0); // Store byte 0.
3516 }
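
     // Sketch of the effect above (illustrative only):
     //   byte_map_base[store_addr >> card_shift] = 0;   // 0 is the dirty card value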
3517 
3518 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3519   NearLabel Ldone;
3520   z_ltgr(tmp1, value);
3521   z_bre(Ldone);          // Use NULL result as-is.
3522 
3523   z_nill(value, ~JNIHandles::weak_tag_mask);
3524   z_lg(value, 0, value); // Resolve (untagged) jobject.
3525 
3526 #if INCLUDE_ALL_GCS
3527   if (UseG1GC) {
3528     NearLabel Lnot_weak;
3529     z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
3530     z_braz(Lnot_weak);
3531     verify_oop(value);
3532     g1_write_barrier_pre(noreg /* obj */,
3533                          noreg /* offset */,
3534                          value /* pre_val */,
3535                          noreg /* val */,
3536                          tmp1  /* tmp1 */,
3537                          tmp2  /* tmp2 */,
3538                          true  /* pre_val_needed */);
3539     bind(Lnot_weak);
3540   }
3541 #endif // INCLUDE_ALL_GCS
3542   verify_oop(value);
3543   bind(Ldone);
3544 }
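
     // Sketch of the semantics above (illustrative only):
     //   if (handle == NULL) return NULL;
     //   oop obj = *(oop*)(handle & ~weak_tag_mask);    // strip tag bits, dereference the handle
     //   if (UseG1GC && handle was weak-tagged)
     //     apply the SATB pre-barrier to obj so concurrent marking keeps the referent alive;
     //   return obj;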
3545 
3546 #if INCLUDE_ALL_GCS
3547 
3548 //------------------------------------------------------
3549 // General G1 pre-barrier generator.
3550 // Purpose: record the previous value if it is not null.
3551 // All non-tmps are preserved.
3552 //------------------------------------------------------
3553 // Note: Rpre_val needs special attention.
3554 //   The flag pre_val_needed indicates that the caller of this emitter function
3555 //   relies on Rpre_val containing the correct value, that is:
3556 //     either the value it contained on entry to this code segment
3557 //     or the value that was loaded into the register from (Robj+offset).
3558 //
3559 //   Independent from this requirement, the contents of Rpre_val must survive
3560 //   the push_frame() operation. push_frame() uses Z_R0_scratch by default
3561 //   to temporarily remember the frame pointer.
3562 //   If Rpre_val is assigned Z_R0_scratch by the caller, code must be emitted to
3563 //   save its value.
3564 void MacroAssembler::g1_write_barrier_pre(Register           Robj,
3565                                           RegisterOrConstant offset,
3566                                           Register           Rpre_val,      // Ideally, this is a non-volatile register.
3567                                           Register           Rval,          // Will be preserved.
3568                                           Register           Rtmp1,         // If Rpre_val is volatile, either Rtmp1
3569                                           Register           Rtmp2,         // or Rtmp2 has to be non-volatile.
3570                                           bool               pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3571                                        ) {
3572   Label callRuntime, filtered;
3573   const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3574   const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3575   const int index_offset  = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3576   assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
3577   assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0!!
3578   assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0!!
3579 
3580 #ifdef ASSERT
3581   // Make sure the register is not Z_R0. It is used for addressing. Furthermore, it would be destroyed by push_frame.
3582   if (offset.is_register() && offset.as_register()->encoding() == 0) {
3583     tty->print_cr("Roffset(g1_write_barrier_pre)  = %%r%d", offset.as_register()->encoding());
3584     assert(false, "bad register for offset");
3585   }
3586 #endif
3587 
3588   BLOCK_COMMENT("g1_write_barrier_pre {");
3589 
3590   // Is marking active?
3591   // Note: value is loaded for test purposes only. No further use here.
3592   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3593     load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3594   } else {
3595     guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3596     load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3597   }
3598   z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
3599 
3600   assert(Rpre_val != noreg, "must have a real register");
3601 
3602 
3603   // If an object is given, we need to load the previous value into Rpre_val.
3604   if (Robj != noreg) {
3605     // Load the previous value...
3606     Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3607     if (UseCompressedOops) {
3608       z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3609     } else {
3610       z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3611     }
3612   }
3613 
3614   // Is the previous value NULL?
3615   // If so, we don't need to record it and we're done.
3616   // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
3617   //       Register contents is preserved across runtime call if caller requests to do so.
3618   z_ltgr(Rpre_val, Rpre_val);
3619   z_bre(filtered); // previous value is NULL, so we don't need to record it.
3620 
3621   // Decode the oop now. We know it's not NULL.
3622   if (Robj != noreg && UseCompressedOops) {
3623     oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3624   }
3625 
3626   // OK, it's not filtered, so we'll need to call enqueue.
3627 
3628   // We can store the original value in the thread's buffer
3629   // only if index > 0. Otherwise, we need the runtime to handle it.
3630   // (The index field is typed as size_t.)
3631   Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3632   assert_different_registers(Rbuffer, Rindex, Rpre_val);
3633 
3634   z_lg(Rbuffer, buffer_offset, Z_thread);
3635 
3636   load_and_test_long(Rindex, Address(Z_thread, index_offset));
3637   z_bre(callRuntime); // If index == 0, goto runtime.
3638 
3639   add2reg(Rindex, -wordSize); // Decrement index.
3640   z_stg(Rindex, index_offset, Z_thread);
3641 
3642   // Record the previous value.
3643   z_stg(Rpre_val, 0, Rbuffer, Rindex);
3644   z_bru(filtered);  // We are done.
3645 
3646   Rbuffer = noreg;  // end of life
3647   Rindex  = noreg;  // end of life
3648 
3649   bind(callRuntime);
3650 
3651   // Save some registers (inputs and result) over runtime call
3652   // by spilling them into the top frame.
3653   if (Robj != noreg && Robj->is_volatile()) {
3654     z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3655   }
3656   if (offset.is_register() && offset.as_register()->is_volatile()) {
3657     Register Roff = offset.as_register();
3658     z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3659   }
3660   if (Rval != noreg && Rval->is_volatile()) {
3661     z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3662   }
3663 
3664   // Save Rpre_val (result) over runtime call.
3665   Register Rpre_save = Rpre_val;
3666   if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
3667     guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
3668     Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
3669   }
3670   lgr_if_needed(Rpre_save, Rpre_val);
3671 
3672   // Push frame to protect top frame with return pc and spilled register values.
3673   save_return_pc();
3674   push_frame_abi160(0); // Will use Z_R0 as tmp.
3675 
3676   // Rpre_val may be destroyed by push_frame().
3677   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
3678 
3679   pop_frame();
3680   restore_return_pc();
3681 
3682   // Restore spilled values.
3683   if (Robj != noreg && Robj->is_volatile()) {
3684     z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3685   }
3686   if (offset.is_register() && offset.as_register()->is_volatile()) {
3687     Register Roff = offset.as_register();
3688     z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3689   }
3690   if (Rval != noreg && Rval->is_volatile()) {
3691     z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3692   }
3693   if (pre_val_needed && Rpre_val->is_volatile()) {
3694     lgr_if_needed(Rpre_val, Rpre_save);
3695   }
3696 
3697   bind(filtered);
3698   BLOCK_COMMENT("} g1_write_barrier_pre");
3699 }
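
     // Sketch of the SATB pre-barrier implemented above (illustrative only):
     //   if (!thread->satb_mark_queue().is_active()) return;      // no concurrent marking in progress
     //   if (Robj != noreg) pre_val = *(Robj + offset);           // load the previous value
     //   if (pre_val == NULL) return;
     //   if (queue index > 0) enqueue pre_val into the thread-local SATB buffer;
     //   else                 call the runtime (SharedRuntime::g1_wb_pre) to handle the full buffer.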
3700 
3701 // General G1 post-barrier generator.
3702 // Purpose: Store cross-region card.
3703 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
3704                                            Register Rnew_val,
3705                                            Register Rtmp1,
3706                                            Register Rtmp2,
3707                                            Register Rtmp3) {
3708   Label callRuntime, filtered;
3709 
3710   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
3711 
3712   G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(Universe::heap()->barrier_set());
3713   CardTable* ct = bs->card_table();
3714   assert(bs->kind() == BarrierSet::G1BarrierSet, "wrong barrier");
3715 
3716   BLOCK_COMMENT("g1_write_barrier_post {");
3717 
3718   // Does store cross heap regions?
3719   // It does if the two addresses specify different grain addresses.
3720   if (G1RSBarrierRegionFilter) {
3721     if (VM_Version::has_DistinctOpnds()) {
3722       z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
3723     } else {
3724       z_lgr(Rtmp1, Rstore_addr);
3725       z_xgr(Rtmp1, Rnew_val);
3726     }
3727     z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
3728     z_bre(filtered);
3729   }
3730 
3731   // Crosses regions, storing NULL?
3732 #ifdef ASSERT
3733   z_ltgr(Rnew_val, Rnew_val);
3734   asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete:
3735   z_bre(filtered);  // Safety net: don't break if we have a NULL oop.
3736 #endif
3737   Rnew_val = noreg; // end of lifetime
3738 
3739   // Storing region crossing non-NULL, is card already dirty?
3740   assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
3741   assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
3742   // Make sure not to use Z_R0 for any of these registers.
3743   Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
3744   Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
3745 
3746   // calculate address of card
3747   load_const_optimized(Rbase, (address)ct->byte_map_base());      // Card table base.
3748   z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift);         // Index into card table.
3749   z_algr(Rcard_addr, Rbase);                                      // Explicit calculation needed for cli.
3750   Rbase = noreg; // end of lifetime
3751 
3752   // Filter young.
3753   assert((unsigned int)G1CardTable::g1_young_card_val() <= 255, "otherwise check this code");
3754   z_cli(0, Rcard_addr, (int)G1CardTable::g1_young_card_val());
3755   z_bre(filtered);
3756 
3757   // Check the card value. If dirty, we're done.
3758   // This also avoids false sharing of the (already dirty) card.
3759   z_sync(); // Required to support concurrent cleaning.
3760   assert((unsigned int)CardTable::dirty_card_val() <= 255, "otherwise check this code");
3761   z_cli(0, Rcard_addr, CardTable::dirty_card_val()); // Reload after membar.
3762   z_bre(filtered);
3763 
3764   // Storing a region crossing, non-NULL oop, card is clean.
3765   // Dirty card and log.
3766   z_mvi(0, Rcard_addr, CardTable::dirty_card_val());
3767 
3768   Register Rcard_addr_x = Rcard_addr;
3769   Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3770   Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3771   const int qidx_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3772   const int qbuf_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3773   if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3774     Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
3775   }
3776   lgr_if_needed(Rcard_addr_x, Rcard_addr);
3777 
3778   load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3779   z_bre(callRuntime); // If index == 0, jump to runtime.
3780 
3781   z_lg(Rqueue_buf, qbuf_off, Z_thread);
3782 
3783   add2reg(Rqueue_index, -wordSize); // Decrement index.
3784   z_stg(Rqueue_index, qidx_off, Z_thread);
3785 
3786   z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3787   z_bru(filtered);
3788 
3789   bind(callRuntime);
3790 
3791   // TODO: do we need a frame? Introduced to be on the safe side.
3792   bool needs_frame = true;
3793   lgr_if_needed(Rcard_addr, Rcard_addr_x); // copy back asap. push_frame will destroy Z_R0_scratch!
3794 
3795   // The VM call needs a frame to access (write) volatile registers.
3796   if (needs_frame) {
3797     save_return_pc();
3798     push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3799   }
3800 
3801   // Save the live input values.
3802   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
3803 
3804   if (needs_frame) {
3805     pop_frame();
3806     restore_return_pc();
3807   }
3808 
3809   bind(filtered);
3810 
3811   BLOCK_COMMENT("} g1_write_barrier_post");
3812 }
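
     // Sketch of the G1 post-barrier implemented above (illustrative only):
     //   if (region(store_addr) == region(new_val)) return;        // same-region store, filtered
     //   card = &byte_map_base[store_addr >> card_shift];
     //   if (*card == g1_young_card_val()) return;                 // store into young region, filtered
     //   StoreLoad barrier (z_sync), then re-check:
     //   if (*card == dirty_card_val()) return;                    // already dirty
     //   *card = dirty_card_val();
     //   if (queue index > 0) enqueue card into the thread-local dirty card queue;
     //   else                 call the runtime (SharedRuntime::g1_wb_post).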
3813 #endif // INCLUDE_ALL_GCS
3814 
3815 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3816 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3817   BLOCK_COMMENT("set_last_Java_frame {");
3818 
3819   // Always set last_Java_pc and flags first because once last_Java_sp
3820   // is visible, has_last_Java_frame is true and users will look at the
3821   // rest of the fields. (Note: flags should always be zero before we
3822   // get here, so it doesn't need to be set.)
3823 
3824   // Verify that last_Java_pc was zeroed on return to Java.
3825   if (allow_relocation) {
3826     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3827                             Z_thread,
3828                             "last_Java_pc not zeroed before leaving Java",
3829                             0x200);
3830   } else {
3831     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3832                                    Z_thread,
3833                                    "last_Java_pc not zeroed before leaving Java",
3834                                    0x200);
3835   }
3836 
3837   // When returning from calling out from Java mode the frame anchor's
3838   // last_Java_pc will always be set to NULL. It is set here so that
3839   // if we are doing a call to native (not VM) that we capture the
3840   // known pc and don't have to rely on the native call having a
3841   // standard frame linkage where we can find the pc.
3842   if (last_Java_pc!=noreg) {
3843     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3844   }
3845 
3846   // This membar release is not required on z/Architecture, since the sequence of stores
3847   // is maintained. Nevertheless, we leave it in to document the required ordering.
3848   // The implementation of z_release() should be empty.
3849   // z_release();
3850 
3851   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3852   BLOCK_COMMENT("} set_last_Java_frame");
3853 }
3854 
3855 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3856   BLOCK_COMMENT("reset_last_Java_frame {");
3857 
3858   if (allow_relocation) {
3859     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3860                                Z_thread,
3861                                "SP was not set, still zero",
3862                                0x202);
3863   } else {
3864     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3865                                       Z_thread,
3866                                       "SP was not set, still zero",
3867                                       0x202);
3868   }
3869 
3870   // _last_Java_sp = 0
3871   // Clearing storage must be atomic here, so don't use clear_mem()!
3872   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3873 
3874   // _last_Java_pc = 0
3875   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3876 
3877   BLOCK_COMMENT("} reset_last_Java_frame");
3878   return;
3879 }
3880 
3881 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3882   assert_different_registers(sp, tmp1);
3883 
3884   // We cannot trust that code generated by the C++ compiler saves R14
3885   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3886   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3887   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3888   // it into the frame anchor.
3889   get_PC(tmp1);
3890   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3891 }
3892 
3893 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3894   z_release();
3895 
3896   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3897   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3898   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3899 }
3900 
3901 void MacroAssembler::get_vm_result(Register oop_result) {
3902   verify_thread();
3903 
3904   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3905   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3906 
3907   verify_oop(oop_result);
3908 }
3909 
3910 void MacroAssembler::get_vm_result_2(Register result) {
3911   verify_thread();
3912 
3913   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3914   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3915 }
3916 
3917 // We require that C code which does not return a value in vm_result will
3918 // leave it undisturbed.
3919 void MacroAssembler::set_vm_result(Register oop_result) {
3920   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3921 }
3922 
3923 // Explicit null checks (used for method handle code).
3924 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3925   if (!ImplicitNullChecks) {
3926     NearLabel ok;
3927 
3928     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3929 
3930     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3931     address exception_entry = Interpreter::throw_NullPointerException_entry();
3932     load_absolute_address(reg, exception_entry);
3933     z_br(reg);
3934 
3935     bind(ok);
3936   } else {
3937     if (needs_explicit_null_check((intptr_t)offset)) {
3938       // Provoke an OS NULL exception if reg is NULL by
3939       // accessing M[reg] w/o changing any registers.
3940       z_lg(tmp, 0, reg);
3941     }
3942     // else
3943       // Nothing to do. A (later) access of M[reg + offset]
3944       // will provoke an OS NULL exception if reg is NULL.
3945   }
3946 }
3947 
3948 //-------------------------------------
3949 //  Compressed Klass Pointers
3950 //-------------------------------------
3951 
3952 // Klass oop manipulations if compressed.
3953 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3954   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3955   address  base    = Universe::narrow_klass_base();
3956   int      shift   = Universe::narrow_klass_shift();
3957   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3958 
3959   BLOCK_COMMENT("cKlass encoder {");
3960 
3961 #ifdef ASSERT
3962   Label ok;
3963   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3964   z_brc(Assembler::bcondAllZero, ok);
3965   // The plain disassembler does not recognize illtrap. It instead displays
3966   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3967   // the proper beginning of the next instruction.
3968   z_illtrap(0xee);
3969   z_illtrap(0xee);
3970   bind(ok);
3971 #endif
3972 
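       // The encoding computes dst = (current - base) >> shift. The base is split into
       // its high and low 32-bit halves so that, when only one half is non-zero, a single
       // immediate add of the negated half (aih / agfi) replaces load_const + sgr.
       // Illustration with hypothetical values: base = 0x0000000800000000 (base_l == 0),
       // shift = 3, klass ptr 0x0000000800001200  =>  narrow klass = 0x240.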
3973   if (base != NULL) {
3974     unsigned int base_h = ((unsigned long)base)>>32;
3975     unsigned int base_l = (unsigned int)((unsigned long)base);
3976     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3977       lgr_if_needed(dst, current);
3978       z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3979     } else if ((base_h == 0) && (base_l != 0)) {
3980       lgr_if_needed(dst, current);
3981       z_agfi(dst, -(int)base_l);
3982     } else {
3983       load_const(Z_R0, base);
3984       lgr_if_needed(dst, current);
3985       z_sgr(dst, Z_R0);
3986     }
3987     current = dst;
3988   }
3989   if (shift != 0) {
3990     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3991     z_srlg(dst, current, shift);
3992     current = dst;
3993   }
3994   lgr_if_needed(dst, current); // A move may still be required (if base == NULL and shift == 0, dst was not written above).
3995 
3996   BLOCK_COMMENT("} cKlass encoder");
3997 }
3998 
3999 // This function calculates the size of the code generated by
4000 //   decode_klass_not_null(register dst, Register src)
4001 // when (Universe::heap() != NULL). Hence, if the instructions
4002 // it generates change, then this method needs to be updated.
4003 int MacroAssembler::instr_size_for_decode_klass_not_null() {
4004   address  base    = Universe::narrow_klass_base();
4005   int shift_size   = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
4006   int addbase_size = 0;
4007   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4008 
4009   if (base != NULL) {
4010     unsigned int base_h = ((unsigned long)base)>>32;
4011     unsigned int base_l = (unsigned int)((unsigned long)base);
4012     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4013       addbase_size += 6; /* aih */
4014     } else if ((base_h == 0) && (base_l != 0)) {
4015       addbase_size += 6; /* algfi */
4016     } else {
4017       addbase_size += load_const_size();
4018       addbase_size += 4; /* algr */
4019     }
4020   }
4021 #ifdef ASSERT
4022   addbase_size += 10;
4023   addbase_size += 2; // Extra sigill.
4024 #endif
4025   return addbase_size + shift_size;
4026 }
4027 
4028 // !!! If the instructions that get generated here change
4029 //     then function instr_size_for_decode_klass_not_null()
4030 //     needs to get updated.
4031 // This variant of decode_klass_not_null() must generate predictable code!
4032 // The code must only depend on globally known parameters.
4033 void MacroAssembler::decode_klass_not_null(Register dst) {
4034   address  base    = Universe::narrow_klass_base();
4035   int      shift   = Universe::narrow_klass_shift();
4036   int      beg_off = offset();
4037   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4038 
4039   BLOCK_COMMENT("cKlass decoder (const size) {");
4040 
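       // Decoding is the inverse of the encoder above: dst = (dst << shift) + base.
       // Illustration with hypothetical values: shift = 3, base = 0x0000000800000000,
       // narrow klass 0x240  =>  0x240 << 3 = 0x1200, plus base = 0x0000000800001200.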
4041   if (shift != 0) { // Shift required?
4042     z_sllg(dst, dst, shift);
4043   }
4044   if (base != NULL) {
4045     unsigned int base_h = ((unsigned long)base)>>32;
4046     unsigned int base_l = (unsigned int)((unsigned long)base);
4047     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4048       z_aih(dst, base_h);     // Base has no set bits in lower half.
4049     } else if ((base_h == 0) && (base_l != 0)) {
4050       z_algfi(dst, base_l);   // Base has no set bits in upper half.
4051     } else {
4052       load_const(Z_R0, base); // Base has set bits everywhere.
4053       z_algr(dst, Z_R0);
4054     }
4055   }
4056 
4057 #ifdef ASSERT
4058   Label ok;
4059   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4060   z_brc(Assembler::bcondAllZero, ok);
4061   // The plain disassembler does not recognize illtrap. It instead displays
4062   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4063   // the proper beginning of the next instruction.
4064   z_illtrap(0xd1);
4065   z_illtrap(0xd1);
4066   bind(ok);
4067 #endif
4068   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
4069 
4070   BLOCK_COMMENT("} cKlass decoder (const size)");
4071 }
4072 
4073 // This variant of decode_klass_not_null() is for cases where
4074 //  1) the size of the generated instructions may vary
4075 //  2) the result is (potentially) stored in a register different from the source.
4076 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
4077   address base  = Universe::narrow_klass_base();
4078   int     shift = Universe::narrow_klass_shift();
4079   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
4080 
4081   BLOCK_COMMENT("cKlass decoder {");
4082 
4083   if (src == noreg) src = dst;
4084 
4085   if (shift != 0) { // Shift or at least move required?
4086     z_sllg(dst, src, shift);
4087   } else {
4088     lgr_if_needed(dst, src);
4089   }
4090 
4091   if (base != NULL) {
4092     unsigned int base_h = ((unsigned long)base)>>32;
4093     unsigned int base_l = (unsigned int)((unsigned long)base);
4094     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4095       z_aih(dst, base_h);     // Base has no set bits in lower half.
4096     } else if ((base_h == 0) && (base_l != 0)) {
4097       z_algfi(dst, base_l);   // Base has no set bits in upper half.
4098     } else {
4099       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
4100       z_algr(dst, Z_R0);
4101     }
4102   }
4103 
4104 #ifdef ASSERT
4105   Label ok;
4106   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4107   z_brc(Assembler::bcondAllZero, ok);
4108   // The plain disassembler does not recognize illtrap. It instead displays
4109   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4110   // the proper beginning of the next instruction.
4111   z_illtrap(0xd2);
4112   z_illtrap(0xd2);
4113   bind(ok);
4114 #endif
4115   BLOCK_COMMENT("} cKlass decoder");
4116 }
4117 
4118 void MacroAssembler::load_klass(Register klass, Address mem) {
4119   if (UseCompressedClassPointers) {
4120     z_llgf(klass, mem);
4121     // Attention: no null check here!
4122     decode_klass_not_null(klass);
4123   } else {
4124     z_lg(klass, mem);
4125   }
4126 }
4127 
4128 void MacroAssembler::load_klass(Register klass, Register src_oop) {
4129   if (UseCompressedClassPointers) {
4130     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4131     // Attention: no null check here!
4132     decode_klass_not_null(klass);
4133   } else {
4134     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4135   }
4136 }
4137 
4138 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
4139   assert_different_registers(Rheader, Rsrc_oop);
4140   load_klass(Rheader, Rsrc_oop);
4141   z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
4142 }
4143 
4144 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
4145   if (UseCompressedClassPointers) {
4146     assert_different_registers(dst_oop, klass, Z_R0);
4147     if (ck == noreg) ck = klass;
4148     encode_klass_not_null(ck, klass);
4149     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4150   } else {
4151     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4152   }
4153 }
4154 
4155 void MacroAssembler::store_klass_gap(Register s, Register d) {
4156   if (UseCompressedClassPointers) {
4157     assert(s != d, "not enough registers");
4158     // Support s = noreg.
4159     if (s != noreg) {
4160       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
4161     } else {
4162       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
4163     }
4164   }
4165 }
4166 
4167 // Compare klass ptr in memory against klass ptr in register.
4168 //
4169 // Rop1            - klass in register, always uncompressed.
4170 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
4171 // Rbase           - Base address of cKlass in memory.
4172 // maybeNULL       - True if Rop1 possibly is a NULL.
4173 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
4174 
4175   BLOCK_COMMENT("compare klass ptr {");
4176 
4177   if (UseCompressedClassPointers) {
4178     const int shift = Universe::narrow_klass_shift();
4179     address   base  = Universe::narrow_klass_base();
4180 
4181     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
4182     assert_different_registers(Rop1, Z_R0);
4183     assert_different_registers(Rop1, Rbase, Z_R1);
4184 
4185     // First encode register oop and then compare with cOop in memory.
4186     // This sequence saves an unnecessary cOop load and decode.
4187     if (base == NULL) {
4188       if (shift == 0) {
4189         z_cl(Rop1, disp, Rbase);     // Unscaled
4190       } else {
4191         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
4192         z_cl(Z_R0, disp, Rbase);
4193       }
4194     } else {                         // HeapBased
4195 #ifdef ASSERT
4196       bool     used_R0 = true;
4197       bool     used_R1 = true;
4198 #endif
4199       Register current = Rop1;
4200       Label    done;
4201 
4202       if (maybeNULL) {       // NULL ptr must be preserved!
4203         z_ltgr(Z_R0, current);
4204         z_bre(done);
4205         current = Z_R0;
4206       }
4207 
4208       unsigned int base_h = ((unsigned long)base)>>32;
4209       unsigned int base_l = (unsigned int)((unsigned long)base);
4210       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4211         lgr_if_needed(Z_R0, current);
4212         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
4213       } else if ((base_h == 0) && (base_l != 0)) {
4214         lgr_if_needed(Z_R0, current);
4215         z_agfi(Z_R0, -(int)base_l);
4216       } else {
4217         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4218         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
4219       }
4220 
4221       if (shift != 0) {
4222         z_srlg(Z_R0, Z_R0, shift);
4223       }
4224       bind(done);
4225       z_cl(Z_R0, disp, Rbase);
4226 #ifdef ASSERT
4227       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4228       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4229 #endif
4230     }
4231   } else {
4232     z_clg(Rop1, disp, Z_R0, Rbase);
4233   }
4234   BLOCK_COMMENT("} compare klass ptr");
4235 }
4236 
4237 //---------------------------
4238 //  Compressed oops
4239 //---------------------------
4240 
4241 void MacroAssembler::encode_heap_oop(Register oop) {
4242   oop_encoder(oop, oop, true /*maybe null*/);
4243 }
4244 
4245 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
4246   oop_encoder(oop, oop, false /*not null*/);
4247 }
4248 
4249 // Called with something derived from the oop base. e.g. oop_base>>3.
4250 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
4251   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
4252   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
4253   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
4254   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
4255   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
4256                                + (oop_base_lh == 0 ? 0:1)
4257                                + (oop_base_hl == 0 ? 0:1)
4258                                + (oop_base_hh == 0 ? 0:1);
4259 
4260   assert(oop_base != 0, "This is for HeapBased cOops only");
4261 
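       // Illustration with a hypothetical base: oop_base = 0x00007fffffff9000 has three
       // non-zero 16-bit parts, but oop_base + 0x7000 = 0x0000800000000000 has only one.
       // In that case -0x7000 is returned, so the caller can load the "nice" power-of-2
       // value and fold the small offset back in with a later instruction.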
4262   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
4263     uint64_t pow2_offset = 0x10000 - oop_base_ll;
4264     if (pow2_offset < 0x8000) {  // This might not be necessary.
4265       uint64_t oop_base2 = oop_base + pow2_offset;
4266 
4267       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
4268       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
4269       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
4270       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
4271       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
4272                         (oop_base_lh == 0 ? 0:1) +
4273                         (oop_base_hl == 0 ? 0:1) +
4274                         (oop_base_hh == 0 ? 0:1);
4275       if (n_notzero_parts == 1) {
4276         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
4277         return -pow2_offset;
4278       }
4279     }
4280   }
4281   return 0;
4282 }
4283 
4284 // If base address is offset from a straight power of two by just a few pages,
4285 // return this offset to the caller for a possible later composite add.
4286 // TODO/FIX: will only work correctly for 4k pages.
4287 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
4288   int pow2_offset = get_oop_base_pow2_offset(oop_base);
4289 
4290   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
4291 
4292   return pow2_offset;
4293 }
4294 
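     // Like get_oop_base(), but loads the negated (two's complement) base into Rbase.
     // This lets callers compute "oop - base" as "oop + Rbase + returned offset",
     // e.g. with a single add-with-index instruction.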
4295 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
4296   int offset = get_oop_base(Rbase, oop_base);
4297   z_lcgr(Rbase, Rbase);
4298   return -offset;
4299 }
4300 
4301 // Compare compressed oop in memory against oop in register.
4302 // Rop1            - Oop in register.
4303 // disp            - Offset of cOop in memory.
4304 // Rbase           - Base address of cOop in memory.
4305 // maybeNULL       - True if Rop1 possibly is a NULL.
4307 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
4308   Register Rbase  = mem.baseOrR0();
4309   Register Rindex = mem.indexOrR0();
4310   int64_t  disp   = mem.disp();
4311 
4312   const int shift = Universe::narrow_oop_shift();
4313   address   base  = Universe::narrow_oop_base();
4314 
4315   assert(UseCompressedOops, "must be on to call this method");
4316   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4317   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4318   assert_different_registers(Rop1, Z_R0);
4319   assert_different_registers(Rop1, Rbase, Z_R1);
4320   assert_different_registers(Rop1, Rindex, Z_R1);
4321 
4322   BLOCK_COMMENT("compare heap oop {");
4323 
4324   // First encode register oop and then compare with cOop in memory.
4325   // This sequence saves an unnecessary cOop load and decode.
4326   if (base == NULL) {
4327     if (shift == 0) {
4328       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4329     } else {
4330       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4331       z_cl(Z_R0, disp, Rindex, Rbase);
4332     }
4333   } else {                              // HeapBased
4334 #ifdef ASSERT
4335     bool  used_R0 = true;
4336     bool  used_R1 = true;
4337 #endif
4338     Label done;
4339     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4340 
4341     if (maybeNULL) {       // NULL ptr must be preserved!
4342       z_ltgr(Z_R0, Rop1);
4343       z_bre(done);
4344     }
4345 
4346     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4347     z_srlg(Z_R0, Z_R0, shift);
4348 
4349     bind(done);
4350     z_cl(Z_R0, disp, Rindex, Rbase);
4351 #ifdef ASSERT
4352     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4353     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4354 #endif
4355   }
4356   BLOCK_COMMENT("} compare heap oop");
4357 }
4358 
4359 // Load heap oop and decompress, if necessary.
4360 void  MacroAssembler::load_heap_oop(Register dest, const Address &a) {
4361   if (UseCompressedOops) {
4362     z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4363     oop_decoder(dest, dest, true);
4364   } else {
4365     z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4366   }
4367 }
4368 
4369 // Load heap oop and decompress, if necessary.
4370 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) {
4371   if (UseCompressedOops) {
4372     z_llgf(dest, disp, base);
4373     oop_decoder(dest, dest, true);
4374   } else {
4375     z_lg(dest, disp, base);
4376   }
4377 }
4378 
4379 // Load heap oop and decompress, if necessary.
4380 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) {
4381   if (UseCompressedOops) {
4382     z_llgf(dest, disp, base);
4383     oop_decoder(dest, dest, false);
4384   } else {
4385     z_lg(dest, disp, base);
4386   }
4387 }
4388 
4389 // Compress, if necessary, and store oop to heap.
4390 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) {
4391   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4392   if (UseCompressedOops) {
4393     assert_different_registers(Roop, offset.register_or_noreg(), base);
4394     encode_heap_oop(Roop);
4395     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4396   } else {
4397     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4398   }
4399 }
4400 
4401 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL.
4402 void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) {
4403   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4404   if (UseCompressedOops) {
4405     assert_different_registers(Roop, offset.register_or_noreg(), base);
4406     encode_heap_oop_not_null(Roop);
4407     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4408   } else {
4409     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4410   }
4411 }
4412 
4413 // Store NULL oop to heap.
4414 void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) {
4415   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4416   if (UseCompressedOops) {
4417     z_st(zero, offset.constant_or_zero(), Ridx, base);
4418   } else {
4419     z_stg(zero, offset.constant_or_zero(), Ridx, base);
4420   }
4421 }
4422 
4423 //-------------------------------------------------
4424 // Encode compressed oop. Generally usable encoder.
4425 //-------------------------------------------------
4426 // Rsrc - contains regular oop on entry. It remains unchanged.
4427 // Rdst - contains compressed oop on exit.
4428 // Rdst and Rsrc may indicate the same register, in which case Rsrc is clobbered.
4429 //
4430 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4431 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4432 //
4433 // only32bitValid is set if later code uses only the lower 32 bits. In this
4434 // case, we need not clean up the upper 32 bits.
4435 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4436                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4437 
4438   const address oop_base  = Universe::narrow_oop_base();
4439   const int     oop_shift = Universe::narrow_oop_shift();
4440   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4441 
4442   assert(UseCompressedOops, "must be on to call this method");
4443   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4444   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4445 
4446   if (disjoint || (oop_base == NULL)) {
4447     BLOCK_COMMENT("cOop encoder zeroBase {");
4448     if (oop_shift == 0) {
4449       if (oop_base != NULL && !only32bitValid) {
4450         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4451       } else {
4452         lgr_if_needed(Rdst, Rsrc);
4453       }
4454     } else {
4455       z_srlg(Rdst, Rsrc, oop_shift);
4456       if (oop_base != NULL && !only32bitValid) {
4457         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4458       }
4459     }
4460     BLOCK_COMMENT("} cOop encoder zeroBase");
4461     return;
4462   }
4463 
4464   bool used_R0 = false;
4465   bool used_R1 = false;
4466 
4467   BLOCK_COMMENT("cOop encoder general {");
4468   assert_different_registers(Rdst, Z_R1);
4469   assert_different_registers(Rsrc, Rbase);
4470   if (maybeNULL) {
4471     Label done;
4472     // We reorder shifting and subtracting, so that we can compare
4473     // and shift in parallel:
4474     //
4475     // cycle 0:  potential LoadN, base = <const>
4476     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4477     // cycle 2:  if (cr) br,      dst = dst + base + offset
4478 
4479     // Get oop_base components.
4480     if (pow2_offset == -1) {
4481       if (Rdst == Rbase) {
4482         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4483           Rbase = Z_R0;
4484           used_R0 = true;
4485         } else {
4486           Rdst = Z_R1;
4487           used_R1 = true;
4488         }
4489       }
4490       if (Rbase == Z_R1) {
4491         used_R1 = true;
4492       }
4493       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4494     }
4495     assert_different_registers(Rdst, Rbase);
4496 
4497     // Check for NULL oop (must be left alone) and shift.
4498     if (oop_shift != 0) {  // Shift out alignment bits
4499       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4500         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4501       } else {
4502         z_srlg(Rdst, Rsrc, oop_shift);
4503         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4504         // z_cghi(Rsrc, 0) might look faster because it does not write a register,
4505         // but it is not; the load-and-test form above is preferred.
4506       }
4507     } else {
4508       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4509     }
4510     z_bre(done);
4511 
4512     // Subtract oop_base components.
4513     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4514       z_algr(Rdst, Rbase);
4515       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4516     } else {
4517       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4518     }
4519     if (!only32bitValid) {
4520       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4521     }
4522     bind(done);
4523 
4524   } else {  // not null
4525     // Get oop_base components.
4526     if (pow2_offset == -1) {
4527       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4528     }
4529 
4530     // Subtract oop_base components and shift.
4531     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4532       // Don't use lay instruction.
4533       if (Rdst == Rsrc) {
4534         z_algr(Rdst, Rbase);
4535       } else {
4536         lgr_if_needed(Rdst, Rbase);
4537         z_algr(Rdst, Rsrc);
4538       }
4539       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4540     } else {
4541       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4542     }
4543     if (oop_shift != 0) {   // Shift out alignment bits.
4544       z_srlg(Rdst, Rdst, oop_shift);
4545     }
4546     if (!only32bitValid) {
4547       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4548     }
4549   }
4550 #ifdef ASSERT
4551   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4552   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4553 #endif
4554   BLOCK_COMMENT("} cOop encoder general");
4555 }
4556 
4557 //-------------------------------------------------
4558 // Decode compressed oop. Generally usable decoder.
4559 //-------------------------------------------------
4560 // Rsrc - contains compressed oop on entry.
4561 // Rdst - contains regular oop on exit.
4562 // Rdst and Rsrc may indicate same register.
4563 // Rsrc must not be the same register as Rbase if Rbase was preloaded (before call); see the guarantee below.
4564 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.
4565 // Rbase - register to use for the base
4566 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4567 // For performance, it is good to
4568 //  - avoid Z_R0 for any of the argument registers.
4569 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4570 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
4571 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4572 
4573   const address oop_base  = Universe::narrow_oop_base();
4574   const int     oop_shift = Universe::narrow_oop_shift();
4575   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4576 
4577   assert(UseCompressedOops, "must be on to call this method");
4578   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4579   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4580          "cOop encoder detected bad shift");
4581 
4582   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4583 
4584   if (oop_base != NULL) {
4585     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4586     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4587     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4588     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4589       BLOCK_COMMENT("cOop decoder disjointBase {");
4590       // We do not need to load the base. Instead, we can install the upper bits
4591       // with an OR instead of an ADD.
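           // Illustration with a hypothetical disjoint base: oop_base = 0x0000080000000000.
           // The shifted narrow oop only populates bits below the base, so
           // (Rsrc << shift) | base == (Rsrc << shift) + base, and the base can be
           // installed with z_oihl(Rdst, 0x0800) without loading a 64-bit constant.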
4592       Label done;
4593 
4594       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4595       if (maybeNULL) {  // NULL ptr must be preserved!
4596         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4597         z_bre(done);
4598       } else {
4599         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4600       }
4601       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4602         z_oihf(Rdst, oop_base_hf);
4603       } else if (oop_base_hl != 0) {
4604         z_oihl(Rdst, oop_base_hl);
4605       } else {
4606         assert(oop_base_hh != 0, "not heapbased mode");
4607         z_oihh(Rdst, oop_base_hh);
4608       }
4609       bind(done);
4610       BLOCK_COMMENT("} cOop decoder disjointBase");
4611     } else {
4612       BLOCK_COMMENT("cOop decoder general {");
4613       // There are three decode steps:
4614       //   scale oop offset (shift left)
4615       //   get base (in reg) and pow2_offset (constant)
4616       //   add base, pow2_offset, and oop offset
4617       // The following register overlap situations may exist:
4618       // Rdst == Rsrc,  Rbase any other
4619       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4620       //   Loading Rbase does not impact the scaled offset.
4621       // Rdst == Rbase, Rsrc  any other
4622       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4623       //   would destroy the scaled offset.
4624       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4625       //           use Rbase_tmp if base has to be loaded.
4626       // Rsrc == Rbase, Rdst  any other
4627       //   Only possible without preloaded Rbase.
4628       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4629       // Rsrc == Rbase, Rdst == Rbase
4630       //   Only possible without preloaded Rbase.
4631       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4632       //   Remedy: use Rbase_tmp.
4633       //
4634       Label    done;
4635       Register Rdst_tmp       = Rdst;
4636       Register Rbase_tmp      = Rbase;
4637       bool     used_R0        = false;
4638       bool     used_R1        = false;
4639       bool     base_preloaded = pow2_offset >= 0;
4640       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4641       assert(oop_shift != 0, "room for optimization");
4642 
4643       // Check if we need to use scratch registers.
4644       if (Rdst == Rbase) {
4645         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4646         if (Rdst != Rsrc) {
4647           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4648           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4649         } else {
4650           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4651         }
4652       }
4653       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4654 
4655       // Scale oop and check for NULL.
4656       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4657       if (maybeNULL) {  // NULL ptr must be preserved!
4658         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4659         z_bre(done);
4660       } else {
4661         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4662       }
4663 
4664       // Get oop_base components.
4665       if (!base_preloaded) {
4666         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4667       }
4668 
4669       // Add up all components.
4670       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4671         z_algr(Rdst_tmp, Rbase_tmp);
4672         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4673       } else {
4674         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4675       }
4676 
4677       bind(done);
4678       lgr_if_needed(Rdst, Rdst_tmp);
4679 #ifdef ASSERT
4680       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4681       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4682 #endif
4683       BLOCK_COMMENT("} cOop decoder general");
4684     }
4685   } else {
4686     BLOCK_COMMENT("cOop decoder zeroBase {");
4687     if (oop_shift == 0) {
4688       lgr_if_needed(Rdst, Rsrc);
4689     } else {
4690       z_sllg(Rdst, Rsrc, oop_shift);
4691     }
4692     BLOCK_COMMENT("} cOop decoder zeroBase");
4693   }
4694 }
4695 
4696 // ((OopHandle)result).resolve();
4697 void MacroAssembler::resolve_oop_handle(Register result) {
4698   // OopHandle::resolve is an indirection.
4699   z_lg(result, 0, result);
4700 }
4701 
4702 void MacroAssembler::load_mirror(Register mirror, Register method) {
4703   mem2reg_opt(mirror, Address(method, Method::const_offset()));
4704   mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
4705   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4706   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4707   resolve_oop_handle(mirror);
4708 }
4709 
4710 //---------------------------------------------------------------
4711 //---  Operations on arrays.
4712 //---------------------------------------------------------------
4713 
4714 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4715 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4716 // work registers anyway.
4717 // Actually, only r0, r1, and r5 are killed.
4718 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
4719   // src_addr is the even register of the pair.
4720   // src_len is the odd register of the pair.
4721 
4722   int      block_start = offset();
4723   Register tmp_reg  = src_len; // Holds target instr addr for EX.
4724   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4725   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
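       // MVCLE (emitted via move_long_ext) works on even/odd register pairs:
       // (dst_addr, dst_len) = (Z_R0, Z_R1) and (src_addr, src_len) as passed in.
       // With the source length forced to zero, MVCLE pads the destination with the
       // padding byte (0 here), which is what actually performs the clear.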
4726 
4727   Label doXC, doMVCLE, done;
4728 
4729   BLOCK_COMMENT("Clear_Array {");
4730 
4731   // Check for zero len and convert to long.
4732   z_ltgfr(src_len, cnt_arg);      // Sign-extend count to long; sets CC for the zero-length check.
4733   z_bre(done);                    // Nothing to do if len == 0.
4734 
4735   // Prefetch data to be cleared.
4736   if (VM_Version::has_Prefetch()) {
4737     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4738     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4739   }
4740 
4741   z_sllg(dst_len, src_len, 3);    // #bytes to clear.
4742   z_cghi(src_len, 32);            // Check for len <= 256 bytes (<=32 DW).
4743   z_brnh(doXC);                   // If so, use executed XC to clear.
4744 
4745   // MVCLE: initialize long arrays (general case).
4746   bind(doMVCLE);
4747   z_lgr(dst_addr, base_pointer_arg);
4748   clear_reg(src_len, true, false); // Src len of MVCLE is zero.
4749 
4750   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4751   z_bru(done);
4752 
4753   // XC: initialize short arrays.
4754   Label XC_template; // Instr template, never exec directly!
4755     bind(XC_template);
4756     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4757 
4758   bind(doXC);
4759     add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
4760     if (VM_Version::has_ExecuteExtensions()) {
4761       z_exrl(dst_len, XC_template);   // Execute XC with var. len.
4762     } else {
4763       z_larl(tmp_reg, XC_template);
4764       z_ex(dst_len,0,Z_R0,tmp_reg);   // Execute XC with var. len.
4765     }
4766     // z_bru(done);      // fallthru
4767 
4768   bind(done);
4769 
4770   BLOCK_COMMENT("} Clear_Array");
4771 
4772   int block_end = offset();
4773   return block_end - block_start;
4774 }
4775 
4776 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4777 // Emitter does not KILL any arguments nor work registers.
4778 // Emitter generates up to 16 XC instructions, depending on the array length.
4779 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4780   int  block_start    = offset();
4781   int  off;
4782   int  lineSize_Bytes = AllocatePrefetchStepSize;
4783   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4784   bool doPrefetch     = VM_Version::has_Prefetch();
4785   int  XC_maxlen      = 256;
4786   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
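       // Illustration with hypothetical values: cnt = 100 doublewords => 800 bytes to clear,
       // numXCInstr = (800-1)/256 + 1 = 4, i.e. three full 256-byte XC instructions plus one
       // shorter XC for the remaining 32 bytes (see the tail handling below).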
4787 
4788   BLOCK_COMMENT("Clear_Array_Const {");
4789   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4790 
4791   // Do less prefetching for very short arrays.
4792   if (numXCInstr > 0) {
4793     // Prefetch only some cache lines, then begin clearing.
4794     if (doPrefetch) {
4795       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4796         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4797       } else {
4798         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4799         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4800           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4801         }
4802       }
4803     }
4804 
4805     for (off=0; off<(numXCInstr-1); off++) {
4806       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4807 
4808       // Prefetch some cache lines in advance.
4809       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4810         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4811       }
4812     }
4813     if (off*XC_maxlen < cnt*BytesPerWord) {
4814       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4815     }
4816   }
4817   BLOCK_COMMENT("} Clear_Array_Const");
4818 
4819   int block_end = offset();
4820   return block_end - block_start;
4821 }
4822 
4823 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4824 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4825 // work registers anyway.
4826 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
4827 //
4828 // For very large arrays, exploit MVCLE H/W support.
4829 // MVCLE instruction automatically exploits H/W-optimized page mover.
4830 // - Bytes up to next page boundary are cleared with a series of XC to self.
4831 // - All full pages are cleared with the page mover H/W assist.
4832 // - Remaining bytes are again cleared by a series of XC to self.
4833 //
4834 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
4835   // Src_addr is evenReg.
4836   // Src_len is odd_Reg.
4837 
4838   int      block_start = offset();
4839   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4840   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4841 
4842   BLOCK_COMMENT("Clear_Array_Const_Big {");
4843 
4844   // Get len to clear.
4845   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4846 
4847   // Prepare other args to MVCLE.
4848   z_lgr(dst_addr, base_pointer_arg);
4849   // Indicate unused result.
4850   (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.
4851 
4852   // Clear.
4853   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4854   BLOCK_COMMENT("} Clear_Array_Const_Big");
4855 
4856   int block_end = offset();
4857   return block_end - block_start;
4858 }
4859 
4860 // Allocator.
4861 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4862                                                            Register cnt_reg,
4863                                                            Register tmp1_reg, Register tmp2_reg) {
4864   // tmp1_reg is the odd register of the pair.
4865   // tmp2_reg is the even register of the pair.
4866 
4867   int block_start = offset();
4868   Label doMVC, doMVCLE, done, MVC_template;
4869 
4870   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4871 
4872   // Check for zero len and convert to long.
4873   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend count to long; sets CC for the zero-length check.
4874   z_bre(done);                    // Nothing to do if len == 0.
4875 
4876   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4877 
4878   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4879   z_brnh(doMVC);                  // If so, use executed MVC to copy.
4880 
4881   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4882   // Prep dest reg pair.
4883   z_lgr(Z_R0, dst_reg);           // dst addr
4884   // Dst len already in Z_R1.
4885   // Prep src reg pair.
4886   z_lgr(tmp2_reg, src_reg);       // src addr
4887   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4888 
4889   // Do the copy.
4890   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4891   z_bru(done);                         // All done.
4892 
4893   bind(MVC_template);             // Just some data (not more than 256 bytes).
4894   z_mvc(0, 0, dst_reg, 0, src_reg);
4895 
4896   bind(doMVC);
4897 
4898   if (VM_Version::has_ExecuteExtensions()) {
4899     add2reg(Z_R1, -1);
4900   } else {
4901     add2reg(tmp1_reg, -1, Z_R1);
4902     z_larl(Z_R1, MVC_template);
4903   }
4904 
4905   if (VM_Version::has_Prefetch()) {
4906     z_pfd(1,  0,Z_R0,src_reg);
4907     z_pfd(2,  0,Z_R0,dst_reg);
4908     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4909     //    z_pfd(2,256,Z_R0,dst_reg);
4910   }
4911 
4912   if (VM_Version::has_ExecuteExtensions()) {
4913     z_exrl(Z_R1, MVC_template);
4914   } else {
4915     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4916   }
4917 
4918   bind(done);
4919 
4920   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4921 
4922   int block_end = offset();
4923   return block_end - block_start;
4924 }
4925 
4926 //------------------------------------------------------
4927 //   Special String Intrinsics. Implementation
4928 //------------------------------------------------------
4929 
4930 // Intrinsics for CompactStrings
4931 
4932 // Compress char[] to byte[].
4933 //   Restores: src, dst
4934 //   Uses:     cnt
4935 //   Kills:    tmp, Z_R0, Z_R1.
4936 //   Early clobber: result.
4937 // Note:
4938 //   cnt is signed int. Do not rely on high word!
4939 //       counts # characters, not bytes.
4940 // The result is the number of characters copied before the first incompatible character was found.
4941 // If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
4942 // by a few bytes. The result always indicates the number of copied characters.
4943 // When used as a character index, the returned value points to the first incompatible character.
4944 //
4945 // Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure:
4946 // - Different number of characters may have been written to dead array (if precise is false).
4947 // - Returns a number <cnt instead of 0. (Result gets compared with cnt.)
4948 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
4949                                              Register tmp,    bool precise) {
4950   assert_different_registers(Z_R0, Z_R1, result, src, dst, cnt, tmp);
4951 
4952   if (precise) {
4953     BLOCK_COMMENT("encode_iso_array {");
4954   } else {
4955     BLOCK_COMMENT("string_compress {");
4956   }
4957   int  block_start = offset();
4958 
4959   Register       Rsrc  = src;
4960   Register       Rdst  = dst;
4961   Register       Rix   = tmp;
4962   Register       Rcnt  = cnt;
4963   Register       Rmask = result;  // holds incompatibility check mask until result value is stored.
4964   Label          ScalarShortcut, AllDone;
4965 
4966   z_iilf(Rmask, 0xFF00FF00);
4967   z_iihf(Rmask, 0xFF00FF00);
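       // Rmask now holds 0xFF00FF00FF00FF00: it selects the high byte of each 2-byte char.
       // A char can be compressed to a single byte iff (char & 0xFF00) == 0 (i.e. it is a
       // Latin-1 character); any bit found under the mask aborts the respective fast path.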
4968 
4969 #if 0  // Sacrifice shortcuts for code compactness
4970   {
4971     //---<  shortcuts for short strings (very frequent)   >---
4972     //   Strings with 4 and 8 characters were found to occur very frequently.
4973     //   Therefore, we handle them right away with minimal overhead.
4974     Label     skipShortcut, skip4Shortcut, skip8Shortcut;
4975     Register  Rout = Z_R0;
4976     z_chi(Rcnt, 4);
4977     z_brne(skip4Shortcut);                 // 4 characters are very frequent
4978       z_lg(Z_R0, 0, Rsrc);                 // Treat exactly 4 characters specially.
4979       if (VM_Version::has_DistinctOpnds()) {
4980         Rout = Z_R0;
4981         z_ngrk(Rix, Z_R0, Rmask);
4982       } else {
4983         Rout = Rix;
4984         z_lgr(Rix, Z_R0);
4985         z_ngr(Z_R0, Rmask);
4986       }
4987       z_brnz(skipShortcut);
4988       z_stcmh(Rout, 5, 0, Rdst);
4989       z_stcm(Rout,  5, 2, Rdst);
4990       z_lgfr(result, Rcnt);
4991       z_bru(AllDone);
4992     bind(skip4Shortcut);
4993 
4994     z_chi(Rcnt, 8);
4995     z_brne(skip8Shortcut);                 // There's more to do...
4996       z_lmg(Z_R0, Z_R1, 0, Rsrc);          // Treat exactly 8 characters specially.
4997       if (VM_Version::has_DistinctOpnds()) {
4998         Rout = Z_R0;
4999         z_ogrk(Rix, Z_R0, Z_R1);
5000         z_ngr(Rix, Rmask);
5001       } else {
5002         Rout = Rix;
5003         z_lgr(Rix, Z_R0);
5004         z_ogr(Z_R0, Z_R1);
5005         z_ngr(Z_R0, Rmask);
5006       }
5007       z_brnz(skipShortcut);
5008       z_stcmh(Rout, 5, 0, Rdst);
5009       z_stcm(Rout,  5, 2, Rdst);
5010       z_stcmh(Z_R1, 5, 4, Rdst);
5011       z_stcm(Z_R1,  5, 6, Rdst);
5012       z_lgfr(result, Rcnt);
5013       z_bru(AllDone);
5014 
5015     bind(skip8Shortcut);
5016     clear_reg(Z_R0, true, false);          // #characters already processed (none). Precond for scalar loop.
5017     z_brl(ScalarShortcut);                 // Just a few characters
5018 
5019     bind(skipShortcut);
5020   }
5021 #endif
5022   clear_reg(Z_R0);                         // make sure register is properly initialized.
5023 
5024   if (VM_Version::has_VectorFacility()) {
5025     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5026                                            // Otherwise just do nothing in vector mode.
5027                                            // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
5028     const int  log_min_vcnt = exact_log2(min_vcnt);
5029     Label      VectorLoop, VectorDone, VectorBreak;
5030 
5031     VectorRegister Vtmp1      = Z_V16;
5032     VectorRegister Vtmp2      = Z_V17;
5033     VectorRegister Vmask      = Z_V18;
5034     VectorRegister Vzero      = Z_V19;
5035     VectorRegister Vsrc_first = Z_V20;
5036     VectorRegister Vsrc_last  = Z_V23;
5037 
5038     assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
5039     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
5040     z_srak(Rix, Rcnt, log_min_vcnt);       // # vector loop iterations
5041     z_brz(VectorDone);                     // not enough data for vector loop
5042 
5043     z_vzero(Vzero);                        // all zeroes
5044     z_vgmh(Vmask, 0, 7);                   // generate 0xff00 mask for all 2-byte elements
5045     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
5046 
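         // Each iteration loads four vector registers (Z_V20..Z_V23) = 4 * 8 = 32 chars
         // (64 bytes), ORs them together and tests the high bytes against Vmask, then
         // packs the low bytes into two vector registers (32 bytes) for the store.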
5047     bind(VectorLoop);
5048       z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
5049       add2reg(Rsrc, min_vcnt*2);
5050 
5051       //---<  check for incompatible character  >---
5052       z_vo(Vtmp1, Z_V20, Z_V21);
5053       z_vo(Vtmp2, Z_V22, Z_V23);
5054       z_vo(Vtmp1, Vtmp1, Vtmp2);
5055       z_vn(Vtmp1, Vtmp1, Vmask);
5056       z_vceqhs(Vtmp1, Vtmp1, Vzero);       // high half of all chars must be zero for successful compress.
5057       z_bvnt(VectorBreak);                 // break vector loop if not all vector elements compare eq -> incompatible character found.
5058                                            // re-process data from current iteration in break handler.
5059 
5060       //---<  pack & store characters  >---
5061       z_vpkh(Vtmp1, Z_V20, Z_V21);         // pack (src1, src2) -> tmp1
5062       z_vpkh(Vtmp2, Z_V22, Z_V23);         // pack (src3, src4) -> tmp2
5063       z_vstm(Vtmp1, Vtmp2, 0, Rdst);       // store packed string
5064       add2reg(Rdst, min_vcnt);
5065 
5066       z_brct(Rix, VectorLoop);
5067 
5068     z_bru(VectorDone);
5069 
5070     bind(VectorBreak);
5071       add2reg(Rsrc, -min_vcnt*2);          // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
5072       z_sll(Rix, log_min_vcnt);            // # chars processed so far in VectorLoop, excl. current iteration.
5073       z_sr(Z_R0, Rix);                     // correct # chars processed in total.
5074 
5075     bind(VectorDone);
5076   }
5077 
5078   {
5079     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled loop.
5080                                            // Otherwise just do nothing in unrolled loop.
5081                                            // Must be multiple of 8.
5082     const int  log_min_cnt = exact_log2(min_cnt);
5083     Label      UnrolledLoop, UnrolledDone, UnrolledBreak;
5084 
5085     if (VM_Version::has_DistinctOpnds()) {
5086       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
5087     } else {
5088       z_lr(Rix, Rcnt);
5089       z_sr(Rix, Z_R0);
5090     }
5091     z_sra(Rix, log_min_cnt);             // unrolled loop count
5092     z_brz(UnrolledDone);
5093 
5094     bind(UnrolledLoop);
5095       z_lmg(Z_R0, Z_R1, 0, Rsrc);
5096       if (precise) {
5097         z_ogr(Z_R1, Z_R0);                 // check all 8 chars for incompatibility
5098         z_ngr(Z_R1, Rmask);
5099         z_brnz(UnrolledBreak);
5100 
5101         z_lg(Z_R1, 8, Rsrc);               // reload destroyed register
5102         z_stcmh(Z_R0, 5, 0, Rdst);
5103         z_stcm(Z_R0,  5, 2, Rdst);
5104       } else {
5105         z_stcmh(Z_R0, 5, 0, Rdst);
5106         z_stcm(Z_R0,  5, 2, Rdst);
5107 
5108         z_ogr(Z_R0, Z_R1);
5109         z_ngr(Z_R0, Rmask);
5110         z_brnz(UnrolledBreak);
5111       }
5112       z_stcmh(Z_R1, 5, 4, Rdst);
5113       z_stcm(Z_R1,  5, 6, Rdst);
5114 
5115       add2reg(Rsrc, min_cnt*2);
5116       add2reg(Rdst, min_cnt);
5117       z_brct(Rix, UnrolledLoop);
5118 
5119     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop.
5120     z_nilf(Z_R0, ~(min_cnt-1));
5121     z_tmll(Rcnt, min_cnt-1);
5122     z_brnaz(ScalarShortcut);               // if all bits zero, there is nothing left to do for scalar loop.
5123                                            // Rix == 0 in all cases.
5124     z_sllg(Z_R1, Rcnt, 1);                 // # src bytes already processed. Only lower 32 bits are valid!
5125                                            //   Z_R1 contents must be treated as unsigned operand! For huge strings,
5126                                            //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
5127     z_lgfr(result, Rcnt);                  // all characters processed.
5128     z_slgfr(Rdst, Rcnt);                   // restore ptr
5129     z_slgfr(Rsrc, Z_R1);                   // restore ptr, double the element count for Rsrc restore
5130     z_bru(AllDone);
5131 
5132     bind(UnrolledBreak);
5133     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop
5134     z_nilf(Z_R0, ~(min_cnt-1));
5135     z_sll(Rix, log_min_cnt);               // # chars not yet processed in UnrolledLoop (due to break), broken iteration not included.
5136     z_sr(Z_R0, Rix);                       // fix # chars processed OK so far.
5137     if (!precise) {
5138       z_lgfr(result, Z_R0);
5139       z_sllg(Z_R1, Z_R0, 1);               // # src bytes already processed. Only lower 32 bits are valid!
5140                                            //   Z_R1 contents must be treated as unsigned operand! For huge strings,
5141                                            //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
5142       z_aghi(result, min_cnt/2);           // min_cnt/2 characters have already been written
5143                                            // but ptrs were not updated yet.
5144       z_slgfr(Rdst, Z_R0);                 // restore ptr
5145       z_slgfr(Rsrc, Z_R1);                 // restore ptr, double the element count for Rsrc restore
5146       z_bru(AllDone);
5147     }
5148     bind(UnrolledDone);
5149   }
5150 
5151   {
5152     Label     ScalarLoop, ScalarDone, ScalarBreak;
5153 
5154     bind(ScalarShortcut);
5155     z_ltgfr(result, Rcnt);
5156     z_brz(AllDone);
5157 
5158 #if 0  // Sacrifice shortcuts for code compactness
5159     {
5160       //---<  Special treatment for very short strings (one or two characters)  >---
5161       //   For these strings, we are sure that the above code was skipped.
5162       //   Thus, no registers were modified, register restore is not required.
5163       Label     ScalarDoit, Scalar2Char;
5164       z_chi(Rcnt, 2);
5165       z_brh(ScalarDoit);
5166       z_llh(Z_R1,  0, Z_R0, Rsrc);
5167       z_bre(Scalar2Char);
5168       z_tmll(Z_R1, 0xff00);
5169       z_lghi(result, 0);                   // cnt == 1, first char invalid, no chars successfully processed
5170       z_brnaz(AllDone);
5171       z_stc(Z_R1,  0, Z_R0, Rdst);
5172       z_lghi(result, 1);
5173       z_bru(AllDone);
5174 
5175       bind(Scalar2Char);
5176       z_llh(Z_R0,  2, Z_R0, Rsrc);
5177       z_tmll(Z_R1, 0xff00);
5178       z_lghi(result, 0);                   // cnt == 2, first char invalid, no chars successfully processed
5179       z_brnaz(AllDone);
5180       z_stc(Z_R1,  0, Z_R0, Rdst);
5181       z_tmll(Z_R0, 0xff00);
5182       z_lghi(result, 1);                   // cnt == 2, second char invalid, one char successfully processed
5183       z_brnaz(AllDone);
5184       z_stc(Z_R0,  1, Z_R0, Rdst);
5185       z_lghi(result, 2);
5186       z_bru(AllDone);
5187 
5188       bind(ScalarDoit);
5189     }
5190 #endif
5191 
5192     if (VM_Version::has_DistinctOpnds()) {
5193       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
5194     } else {
5195       z_lr(Rix, Rcnt);
5196       z_sr(Rix, Z_R0);
5197     }
5198     z_lgfr(result, Rcnt);                  // # processed characters (if all runs ok).
5199     z_brz(ScalarDone);                     // uses CC from Rix calculation
5200 
5201     bind(ScalarLoop);
5202       z_llh(Z_R1, 0, Z_R0, Rsrc);
5203       z_tmll(Z_R1, 0xff00);
5204       z_brnaz(ScalarBreak);
5205       z_stc(Z_R1, 0, Z_R0, Rdst);
5206       add2reg(Rsrc, 2);
5207       add2reg(Rdst, 1);
5208       z_brct(Rix, ScalarLoop);
5209 
5210     z_bru(ScalarDone);
5211 
5212     bind(ScalarBreak);
5213     z_sr(result, Rix);
5214 
5215     bind(ScalarDone);
5216     z_sgfr(Rdst, result);                  // restore ptr
5217     z_sgfr(Rsrc, result);                  // restore ptr, double the element count for Rsrc restore
5218     z_sgfr(Rsrc, result);
5219   }
5220   bind(AllDone);
5221 
5222   if (precise) {
5223     BLOCK_COMMENT("} encode_iso_array");
5224   } else {
5225     BLOCK_COMMENT("} string_compress");
5226   }
5227   return offset() - block_start;
5228 }
5229 
5230 // Inflate byte[] to char[].
5231 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
5232   int block_start = offset();
5233 
5234   BLOCK_COMMENT("string_inflate {");
5235 
5236   Register stop_char = Z_R0;
5237   Register table     = Z_R1;
5238   Register src_addr  = tmp;
5239 
5240   assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
5241   assert(dst->encoding()%2 == 0, "must be even reg");
5242   assert(cnt->encoding()%2 == 1, "must be odd reg");
5243   assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
5244 
5245   StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
5246   clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
5247   lgr_if_needed(src_addr, src);
5248   z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.
5249 
5250   translate_ot(dst, src_addr, /* mask = */ 0x0001);
5251 
5252   BLOCK_COMMENT("} string_inflate");
5253 
5254   return offset() - block_start;
5255 }
5256 
5257 // Inflate byte[] to char[].
5258 //   Restores: src, dst
5259 //   Uses:     cnt
5260 //   Kills:    tmp, Z_R0, Z_R1.
5261 // Note:
5262 //   cnt is signed int. Do not rely on high word!
5263 //       counts # characters, not bytes.
5264 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
5265   assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);
5266 
5267   BLOCK_COMMENT("string_inflate {");
5268   int block_start = offset();
5269 
5270   Register   Rcnt = cnt;   // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
5271   Register   Rix  = tmp;   // loop index
5272   Register   Rsrc = src;   // addr(src array)
5273   Register   Rdst = dst;   // addr(dst array)
5274   Label      ScalarShortcut, AllDone;
5275 
5276 #if 0  // Sacrifice shortcuts for code compactness
5277   {
5278     //---<  shortcuts for short strings (very frequent)   >---
5279     Label   skipShortcut, skip4Shortcut;
5280     z_ltr(Rcnt, Rcnt);                     // absolutely nothing to do for strings of len == 0.
5281     z_brz(AllDone);
5282     clear_reg(Z_R0);                       // make sure registers are properly initialized.
5283     clear_reg(Z_R1);
5284     z_chi(Rcnt, 4);
5285     z_brne(skip4Shortcut);                 // 4 characters are very frequent
5286       z_icm(Z_R0, 5,    0, Rsrc);          // Treat exactly 4 characters specially.
5287       z_icm(Z_R1, 5,    2, Rsrc);
5288       z_stm(Z_R0, Z_R1, 0, Rdst);
5289       z_bru(AllDone);
5290     bind(skip4Shortcut);
5291 
5292     z_chi(Rcnt, 8);
5293     z_brh(skipShortcut);                   // There's a lot to do...
5294     z_lgfr(Z_R0, Rcnt);                    // remaining #characters (<= 8). Precond for scalar loop.
5295                                            // This does not destroy the "register cleared" state of Z_R0.
5296     z_brl(ScalarShortcut);                 // Just a few characters
5297       z_icmh(Z_R0, 5, 0, Rsrc);            // Treat exactly 8 characters specially.
5298       z_icmh(Z_R1, 5, 4, Rsrc);
5299       z_icm(Z_R0,  5, 2, Rsrc);
5300       z_icm(Z_R1,  5, 6, Rsrc);
5301       z_stmg(Z_R0, Z_R1, 0, Rdst);
5302       z_bru(AllDone);
5303     bind(skipShortcut);
5304   }
5305 #endif
5306   clear_reg(Z_R0);                         // make sure register is properly initialized.
5307 
5308   if (VM_Version::has_VectorFacility()) {
5309     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5310                                            // Otherwise just do nothing in vector mode.
5311                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5312     const int  log_min_vcnt = exact_log2(min_vcnt);
5313     Label      VectorLoop, VectorDone;
5314 
5315     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
5316     z_srak(Rix, Rcnt, log_min_vcnt);       // calculate # vector loop iterations
5317     z_brz(VectorDone);                     // skip if none
5318 
5319     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
5320 
5321     bind(VectorLoop);
5322       z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
5323       add2reg(Rsrc, min_vcnt);
5324 
5325       z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
5326       z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5327       z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5328       z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 characters (64 bytes)
5330       add2reg(Rdst, min_vcnt*2);
5331 
5332       z_brct(Rix, VectorLoop);
5333 
5334     bind(VectorDone);
5335   }
5336 
5337   const int  min_cnt     =  8;             // Minimum #characters required to use unrolled scalar loop.
5338                                            // Otherwise just do nothing in unrolled scalar mode.
5339                                            // Must be multiple of 8.
5340   {
5341     const int  log_min_cnt = exact_log2(min_cnt);
5342     Label      UnrolledLoop, UnrolledDone;
5343 
5344 
5345     if (VM_Version::has_DistinctOpnds()) {
5346       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to process in unrolled loop
5347     } else {
5348       z_lr(Rix, Rcnt);
5349       z_sr(Rix, Z_R0);
5350     }
5351     z_sra(Rix, log_min_cnt);               // unrolled loop count
5352     z_brz(UnrolledDone);
5353 
5354     clear_reg(Z_R0);
5355     clear_reg(Z_R1);
5356 
5357     bind(UnrolledLoop);
5358       z_icmh(Z_R0, 5, 0, Rsrc);
5359       z_icmh(Z_R1, 5, 4, Rsrc);
5360       z_icm(Z_R0,  5, 2, Rsrc);
5361       z_icm(Z_R1,  5, 6, Rsrc);
5362       add2reg(Rsrc, min_cnt);
5363 
5364       z_stmg(Z_R0, Z_R1, 0, Rdst);
5365 
5366       add2reg(Rdst, min_cnt*2);
5367       z_brct(Rix, UnrolledLoop);
5368 
5369     bind(UnrolledDone);
5370     z_lgfr(Z_R0, Rcnt);                    // # chars left over after unrolled loop.
5371     z_nilf(Z_R0, min_cnt-1);
    z_brnz(ScalarShortcut);                // Not zero: leftover characters are handled by the scalar code below.
                                           // Zero (fall through): nothing left to do. Rix == 0 in all cases.
5374     z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
5375     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5376     z_agr(Rdst, Z_R0);
5377     z_agr(Rsrc, Z_R0);                     // restore ptr.
5378     z_bru(AllDone);
5379   }
5380 
5381   {
5382     bind(ScalarShortcut);
    // Z_R0 must contain remaining # characters as 64-bit signed int here.
    //      The register contents are preserved across scalar processing (for register fixup).
5385 
5386 #if 0  // Sacrifice shortcuts for code compactness
5387     {
5388       Label      ScalarDefault;
5389       z_chi(Rcnt, 2);
5390       z_brh(ScalarDefault);
5391       z_llc(Z_R0,  0, Z_R0, Rsrc);     // 6 bytes
5392       z_sth(Z_R0,  0, Z_R0, Rdst);     // 4 bytes
5393       z_brl(AllDone);
5394       z_llc(Z_R0,  1, Z_R0, Rsrc);     // 6 bytes
5395       z_sth(Z_R0,  2, Z_R0, Rdst);     // 4 bytes
5396       z_bru(AllDone);
5397       bind(ScalarDefault);
5398     }
5399 #endif
5400 
5401     Label   CodeTable;
5402     // Some comments on Rix calculation:
5403     //  - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
5404     //  - high word of both Rix and Rcnt may contain garbage
5405     //  - the final lngfr takes care of that garbage, extending the sign to high word
    z_sllg(Rix, Z_R0, 2);                // calculate Rix = 10*Z_R0 = (4*Z_R0 + Z_R0)*2
5407     z_ar(Rix, Z_R0);
5408     z_larl(Z_R1, CodeTable);
5409     z_sll(Rix, 1);
    z_lngfr(Rix, Rix);      // ix range: [0..7], after inversion & mult: [-(7*10)..(0*10)].
5411     z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);
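    // Worked example: each llc/sth pair below occupies 6 + 4 = 10 bytes of code.
    // With, say, 3 characters left, Rix holds -(3*10); the branch above therefore lands
    // 30 bytes before CodeTable and executes exactly the last three pairs, converting
    // characters 2, 1, and 0 (a Duff's-device-style entry into the unrolled remainder).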
5412 
5413     z_llc(Z_R1,  6, Z_R0, Rsrc);  // 6 bytes
5414     z_sth(Z_R1, 12, Z_R0, Rdst);  // 4 bytes
5415 
5416     z_llc(Z_R1,  5, Z_R0, Rsrc);
5417     z_sth(Z_R1, 10, Z_R0, Rdst);
5418 
5419     z_llc(Z_R1,  4, Z_R0, Rsrc);
5420     z_sth(Z_R1,  8, Z_R0, Rdst);
5421 
5422     z_llc(Z_R1,  3, Z_R0, Rsrc);
5423     z_sth(Z_R1,  6, Z_R0, Rdst);
5424 
5425     z_llc(Z_R1,  2, Z_R0, Rsrc);
5426     z_sth(Z_R1,  4, Z_R0, Rdst);
5427 
5428     z_llc(Z_R1,  1, Z_R0, Rsrc);
5429     z_sth(Z_R1,  2, Z_R0, Rdst);
5430 
5431     z_llc(Z_R1,  0, Z_R0, Rsrc);
5432     z_sth(Z_R1,  0, Z_R0, Rdst);
5433     bind(CodeTable);
5434 
5435     z_chi(Rcnt, 8);                        // no fixup for small strings. Rdst, Rsrc were not modified.
5436     z_brl(AllDone);
5437 
    z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
5439     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5440     z_agr(Rdst, Z_R0);
5441     z_agr(Rsrc, Z_R0);                     // restore ptr.
5442   }
5443   bind(AllDone);
5444 
5445   BLOCK_COMMENT("} string_inflate");
5446   return offset() - block_start;
5447 }
5448 
5449 // Inflate byte[] to char[], length known at compile time.
5450 //   Restores: src, dst
5451 //   Kills:    tmp, Z_R0, Z_R1.
5452 // Note:
5453 //   len is signed int. Counts # characters, not bytes.
5454 unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
5455   assert_different_registers(Z_R0, Z_R1, src, dst, tmp);
5456 
5457   BLOCK_COMMENT("string_inflate_const {");
5458   int block_start = offset();
5459 
5460   Register   Rix  = tmp;   // loop index
5461   Register   Rsrc = src;   // addr(src array)
5462   Register   Rdst = dst;   // addr(dst array)
5463   Label      ScalarShortcut, AllDone;
5464   int        nprocessed = 0;
5465   int        src_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5466   int        dst_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5467   bool       restore_inputs = false;
5468   bool       workreg_clear  = false;
5469 
5470   if ((len >= 32) && VM_Version::has_VectorFacility()) {
5471     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5472                                            // Otherwise just do nothing in vector mode.
5473                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5474     const int  log_min_vcnt = exact_log2(min_vcnt);
5475     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5476     nprocessed             += iterations << log_min_vcnt;
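    // For illustration: with len == 80, two iterations of the loop below convert 64
    // characters (nprocessed == 64); the remaining 16 are then handled by the
    // 16-character vector step further down.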
5477     Label      VectorLoop;
5478 
5479     if (iterations == 1) {
5480       z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);  // get next 32 characters (single-byte)
5481       z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5482       z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5483       z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
5484       z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
      z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 characters (64 bytes)
5486 
5487       src_off += min_vcnt;
5488       dst_off += min_vcnt*2;
5489     } else {
5490       restore_inputs = true;
5491 
5492       z_lgfi(Rix, len>>log_min_vcnt);
5493       bind(VectorLoop);
5494         z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
5495         add2reg(Rsrc, min_vcnt);
5496 
5497         z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
5498         z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5499         z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5500         z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
        z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 characters (64 bytes)
5502         add2reg(Rdst, min_vcnt*2);
5503 
5504         z_brct(Rix, VectorLoop);
5505     }
5506   }
5507 
5508   if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
5509     const int  min_vcnt     = 16;          // Minimum #characters required to use vector instructions.
5510                                            // Otherwise just do nothing in vector mode.
5511                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5512     const int  log_min_vcnt = exact_log2(min_vcnt);
5513     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5514     nprocessed             += iterations << log_min_vcnt;
5515     assert(iterations == 1, "must be!");
5516 
5517     z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);    // get next 16 characters (single-byte)
5518     z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5519     z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5520     z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes
5521 
5522     src_off += min_vcnt;
5523     dst_off += min_vcnt*2;
5524   }
5525 
5526   if ((len-nprocessed) > 8) {
5527     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled scalar loop.
5528                                            // Otherwise just do nothing in unrolled scalar mode.
5529                                            // Must be multiple of 8.
5530     const int  log_min_cnt = exact_log2(min_cnt);
5531     const int  iterations  = (len - nprocessed) >> log_min_cnt;
5532     nprocessed     += iterations << log_min_cnt;
5533 
5534     //---<  avoid loop overhead/ptr increment for small # iterations  >---
5535     if (iterations <= 2) {
5536       clear_reg(Z_R0);
5537       clear_reg(Z_R1);
5538       workreg_clear = true;
5539 
5540       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5541       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5542       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5543       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5544       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5545 
5546       src_off += min_cnt;
5547       dst_off += min_cnt*2;
5548     }
5549 
5550     if (iterations == 2) {
5551       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5552       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5553       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5554       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5555       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5556 
5557       src_off += min_cnt;
5558       dst_off += min_cnt*2;
5559     }
5560 
5561     if (iterations > 2) {
5562       Label      UnrolledLoop;
5563       restore_inputs  = true;
5564 
5565       clear_reg(Z_R0);
5566       clear_reg(Z_R1);
5567       workreg_clear = true;
5568 
5569       z_lgfi(Rix, iterations);
5570       bind(UnrolledLoop);
5571         z_icmh(Z_R0, 5, 0, Rsrc);
5572         z_icmh(Z_R1, 5, 4, Rsrc);
5573         z_icm(Z_R0,  5, 2, Rsrc);
5574         z_icm(Z_R1,  5, 6, Rsrc);
5575         add2reg(Rsrc, min_cnt);
5576 
5577         z_stmg(Z_R0, Z_R1, 0, Rdst);
5578         add2reg(Rdst, min_cnt*2);
5579 
5580         z_brct(Rix, UnrolledLoop);
5581     }
5582   }
5583 
5584   if ((len-nprocessed) > 0) {
5585     switch (len-nprocessed) {
5586       case 8:
5587         if (!workreg_clear) {
5588           clear_reg(Z_R0);
5589           clear_reg(Z_R1);
5590         }
5591         z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5592         z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5593         z_icm(Z_R0,  5, 2+src_off, Rsrc);
5594         z_icm(Z_R1,  5, 6+src_off, Rsrc);
5595         z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5596         break;
5597       case 7:
5598         if (!workreg_clear) {
5599           clear_reg(Z_R0);
5600           clear_reg(Z_R1);
5601         }
5602         clear_reg(Rix);
5603         z_icm(Z_R0,  5, 0+src_off, Rsrc);
5604         z_icm(Z_R1,  5, 2+src_off, Rsrc);
5605         z_icm(Rix,   5, 4+src_off, Rsrc);
5606         z_stm(Z_R0,  Z_R1, 0+dst_off, Rdst);
5607         z_llc(Z_R0,  6+src_off, Z_R0, Rsrc);
5608         z_st(Rix,    8+dst_off, Z_R0, Rdst);
5609         z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
5610         break;
5611       case 6:
5612         if (!workreg_clear) {
5613           clear_reg(Z_R0);
5614           clear_reg(Z_R1);
5615         }
5616         clear_reg(Rix);
5617         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5618         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5619         z_icm(Rix,  5, 4+src_off, Rsrc);
5620         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5621         z_st(Rix,   8+dst_off, Z_R0, Rdst);
5622         break;
5623       case 5:
5624         if (!workreg_clear) {
5625           clear_reg(Z_R0);
5626           clear_reg(Z_R1);
5627         }
5628         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5629         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5630         z_llc(Rix,  4+src_off, Z_R0, Rsrc);
5631         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5632         z_sth(Rix,  8+dst_off, Z_R0, Rdst);
5633         break;
5634       case 4:
5635         if (!workreg_clear) {
5636           clear_reg(Z_R0);
5637           clear_reg(Z_R1);
5638         }
5639         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5640         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5641         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5642         break;
5643       case 3:
5644         if (!workreg_clear) {
5645           clear_reg(Z_R0);
5646         }
5647         z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
5648         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5649         z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
5650         z_st(Z_R0,  0+dst_off, Rdst);
5651         break;
5652       case 2:
5653         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5654         z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
5655         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5656         z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
5657         break;
5658       case 1:
5659         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5660         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5661         break;
5662       default:
5663         guarantee(false, "Impossible");
5664         break;
5665     }
5666     src_off   +=  len-nprocessed;
5667     dst_off   += (len-nprocessed)*2;
5668     nprocessed = len;
5669   }
5670 
5671   //---< restore modified input registers  >---
5672   if ((nprocessed > 0) && restore_inputs) {
5673     z_agfi(Rsrc, -(nprocessed-src_off));
5674     if (nprocessed < 1000000000) { // avoid int overflow
5675       z_agfi(Rdst, -(nprocessed*2-dst_off));
5676     } else {
5677       z_agfi(Rdst, -(nprocessed-dst_off));
5678       z_agfi(Rdst, -nprocessed);
5679     }
5680   }
5681 
5682   BLOCK_COMMENT("} string_inflate_const");
5683   return offset() - block_start;
5684 }
5685 
5686 // Kills src.
5687 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
5688                                            Register odd_reg, Register even_reg, Register tmp) {
5689   int block_start = offset();
5690   Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
5691   const Register addr = src, mask = tmp;
5692 
5693   BLOCK_COMMENT("has_negatives {");
5694 
5695   z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
5696   z_llilf(mask, 0x80808080);
5697   z_lhi(result, 1);        // Assume true.
5698   // Last possible addr for fast loop.
5699   z_lay(odd_reg, -16, Z_R1, src);
5700   z_chi(cnt, 16);
5701   z_brl(Lslow);
5702 
  // addr: index, even_reg: index increment, odd_reg: index limit
5704   z_iihf(mask, 0x80808080);
5705   z_lghi(even_reg, 16);
5706 
5707   bind(Lloop1); // 16 bytes per iteration.
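  // OR-ing two 8-byte words and AND-ing with the 0x8080...80 mask yields a nonzero
  // result iff at least one of the 16 bytes has its most significant bit set,
  // i.e. iff the chunk contains a negative (signed) byte.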
5708   z_lg(Z_R0, Address(addr));
5709   z_lg(Z_R1, Address(addr, 8));
5710   z_ogr(Z_R0, Z_R1);
5711   z_ngr(Z_R0, mask);
5712   z_brne(Ldone);           // If found return 1.
5713   z_brxlg(addr, even_reg, Lloop1);
5714 
5715   bind(Lslow);
5716   z_aghi(odd_reg, 16-1);   // Last possible addr for slow loop.
5717   z_lghi(even_reg, 1);
5718   z_cgr(addr, odd_reg);
5719   z_brh(Lnotfound);
5720 
5721   bind(Lloop2); // 1 byte per iteration.
5722   z_cli(Address(addr), 0x80);
5723   z_brnl(Ldone);           // If found return 1.
5724   z_brxlg(addr, even_reg, Lloop2);
5725 
5726   bind(Lnotfound);
5727   z_lhi(result, 0);
5728 
5729   bind(Ldone);
5730 
5731   BLOCK_COMMENT("} has_negatives");
5732 
5733   return offset() - block_start;
5734 }
5735 
5736 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
5737 unsigned int MacroAssembler::string_compare(Register str1, Register str2,
5738                                             Register cnt1, Register cnt2,
5739                                             Register odd_reg, Register even_reg, Register result, int ae) {
5740   int block_start = offset();
5741 
5742   assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
5743   assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
5744 
5745   // If strings are equal up to min length, return the length difference.
5746   const Register diff = result, // Pre-set result with length difference.
5747                  min  = cnt1,   // min number of bytes
5748                  tmp  = cnt2;
5749 
  // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
  // we interchange str1 and str2 in the UL case and negate the result.
  // This way, str1 is always Latin1-encoded, except for the UU case.
  // In addition, values must be zero-extended (sign extension is equivalent here,
  // the sign bit is 0) when they are used in a 64-bit register.
5754   const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
5755 
5756   BLOCK_COMMENT("string_compare {");
5757 
5758   if (used_as_LU) {
5759     z_srl(cnt2, 1);
5760   }
5761 
5762   // See if the lengths are different, and calculate min in cnt1.
5763   // Save diff in case we need it for a tie-breaker.
5764 
5765   // diff = cnt1 - cnt2
5766   if (VM_Version::has_DistinctOpnds()) {
5767     z_srk(diff, cnt1, cnt2);
5768   } else {
5769     z_lr(diff, cnt1);
5770     z_sr(diff, cnt2);
5771   }
5772   if (str1 != str2) {
5773     if (VM_Version::has_LoadStoreConditional()) {
5774       z_locr(min, cnt2, Assembler::bcondHigh);
5775     } else {
5776       Label Lskip;
5777       z_brl(Lskip);    // min ok if cnt1 < cnt2
5778       z_lr(min, cnt2); // min = cnt2
5779       bind(Lskip);
5780     }
5781   }
5782 
5783   if (ae == StrIntrinsicNode::UU) {
5784     z_sra(diff, 1);
5785   }
5786   if (str1 != str2) {
5787     Label Ldone;
5788     if (used_as_LU) {
5789       // Loop which searches the first difference character by character.
5790       Label Lloop;
5791       const Register ind1 = Z_R1,
5792                      ind2 = min;
5793       int stride1 = 1, stride2 = 2; // See comment above.
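      // str1 (Latin1) is stepped bytewise (llc), str2 (UTF-16) in 2-byte units (llh);
      // each zero-extended Latin1 byte is compared against the matching UTF-16 code unit.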
5794 
5795       // ind1: index, even_reg: index increment, odd_reg: index limit
5796       z_llilf(ind1, (unsigned int)(-stride1));
5797       z_lhi(even_reg, stride1);
5798       add2reg(odd_reg, -stride1, min);
5799       clear_reg(ind2); // kills min
5800 
5801       bind(Lloop);
5802       z_brxh(ind1, even_reg, Ldone);
5803       z_llc(tmp, Address(str1, ind1));
5804       z_llh(Z_R0, Address(str2, ind2));
5805       z_ahi(ind2, stride2);
5806       z_sr(tmp, Z_R0);
5807       z_bre(Lloop);
5808 
5809       z_lr(result, tmp);
5810 
5811     } else {
5812       // Use clcle in fast loop (only for same encoding).
5813       z_lgr(Z_R0, str1);
5814       z_lgr(even_reg, str2);
5815       z_llgfr(Z_R1, min);
5816       z_llgfr(odd_reg, min);
5817 
5818       if (ae == StrIntrinsicNode::LL) {
5819         compare_long_ext(Z_R0, even_reg, 0);
5820       } else {
5821         compare_long_uni(Z_R0, even_reg, 0);
5822       }
5823       z_bre(Ldone);
5824       z_lgr(Z_R1, Z_R0);
5825       if (ae == StrIntrinsicNode::LL) {
5826         z_llc(Z_R0, Address(even_reg));
5827         z_llc(result, Address(Z_R1));
5828       } else {
5829         z_llh(Z_R0, Address(even_reg));
5830         z_llh(result, Address(Z_R1));
5831       }
5832       z_sr(result, Z_R0);
5833     }
5834 
5835     // Otherwise, return the difference between the first mismatched chars.
5836     bind(Ldone);
5837   }
5838 
5839   if (ae == StrIntrinsicNode::UL) {
5840     z_lcr(result, result); // Negate result (see note above).
5841   }
5842 
5843   BLOCK_COMMENT("} string_compare");
5844 
5845   return offset() - block_start;
5846 }
5847 
5848 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
5849                                           Register odd_reg, Register even_reg, Register result, bool is_byte) {
5850   int block_start = offset();
5851 
5852   BLOCK_COMMENT("array_equals {");
5853 
5854   assert_different_registers(ary1, limit, odd_reg, even_reg);
5855   assert_different_registers(ary2, limit, odd_reg, even_reg);
5856 
5857   Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
5858   int base_offset = 0;
5859 
5860   if (ary1 != ary2) {
5861     if (is_array_equ) {
5862       base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
5863 
5864       // Return true if the same array.
5865       compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
5866 
5867       // Return false if one of them is NULL.
5868       compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5869       compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5870 
5871       // Load the lengths of arrays.
5872       z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));
5873 
5874       // Return false if the two arrays are not equal length.
5875       z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
5876       z_brne(Ldone_false);
5877 
5878       // string len in bytes (right operand)
5879       if (!is_byte) {
5880         z_chi(odd_reg, 128);
5881         z_sll(odd_reg, 1); // preserves flags
5882         z_brh(Lclcle);
5883       } else {
5884         compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5885       }
5886     } else {
5887       z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
5888       compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5889     }
5890 
5891 
5892     // Use clc instruction for up to 256 bytes.
5893     {
5894       Register str1_reg = ary1,
5895           str2_reg = ary2;
5896       if (is_array_equ) {
5897         str1_reg = Z_R1;
5898         str2_reg = even_reg;
5899         add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
5900         add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
5901       }
5902       z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
5903       z_brl(Ldone_true);
5904       // Note: We could jump to the template if equal.
5905 
5906       assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5907       z_exrl(odd_reg, CLC_template);
5908       z_bre(Ldone_true);
5909       // fall through
5910 
5911       bind(Ldone_false);
5912       clear_reg(result);
5913       z_bru(Ldone);
5914 
5915       bind(CLC_template);
5916       z_clc(0, 0, str1_reg, 0, str2_reg);
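      // The template CLC above has a length field of 0; z_exrl() executes it with that
      // field OR-ed with the low byte of odd_reg (length - 1), so a single CLC covers
      // comparisons of up to 256 bytes.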
5917     }
5918 
5919     // Use clcle instruction.
5920     {
5921       bind(Lclcle);
5922       add2reg(even_reg, base_offset, ary2); // string addr (right operand)
5923       add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)
5924 
5925       z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
5926       if (is_byte) {
5927         compare_long_ext(Z_R0, even_reg, 0);
5928       } else {
5929         compare_long_uni(Z_R0, even_reg, 0);
5930       }
5931       z_lghi(result, 0); // Preserve flags.
5932       z_brne(Ldone);
5933     }
5934   }
5935   // fall through
5936 
5937   bind(Ldone_true);
5938   z_lghi(result, 1); // All characters are equal.
5939   bind(Ldone);
5940 
5941   BLOCK_COMMENT("} array_equals");
5942 
5943   return offset() - block_start;
5944 }
5945 
5946 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
5947 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
5948                                             Register needle, Register needlecnt, int needlecntval,
5949                                             Register odd_reg, Register even_reg, int ae) {
5950   int block_start = offset();
5951 
5952   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
5953   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
5954   const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
5955   const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
5956   Label L_needle1, L_Found, L_NotFound;
5957 
5958   BLOCK_COMMENT("string_indexof {");
5959 
5960   if (needle == haystack) {
5961     z_lhi(result, 0);
5962   } else {
5963 
5964   // Load first character of needle (R0 used by search_string instructions).
5965   if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }
5966 
5967   // Compute last haystack addr to use if no match gets found.
5968   if (needlecnt != noreg) { // variable needlecnt
5969     z_ahi(needlecnt, -1); // Remaining characters after first one.
5970     z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
5971     if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
5972   } else { // constant needlecnt
5973     assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
5974     // Compute index succeeding last element to compare.
5975     if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
5976   }
5977 
5978   z_llgfr(haycnt, haycnt); // Clear high half.
5979   z_lgr(result, haystack); // Final result will be computed from needle start pointer.
5980   if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
5981   z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).
5982 
5983   if (h_csize != n_csize) {
5984     assert(ae == StrIntrinsicNode::UL, "Invalid encoding");
5985 
5986     if (needlecnt != noreg || needlecntval != 1) {
5987       if (needlecnt != noreg) {
5988         compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
5989       }
5990 
5991       // Main Loop: UL version (now we have at least 2 characters).
5992       Label L_OuterLoop, L_InnerLoop, L_Skip;
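      // The outer loop uses SRSTU to locate the next occurrence of the first needle
      // character in the UTF-16 haystack; the inner loop then verifies the remaining
      // Latin1 needle characters against the corresponding haystack code units.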
5993       bind(L_OuterLoop); // Search for 1st 2 characters.
5994       z_lgr(Z_R1, haycnt);
5995       MacroAssembler::search_string_uni(Z_R1, result);
5996       z_brc(Assembler::bcondNotFound, L_NotFound);
5997       z_lgr(result, Z_R1);
5998 
5999       z_lghi(Z_R1, n_csize);
6000       z_lghi(even_reg, h_csize);
6001       bind(L_InnerLoop);
6002       z_llgc(odd_reg, Address(needle, Z_R1));
6003       z_ch(odd_reg, Address(result, even_reg));
6004       z_brne(L_Skip);
6005       if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
6006       z_brnl(L_Found);
6007       z_aghi(Z_R1, n_csize);
6008       z_aghi(even_reg, h_csize);
6009       z_bru(L_InnerLoop);
6010 
6011       bind(L_Skip);
6012       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
6013       z_bru(L_OuterLoop);
6014     }
6015 
6016   } else {
6017     const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
6018     Label L_clcle;
6019 
6020     if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
6021       if (needlecnt != noreg) {
6022         compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
6023         z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
6024         z_brl(L_needle1);
6025       }
6026 
6027       // Main Loop: clc version (now we have at least 2 characters).
6028       Label L_OuterLoop, CLC_template;
6029       bind(L_OuterLoop); // Search for 1st 2 characters.
6030       z_lgr(Z_R1, haycnt);
6031       if (h_csize == 1) {
6032         MacroAssembler::search_string(Z_R1, result);
6033       } else {
6034         MacroAssembler::search_string_uni(Z_R1, result);
6035       }
6036       z_brc(Assembler::bcondNotFound, L_NotFound);
6037       z_lgr(result, Z_R1);
6038 
6039       if (needlecnt != noreg) {
6040         assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
6041         z_exrl(needlecnt, CLC_template);
6042       } else {
        z_clc(h_csize, needle_bytes - 1, Z_R1, n_csize, needle);
6044       }
6045       z_bre(L_Found);
6046       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
6047       z_bru(L_OuterLoop);
6048 
6049       if (needlecnt != noreg) {
6050         bind(CLC_template);
6051         z_clc(h_csize, 0, Z_R1, n_csize, needle);
6052       }
6053     }
6054 
6055     if (needlecnt != noreg || needle_bytes > 256) {
6056       bind(L_clcle);
6057 
6058       // Main Loop: clcle version (now we have at least 256 bytes).
6059       Label L_OuterLoop, CLC_template;
6060       bind(L_OuterLoop); // Search for 1st 2 characters.
6061       z_lgr(Z_R1, haycnt);
6062       if (h_csize == 1) {
6063         MacroAssembler::search_string(Z_R1, result);
6064       } else {
6065         MacroAssembler::search_string_uni(Z_R1, result);
6066       }
6067       z_brc(Assembler::bcondNotFound, L_NotFound);
6068 
6069       add2reg(Z_R0, n_csize, needle);
6070       add2reg(even_reg, h_csize, Z_R1);
6071       z_lgr(result, Z_R1);
6072       if (needlecnt != noreg) {
6073         z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
6074         z_llgfr(odd_reg, needlecnt);
6075       } else {
6076         load_const_optimized(Z_R1, needle_bytes);
6077         if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
6078       }
6079       if (h_csize == 1) {
6080         compare_long_ext(Z_R0, even_reg, 0);
6081       } else {
6082         compare_long_uni(Z_R0, even_reg, 0);
6083       }
6084       z_bre(L_Found);
6085 
6086       if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
6087       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
6088       z_bru(L_OuterLoop);
6089     }
6090   }
6091 
6092   if (needlecnt != noreg || needlecntval == 1) {
6093     bind(L_needle1);
6094 
6095     // Single needle character version.
6096     if (h_csize == 1) {
6097       MacroAssembler::search_string(haycnt, result);
6098     } else {
6099       MacroAssembler::search_string_uni(haycnt, result);
6100     }
6101     z_lgr(result, haycnt);
6102     z_brc(Assembler::bcondFound, L_Found);
6103   }
6104 
6105   bind(L_NotFound);
6106   add2reg(result, -1, haystack); // Return -1.
6107 
6108   bind(L_Found); // Return index (or -1 in fallthrough case).
6109   z_sgr(result, haystack);
6110   if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
6111   }
6112   BLOCK_COMMENT("} string_indexof");
6113 
6114   return offset() - block_start;
6115 }
6116 
6117 // early clobber: result
6118 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
6119                                                  Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
6120   int block_start = offset();
6121 
6122   BLOCK_COMMENT("string_indexof_char {");
6123 
6124   if (needle == haystack) {
6125     z_lhi(result, 0);
6126   } else {
6127 
6128   Label Ldone;
6129 
6130   z_llgfr(odd_reg, haycnt);  // Preset loop ctr/searchrange end.
6131   if (needle == noreg) {
6132     load_const_optimized(Z_R0, (unsigned long)needleChar);
6133   } else {
6134     if (is_byte) {
6135       z_llgcr(Z_R0, needle); // First (and only) needle char.
6136     } else {
6137       z_llghr(Z_R0, needle); // First (and only) needle char.
6138     }
6139   }
6140 
6141   if (!is_byte) {
6142     z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
6143   }
6144 
6145   z_lgr(even_reg, haystack); // haystack addr
6146   z_agr(odd_reg, haystack);  // First char after range end.
6147   z_lghi(result, -1);
6148 
6149   if (is_byte) {
6150     MacroAssembler::search_string(odd_reg, even_reg);
6151   } else {
6152     MacroAssembler::search_string_uni(odd_reg, even_reg);
6153   }
6154   z_brc(Assembler::bcondNotFound, Ldone);
6155   if (is_byte) {
6156     if (VM_Version::has_DistinctOpnds()) {
6157       z_sgrk(result, odd_reg, haystack);
6158     } else {
6159       z_sgr(odd_reg, haystack);
6160       z_lgr(result, odd_reg);
6161     }
6162   } else {
6163     z_slgr(odd_reg, haystack);
6164     z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
6165   }
6166 
6167   bind(Ldone);
6168   }
6169   BLOCK_COMMENT("} string_indexof_char");
6170 
6171   return offset() - block_start;
6172 }
6173 
6174 
6175 //-------------------------------------------------
6176 //   Constants (scalar and oop) in constant pool
6177 //-------------------------------------------------
6178 
6179 // Add a non-relocated constant to the CP.
6180 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
6181   long    value  = val.value();
6182   address tocPos = long_constant(value);
6183 
6184   if (tocPos != NULL) {
6185     int tocOffset = (int)(tocPos - code()->consts()->start());
6186     return tocOffset;
6187   }
  // long_constant() returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently
  // generated access code gets executed.
6191   return -1;
6192 }
6193 
6194 // Returns the TOC offset where the address is stored.
6195 // Add a relocated constant to the CP.
6196 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
6197   // Use RelocationHolder::none for the constant pool entry.
6198   // Otherwise we will end up with a failing NativeCall::verify(x),
6199   // where x is the address of the constant pool entry.
6200   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
6201 
6202   if (tocPos != NULL) {
6203     int              tocOffset = (int)(tocPos - code()->consts()->start());
6204     RelocationHolder rsp = oop.rspec();
6205     Relocation      *rel = rsp.reloc();
6206 
6207     // Store toc_offset in relocation, used by call_far_patchable.
6208     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
6209       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
6210     }
6211     // Relocate at the load's pc.
6212     relocate(rsp);
6213 
6214     return tocOffset;
6215   }
  // address_constant() returned NULL, so no constant entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently
  // generated access code gets executed.
6219   return -1;
6220 }
6221 
6222 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
6223   int     tocOffset = store_const_in_toc(a);
6224   if (tocOffset == -1) return false;
6225   address tocPos    = tocOffset + code()->consts()->start();
6226   assert((address)code()->consts()->start() != NULL, "Please add CP address");
6227 
6228   load_long_pcrelative(dst, tocPos);
6229   return true;
6230 }
6231 
6232 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
6233   int     tocOffset = store_oop_in_toc(a);
6234   if (tocOffset == -1) return false;
6235   address tocPos    = tocOffset + code()->consts()->start();
6236   assert((address)code()->consts()->start() != NULL, "Please add CP address");
6237 
6238   load_addr_pcrelative(dst, tocPos);
6239   return true;
6240 }
6241 
6242 // If the instruction sequence at the given pc is a load_const_from_toc
6243 // sequence, return the value currently stored at the referenced position
6244 // in the TOC.
6245 intptr_t MacroAssembler::get_const_from_toc(address pc) {
6246 
6247   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
6248 
6249   long    offset  = get_load_const_from_toc_offset(pc);
6250   address dataLoc = NULL;
6251   if (is_load_const_from_toc_pcrelative(pc)) {
6252     dataLoc = pc + offset;
6253   } else {
6254     CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
6255     assert(cb && cb->is_nmethod(), "sanity");
6256     nmethod* nm = (nmethod*)cb;
6257     dataLoc = nm->ctable_begin() + offset;
6258   }
6259   return *(intptr_t *)dataLoc;
6260 }
6261 
6262 // If the instruction sequence at the given pc is a load_const_from_toc
6263 // sequence, copy the passed-in new_data value into the referenced
6264 // position in the TOC.
6265 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
6266   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
6267 
6268   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
6269   address dataLoc = NULL;
6270   if (is_load_const_from_toc_pcrelative(pc)) {
6271     dataLoc = pc+offset;
6272   } else {
6273     nmethod* nm = CodeCache::find_nmethod(pc);
6274     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
6275     dataLoc = nm->ctable_begin() + offset;
6276   }
6277   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
6278     *(unsigned long *)dataLoc = new_data;
6279   }
6280 }
6281 
6282 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
6283 // site. Verify by calling is_load_const_from_toc() before!!
6284 // Offset is +/- 2**32 -> use long.
6285 long MacroAssembler::get_load_const_from_toc_offset(address a) {
6286   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
6287   //  expected code sequence:
6288   //    z_lgrl(t, simm32);    len = 6
6289   unsigned long inst;
6290   unsigned int  len = get_instruction(a, &inst);
6291   return get_pcrel_offset(inst);
6292 }
6293 
6294 //**********************************************************************************
6295 //  inspection of generated instruction sequences for a particular pattern
6296 //**********************************************************************************
6297 
6298 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
6299 #ifdef ASSERT
6300   unsigned long inst;
6301   unsigned int  len = get_instruction(a+2, &inst);
6302   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
6303     const int range = 128;
6304     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
6305     VM_Version::z_SIGSEGV();
6306   }
6307 #endif
6308   // expected code sequence:
6309   //   z_lgrl(t, relAddr32);    len = 6
6310   //TODO: verify accessed data is in CP, if possible.
6311   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
6312 }
6313 
6314 bool MacroAssembler::is_load_const_from_toc_call(address a) {
6315   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
6316 }
6317 
6318 bool MacroAssembler::is_load_const_call(address a) {
6319   return is_load_const(a) && is_call_byregister(a + load_const_size());
6320 }
6321 
6322 //-------------------------------------------------
//   Emitters for some really CISC instructions
6324 //-------------------------------------------------
6325 
6326 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
6327   assert(dst->encoding()%2==0, "must be an even/odd register pair");
6328   assert(src->encoding()%2==0, "must be an even/odd register pair");
6329   assert(pad<256, "must be a padding BYTE");
6330 
6331   Label retry;
6332   bind(retry);
6333   Assembler::z_mvcle(dst, src, pad);
6334   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6335 }
6336 
6337 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
6338   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6339   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6340   assert(pad<256, "must be a padding BYTE");
6341 
6342   Label retry;
6343   bind(retry);
6344   Assembler::z_clcle(left, right, pad, Z_R0);
6345   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6346 }
6347 
6348 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
6349   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6350   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6351   assert(pad<=0xfff, "must be a padding HALFWORD");
6352   assert(VM_Version::has_ETF2(), "instruction must be available");
6353 
6354   Label retry;
6355   bind(retry);
6356   Assembler::z_clclu(left, right, pad, Z_R0);
6357   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6358 }
6359 
6360 void MacroAssembler::search_string(Register end, Register start) {
6361   assert(end->encoding() != 0, "end address must not be in R0");
6362   assert(start->encoding() != 0, "start address must not be in R0");
6363 
6364   Label retry;
6365   bind(retry);
6366   Assembler::z_srst(end, start);
6367   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6368 }
6369 
6370 void MacroAssembler::search_string_uni(Register end, Register start) {
6371   assert(end->encoding() != 0, "end address must not be in R0");
6372   assert(start->encoding() != 0, "start address must not be in R0");
6373   assert(VM_Version::has_ETF3(), "instruction must be available");
6374 
6375   Label retry;
6376   bind(retry);
6377   Assembler::z_srstu(end, start);
6378   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6379 }
6380 
6381 void MacroAssembler::kmac(Register srcBuff) {
6382   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6383   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6384 
6385   Label retry;
6386   bind(retry);
6387   Assembler::z_kmac(Z_R0, srcBuff);
6388   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6389 }
6390 
6391 void MacroAssembler::kimd(Register srcBuff) {
6392   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6393   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6394 
6395   Label retry;
6396   bind(retry);
6397   Assembler::z_kimd(Z_R0, srcBuff);
6398   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6399 }
6400 
6401 void MacroAssembler::klmd(Register srcBuff) {
6402   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6403   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6404 
6405   Label retry;
6406   bind(retry);
6407   Assembler::z_klmd(Z_R0, srcBuff);
6408   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6409 }
6410 
6411 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
6412   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6413   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6414   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6415   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6416   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6417 
6418   Label retry;
6419   bind(retry);
6420   Assembler::z_km(dstBuff, srcBuff);
6421   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6422 }
6423 
6424 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
6425   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6426   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6427   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6428   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6429   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6430 
6431   Label retry;
6432   bind(retry);
6433   Assembler::z_kmc(dstBuff, srcBuff);
6434   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6435 }
6436 
6437 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
6438   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6439 
6440   Label retry;
6441   bind(retry);
6442   Assembler::z_cksm(crcBuff, srcBuff);
6443   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6444 }
6445 
6446 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
6447   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6448   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6449 
6450   Label retry;
6451   bind(retry);
6452   Assembler::z_troo(r1, r2, m3);
6453   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6454 }
6455 
6456 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
6457   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6458   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6459 
6460   Label retry;
6461   bind(retry);
6462   Assembler::z_trot(r1, r2, m3);
6463   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6464 }
6465 
6466 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
6467   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6468   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6469 
6470   Label retry;
6471   bind(retry);
6472   Assembler::z_trto(r1, r2, m3);
6473   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6474 }
6475 
6476 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
6477   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6478   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6479 
6480   Label retry;
6481   bind(retry);
6482   Assembler::z_trtt(r1, r2, m3);
6483   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6484 }
6485 
6486 
6487 void MacroAssembler::generate_type_profiling(const Register Rdata,
6488                                              const Register Rreceiver_klass,
6489                                              const Register Rwanted_receiver_klass,
6490                                              const Register Rmatching_row,
6491                                              bool is_virtual_call) {
6492   const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
6493                        in_bytes(ReceiverTypeData::receiver_offset(0));
6494   const int num_rows = ReceiverTypeData::row_limit();
6495   NearLabel found_free_row;
6496   NearLabel do_increment;
6497   NearLabel found_no_slot;
6498 
6499   BLOCK_COMMENT("type profiling {");
6500 
6501   // search for:
6502   //    a) The type given in Rwanted_receiver_klass.
6503   //    b) The *first* empty row.
6504 
  // First search for a) only, ignoring any b) rows encountered along the way.
6506   // This is possible because
6507   //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
6508   // is never true (receiver_class can't be zero).
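  // In pseudo code, the sequence generated below is:
  //   for (row in rows) { if (row.receiver == receiver_klass) goto do_increment; }
  //   for (row in rows) { if (row.receiver == 0)              goto found_free_row; }
  //   if (is_virtual_call) { data.count++; }
  //   goto found_no_slot;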
6509   for (int row_num = 0; row_num < num_rows; row_num++) {
    // Row_offset should be a well-behaved positive number. The generated code relies
    // on that to keep the code size constant. add2reg() can handle all row_offset
    // values, but would then have to vary the generated code size.
6513     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
6514     assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
6515 
6516     // Is Rwanted_receiver_klass in this row?
6517     if (VM_Version::has_CompareBranch()) {
6518       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
6519       // Rmatching_row = Rdata + row_offset;
6520       add2reg(Rmatching_row, row_offset, Rdata);
6521       // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
6522       compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
6523     } else {
6524       add2reg(Rmatching_row, row_offset, Rdata);
6525       z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
6526       z_bre(do_increment);
6527     }
6528   }
6529 
6530   // Now that we did not find a match, let's search for b).
6531 
  // We could save the first calculation of Rmatching_row if we searched for a) in reverse order.
6533   // We would then end up here with Rmatching_row containing the value for row_num == 0.
6534   // We would not see much benefit, if any at all, because the CPU can schedule
6535   // two instructions together with a branch anyway.
6536   for (int row_num = 0; row_num < num_rows; row_num++) {
6537     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
6538 
6539     // Has this row a zero receiver_klass, i.e. is it empty?
6540     if (VM_Version::has_CompareBranch()) {
6541       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
6542       // Rmatching_row = Rdata + row_offset
6543       add2reg(Rmatching_row, row_offset, Rdata);
6544       // if (*row_recv == (intptr_t) 0) goto found_free_row
6545       compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
6546     } else {
6547       add2reg(Rmatching_row, row_offset, Rdata);
6548       load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
6549       z_bre(found_free_row);  // zero -> Found a free row.
6550     }
6551   }
6552 
6553   // No match, no empty row found.
6554   // Increment total counter to indicate polymorphic case.
6555   if (is_virtual_call) {
6556     add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
6557   }
6558   z_bru(found_no_slot);
6559 
6560   // Here we found an empty row, but we have not found Rwanted_receiver_klass.
6561   // Rmatching_row holds the address to the first empty row.
6562   bind(found_free_row);
6563   // Store receiver_klass into empty slot.
6564   z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
6565 
6566   // Increment the counter of Rmatching_row.
6567   bind(do_increment);
6568   ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
6569   add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
6570 
6571   bind(found_no_slot);
6572 
6573   BLOCK_COMMENT("} type profiling");
6574 }
6575 
6576 //---------------------------------------
6577 // Helpers for Intrinsic Emitters
6578 //---------------------------------------
6579 
6580 /**
6581  * uint32_t crc;
6582  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6583  */
6584 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
6585   assert_different_registers(crc, table, tmp);
6586   assert_different_registers(val, table);
6587   if (crc == val) {      // Must rotate first to use the unmodified value.
6588     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6589     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6590   } else {
6591     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6592     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6593   }
6594   z_x(crc, Address(table, tmp, 0));
6595 }
6596 
6597 /**
6598  * uint32_t crc;
6599  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6600  */
6601 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
6602   fold_byte_crc32(crc, crc, table, tmp);
6603 }
6604 
6605 /**
6606  * Emits code to update CRC-32 with a byte value according to constants in table.
6607  *
 * @param [in,out] crc   Register containing the crc.
 * @param [in]     val   Register containing the byte to fold into the CRC.
 * @param [in]     table Register containing the table of crc constants.
6611  *
6612  * uint32_t crc;
6613  * val = crc_table[(val ^ crc) & 0xFF];
6614  * crc = val ^ (crc >> 8);
6615  */
6616 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
6617   z_xr(val, crc);
6618   fold_byte_crc32(crc, val, table, val);
6619 }
6620 
6621 
6622 /**
6623  * @param crc   register containing existing CRC (32-bit)
6624  * @param buf   register pointing to input byte buffer (byte*)
6625  * @param len   register containing number of bytes
6626  * @param table register pointing to CRC table
6627  */
6628 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
6629   assert_different_registers(crc, buf, len, table, data);
6630 
6631   Label L_mainLoop, L_done;
6632   const int mainLoop_stepping = 1;
6633 
6634   // Process all bytes in a single-byte loop.
6635   z_ltr(len, len);
6636   z_brnh(L_done);
6637 
6638   bind(L_mainLoop);
    z_llgc(data, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6640     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
6641     update_byte_crc32(crc, data, table);
6642     z_brct(len, L_mainLoop);                // Iterate.
6643 
6644   bind(L_done);
6645 }
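
// For reference, a hedged C sketch of the byte loop above (table layout as in zlib's crc32.c):
//   while (len-- > 0) {
//     crc = table[(*buf++ ^ crc) & 0xff] ^ (crc >> 8);
//   }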
6646 
6647 /**
6648  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
6649  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
6650  *
6651  */
6652 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
6653                                         Register t0,  Register t1,  Register t2,    Register t3) {
6654   // This is what we implement (the DOBIG4 part):
6655   //
6656   // #define DOBIG4 c ^= *++buf4; \
6657   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
6658   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
6659   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
6660   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
6661   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
6662   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
6663   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
6664   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
6665 
6666   // XOR crc with next four bytes of buffer.
6667   lgr_if_needed(t0, crc);
6668   z_x(t0, Address(buf, bufDisp));
6669   if (bufInc != 0) {
6670     add2reg(buf, bufInc);
6671   }
6672 
6673   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
6674   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
6675   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
6676   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
6677   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
6678 
6679   // XOR indexed table values to calculate updated crc.
6680   z_ly(t2, Address(table, t2, (intptr_t)ix1));
6681   z_ly(t0, Address(table, t0, (intptr_t)ix3));
6682   z_xy(t2, Address(table, t3, (intptr_t)ix0));
6683   z_xy(t0, Address(table, t1, (intptr_t)ix2));
6684   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
6685   lgr_if_needed(crc, t0);
6686 }
6687 
6688 /**
6689  * @param crc   register containing existing CRC (32-bit)
6690  * @param buf   register pointing to input byte buffer (byte*)
6691  * @param len   register containing number of bytes
6692  * @param table register pointing to CRC table
6693  *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6695  */
6696 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
6697                                         Register t0,  Register t1,  Register t2,  Register t3,
6698                                         bool invertCRC) {
6699   assert_different_registers(crc, buf, len, table);
6700 
6701   Label L_mainLoop, L_tail;
6702   Register  data = t0;
6703   Register  ctr  = Z_R0;
6704   const int mainLoop_stepping = 8;
6705   const int tailLoop_stepping = 1;
6706   const int log_stepping      = exact_log2(mainLoop_stepping);
6707 
6708   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6709   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6710   // The situation itself is detected and handled correctly by the conditional branches
6711   // following aghi(len, -stepping) and aghi(len, +stepping).
6712 
6713   if (invertCRC) {
6714     not_(crc, noreg, false);           // 1s complement of crc
6715   }
6716 
6717 #if 0
6718   {
6719     // Pre-mainLoop alignment did not show any positive effect on performance.
6720     // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.
6721 
6722     z_cghi(len, mainLoop_stepping);    // Alignment is useless for short data streams.
6723     z_brnh(L_tail);
6724 
6725     // Align buf to word (4-byte) boundary.
6726     z_lcr(ctr, buf);
6727     rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
6728     z_sgfr(len, ctr);                  // Remaining len after alignment.
6729 
6730     update_byteLoop_crc32(crc, buf, ctr, table, data);
6731   }
6732 #endif
6733 
6734   // Check for short (<mainLoop_stepping bytes) buffer.
6735   z_srag(ctr, len, log_stepping);
6736   z_brnh(L_tail);
6737 
  z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6739   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6740 
6741   BIND(L_mainLoop);
6742     update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
6743     update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
6744     z_brct(ctr, L_mainLoop); // Iterate.
6745 
  z_lrvr(crc, crc);          // Reverse byte order back to the original.
6747 
6748   // Process last few (<8) bytes of buffer.
6749   BIND(L_tail);
6750   update_byteLoop_crc32(crc, buf, len, table, data);
6751 
6752   if (invertCRC) {
6753     not_(crc, noreg, false);           // 1s complement of crc
6754   }
6755 }
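
// Rough structure of the 2-word kernel above as a hedged sketch (the byte-order reversal
// required on this big-endian platform is omitted):
//   if (invertCRC) crc = ~crc;
//   for (i = 0; i < len / 8; i++) {      // main loop: two 4-byte table folds per iteration
//     crc ^= load4(buf);     crc = fold_via_columns_4_to_7(crc);
//     crc ^= load4(buf + 4); crc = fold_via_columns_4_to_7(crc);
//     buf += 8;
//   }
//   process the remaining (len % 8) bytes with the single-byte loop;
//   if (invertCRC) crc = ~crc;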
6756 
6757 /**
6758  * @param crc   register containing existing CRC (32-bit)
6759  * @param buf   register pointing to input byte buffer (byte*)
6760  * @param len   register containing number of bytes
6761  * @param table register pointing to CRC table
6762  *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6764  */
6765 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6766                                         Register t0,  Register t1,  Register t2,  Register t3,
6767                                         bool invertCRC) {
6768   assert_different_registers(crc, buf, len, table);
6769 
6770   Label L_mainLoop, L_tail;
6771   Register  data = t0;
6772   Register  ctr  = Z_R0;
6773   const int mainLoop_stepping = 4;
6774   const int log_stepping      = exact_log2(mainLoop_stepping);
6775 
6776   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6777   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6778   // The situation itself is detected and handled correctly by the conditional branches
6779   // following aghi(len, -stepping) and aghi(len, +stepping).
6780 
6781   if (invertCRC) {
6782     not_(crc, noreg, false);           // 1s complement of crc
6783   }
6784 
6785   // Check for short (<4 bytes) buffer.
6786   z_srag(ctr, len, log_stepping);
6787   z_brnh(L_tail);
6788 
  z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6790   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6791 
6792   BIND(L_mainLoop);
6793     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6794     z_brct(ctr, L_mainLoop); // Iterate.
6795 
  z_lrvr(crc, crc);          // Reverse byte order back to the original.
6797 
  // Process last few (<4) bytes of buffer.
6799   BIND(L_tail);
6800   update_byteLoop_crc32(crc, buf, len, table, data);
6801 
6802   if (invertCRC) {
6803     not_(crc, noreg, false);           // 1s complement of crc
6804   }
6805 }
6806 
6807 /**
6808  * @param crc   register containing existing CRC (32-bit)
6809  * @param buf   register pointing to input byte buffer (byte*)
6810  * @param len   register containing number of bytes
6811  * @param table register pointing to CRC table
6812  */
6813 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
6814                                         Register t0,  Register t1,  Register t2,  Register t3,
6815                                         bool invertCRC) {
6816   assert_different_registers(crc, buf, len, table);
6817   Register data = t0;
6818 
6819   if (invertCRC) {
6820     not_(crc, noreg, false);           // 1s complement of crc
6821   }
6822 
6823   update_byteLoop_crc32(crc, buf, len, table, data);
6824 
6825   if (invertCRC) {
6826     not_(crc, noreg, false);           // 1s complement of crc
6827   }
6828 }
6829 
6830 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
6831                                              bool invertCRC) {
6832   assert_different_registers(crc, buf, len, table, tmp);
6833 
6834   if (invertCRC) {
6835     not_(crc, noreg, false);           // 1s complement of crc
6836   }
6837 
6838   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6839   update_byte_crc32(crc, tmp, table);
6840 
6841   if (invertCRC) {
6842     not_(crc, noreg, false);           // 1s complement of crc
6843   }
6844 }
6845 
6846 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
6847                                                 bool invertCRC) {
6848   assert_different_registers(crc, val, table);
6849 
6850   if (invertCRC) {
6851     not_(crc, noreg, false);           // 1s complement of crc
6852   }
6853 
6854   update_byte_crc32(crc, val, table);
6855 
6856   if (invertCRC) {
6857     not_(crc, noreg, false);           // 1s complement of crc
6858   }
6859 }
6860 
6861 //
6862 // Code for BigInteger::multiplyToLen() intrinsic.
6863 //
6864 
6865 // dest_lo += src1 + src2
6866 // dest_hi += carry1 + carry2
6867 // Z_R7 is destroyed !
6868 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
6869                                      Register src1, Register src2) {
6870   clear_reg(Z_R7);
6871   z_algr(dest_lo, src1);
6872   z_alcgr(dest_hi, Z_R7);
6873   z_algr(dest_lo, src2);
6874   z_alcgr(dest_hi, Z_R7);
6875 }
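
// In C terms, the above is roughly (a sketch using a 128-bit accumulator):
//   unsigned __int128 acc = ((unsigned __int128)dest_hi << 64) | dest_lo;
//   acc += src1;  acc += src2;
//   dest_lo = (uint64_t)acc;  dest_hi = (uint64_t)(acc >> 64);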
6876 
6877 // Multiply 64 bit by 64 bit first loop.
6878 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
6879                                            Register x_xstart,
6880                                            Register y, Register y_idx,
6881                                            Register z,
6882                                            Register carry,
6883                                            Register product,
6884                                            Register idx, Register kdx) {
6885   // jlong carry, x[], y[], z[];
6886   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6887   //   huge_128 product = y[idx] * x[xstart] + carry;
6888   //   z[kdx] = (jlong)product;
6889   //   carry  = (jlong)(product >>> 64);
6890   // }
6891   // z[xstart] = carry;
6892 
6893   Label L_first_loop, L_first_loop_exit;
6894   Label L_one_x, L_one_y, L_multiply;
6895 
6896   z_aghi(xstart, -1);
6897   z_brl(L_one_x);   // Special case: length of x is 1.
6898 
6899   // Load next two integers of x.
6900   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6901   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6902 
6903 
6904   bind(L_first_loop);
6905 
6906   z_aghi(idx, -1);
6907   z_brl(L_first_loop_exit);
6908   z_aghi(idx, -1);
6909   z_brl(L_one_y);
6910 
6911   // Load next two integers of y.
6912   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
6913   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
6914 
6915 
6916   bind(L_multiply);
6917 
6918   Register multiplicand = product->successor();
6919   Register product_low = multiplicand;
6920 
6921   lgr_if_needed(multiplicand, x_xstart);
6922   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
6923   clear_reg(Z_R7);
6924   z_algr(product_low, carry); // Add carry to result.
6925   z_alcgr(product, Z_R7);     // Add carry of the last addition.
6926   add2reg(kdx, -2);
6927 
6928   // Store result.
6929   z_sllg(Z_R7, kdx, LogBytesPerInt);
6930   reg2mem_opt(product_low, Address(z, Z_R7, 0));
6931   lgr_if_needed(carry, product);
6932   z_bru(L_first_loop);
6933 
6934 
6935   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
6936 
6937   clear_reg(y_idx);
6938   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
6939   z_bru(L_multiply);
6940 
6941 
6942   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
6943 
6944   clear_reg(x_xstart);
6945   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6946   z_bru(L_first_loop);
6947 
6948   bind(L_first_loop_exit);
6949 }
6950 
6951 // Multiply 64 bit by 64 bit and add 128 bit.
6952 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
6953                                             Register z,
6954                                             Register yz_idx, Register idx,
6955                                             Register carry, Register product,
6956                                             int offset) {
6957   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
6958   // z[kdx] = (jlong)product;
6959 
6960   Register multiplicand = product->successor();
6961   Register product_low = multiplicand;
6962 
6963   z_sllg(Z_R7, idx, LogBytesPerInt);
6964   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
6965 
6966   lgr_if_needed(multiplicand, x_xstart);
6967   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6968   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
6969 
6970   add2_with_carry(product, product_low, carry, yz_idx);
6971 
6972   z_sllg(Z_R7, idx, LogBytesPerInt);
6973   reg2mem_opt(product_low, Address(z, Z_R7, offset));
6974 
6975 }
6976 
6977 // Multiply 128 bit by 128 bit. Unrolled inner loop.
6978 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
6979                                              Register y, Register z,
6980                                              Register yz_idx, Register idx,
6981                                              Register jdx,
6982                                              Register carry, Register product,
6983                                              Register carry2) {
6984   // jlong carry, x[], y[], z[];
6985   // int kdx = ystart+1;
6986   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
6987   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
6988   //   z[kdx+idx+1] = (jlong)product;
6989   //   jlong carry2 = (jlong)(product >>> 64);
6990   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
6991   //   z[kdx+idx] = (jlong)product;
6992   //   carry = (jlong)(product >>> 64);
6993   // }
6994   // idx += 2;
6995   // if (idx > 0) {
6996   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
6997   //   z[kdx+idx] = (jlong)product;
6998   //   carry = (jlong)(product >>> 64);
6999   // }
7000 
7001   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7002 
7003   // scale the index
7004   lgr_if_needed(jdx, idx);
7005   and_imm(jdx, 0xfffffffffffffffcL);
7006   rshift(jdx, 2);
7007 
7008 
7009   bind(L_third_loop);
7010 
7011   z_aghi(jdx, -1);
7012   z_brl(L_third_loop_exit);
7013   add2reg(idx, -4);
7014 
7015   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7016   lgr_if_needed(carry2, product);
7017 
7018   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7019   lgr_if_needed(carry, product);
7020   z_bru(L_third_loop);
7021 
7022 
7023   bind(L_third_loop_exit);  // Handle any left-over operand parts.
7024 
7025   and_imm(idx, 0x3);
7026   z_brz(L_post_third_loop_done);
7027 
7028   Label L_check_1;
7029 
7030   z_aghi(idx, -2);
7031   z_brl(L_check_1);
7032 
7033   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7034   lgr_if_needed(carry, product);
7035 
7036 
7037   bind(L_check_1);
7038 
7039   add2reg(idx, 0x2);
7040   and_imm(idx, 0x1);
7041   z_aghi(idx, -1);
7042   z_brl(L_post_third_loop_done);
7043 
7044   Register   multiplicand = product->successor();
7045   Register   product_low = multiplicand;
7046 
7047   z_sllg(Z_R7, idx, LogBytesPerInt);
7048   clear_reg(yz_idx);
7049   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
7050   lgr_if_needed(multiplicand, x_xstart);
7051   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
7052   clear_reg(yz_idx);
7053   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
7054 
7055   add2_with_carry(product, product_low, yz_idx, carry);
7056 
7057   z_sllg(Z_R7, idx, LogBytesPerInt);
7058   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
7059   rshift(product_low, 32);
7060 
7061   lshift(product, 32);
7062   z_ogr(product_low, product);
7063   lgr_if_needed(carry, product_low);
7064 
7065   bind(L_post_third_loop_done);
7066 }
7067 
7068 void MacroAssembler::multiply_to_len(Register x, Register xlen,
7069                                      Register y, Register ylen,
7070                                      Register z,
7071                                      Register tmp1, Register tmp2,
7072                                      Register tmp3, Register tmp4,
7073                                      Register tmp5) {
7074   ShortBranchVerifier sbv(this);
7075 
7076   assert_different_registers(x, xlen, y, ylen, z,
7077                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
7078   assert_different_registers(x, xlen, y, ylen, z,
7079                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
7080 
7081   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
7082 
  // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
7084   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
7085 
7086   const Register idx = tmp1;
7087   const Register kdx = tmp2;
7088   const Register xstart = tmp3;
7089 
7090   const Register y_idx = tmp4;
7091   const Register carry = tmp5;
7092   const Register product  = Z_R0_scratch;
7093   const Register x_xstart = Z_R8;
7094 
7095   // First Loop.
7096   //
7097   //   final static long LONG_MASK = 0xffffffffL;
7098   //   int xstart = xlen - 1;
7099   //   int ystart = ylen - 1;
7100   //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7102   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7103   //     z[kdx] = (int)product;
7104   //     carry = product >>> 32;
7105   //   }
7106   //   z[xstart] = (int)carry;
7107   //
7108 
7109   lgr_if_needed(idx, ylen);  // idx = ylen
  z_llgf(kdx, zlen);         // C2 does not respect int-to-long conversion for stub calls, so load the value zero-extended.
7111   clear_reg(carry);          // carry = 0
7112 
7113   Label L_done;
7114 
7115   lgr_if_needed(xstart, xlen);
7116   z_aghi(xstart, -1);
7117   z_brl(L_done);
7118 
7119   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7120 
7121   NearLabel L_second_loop;
7122   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
7123 
7124   NearLabel L_carry;
7125   z_aghi(kdx, -1);
7126   z_brz(L_carry);
7127 
7128   // Store lower 32 bits of carry.
7129   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
7130   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7131   rshift(carry, 32);
7132   z_aghi(kdx, -1);
7133 
7134 
7135   bind(L_carry);
7136 
7137   // Store upper 32 bits of carry.
7138   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
7139   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7140 
7141   // Second and third (nested) loops.
7142   //
7143   // for (int i = xstart-1; i >= 0; i--) { // Second loop
7144   //   carry = 0;
7145   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7146   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7147   //                    (z[k] & LONG_MASK) + carry;
7148   //     z[k] = (int)product;
7149   //     carry = product >>> 32;
7150   //   }
7151   //   z[i] = (int)carry;
7152   // }
7153   //
7154   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7155 
7156   const Register jdx = tmp1;
7157 
7158   bind(L_second_loop);
7159 
7160   clear_reg(carry);           // carry = 0;
7161   lgr_if_needed(jdx, ylen);   // j = ystart+1
7162 
7163   z_aghi(xstart, -1);         // i = xstart-1;
7164   z_brl(L_done);
7165 
7166   // Use free slots in the current stackframe instead of push/pop.
7167   Address zsave(Z_SP, _z_abi(carg_1));
7168   reg2mem_opt(z, zsave);
7169 
7170 
7171   Label L_last_x;
7172 
7173   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
7174   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
7175   z_aghi(xstart, -1);                           // i = xstart-1;
7176   z_brl(L_last_x);
7177 
7178   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
7179   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
7180 
7181 
7182   Label L_third_loop_prologue;
7183 
7184   bind(L_third_loop_prologue);
7185 
7186   Address xsave(Z_SP, _z_abi(carg_2));
7187   Address xlensave(Z_SP, _z_abi(carg_3));
7188   Address ylensave(Z_SP, _z_abi(carg_4));
7189 
7190   reg2mem_opt(x, xsave);
7191   reg2mem_opt(xstart, xlensave);
7192   reg2mem_opt(ylen, ylensave);
7193 
7194 
7195   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7196 
7197   mem2reg_opt(z, zsave);
7198   mem2reg_opt(x, xsave);
7199   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
7200   mem2reg_opt(ylen, ylensave);
7201 
7202   add2reg(tmp3, 1, xlen);
7203   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
7204   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7205   z_aghi(tmp3, -1);
7206   z_brl(L_done);
7207 
7208   rshift(carry, 32);
7209   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
7210   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
7211   z_bru(L_second_loop);
7212 
  // The following infrequent code is moved outside the loops.
7214   bind(L_last_x);
7215 
7216   clear_reg(x_xstart);
7217   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
7218   z_bru(L_third_loop_prologue);
7219 
7220   bind(L_done);
7221 
7222   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
7223 }
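
// Note: the first, second, and third loops above together implement the schoolbook
// O(xlen*ylen) multiplication used by BigInteger::multiplyToLen(), consuming the
// operands in 64-bit (two-int) chunks wherever possible.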
7224 
7225 #ifndef PRODUCT
7226 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
7227 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
7228   Label ok;
7229   if (check_equal) {
7230     z_bre(ok);
7231   } else {
7232     z_brne(ok);
7233   }
7234   stop(msg, id);
7235   bind(ok);
7236 }
7237 
7238 // Assert if CC indicates "low".
7239 void MacroAssembler::asm_assert_low(const char *msg, int id) {
7240   Label ok;
7241   z_brnl(ok);
7242   stop(msg, id);
7243   bind(ok);
7244 }
7245 
7246 // Assert if CC indicates "high".
7247 void MacroAssembler::asm_assert_high(const char *msg, int id) {
7248   Label ok;
7249   z_brnh(ok);
7250   stop(msg, id);
7251   bind(ok);
7252 }
7253 
7254 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
7255 // generate non-relocatable code.
7256 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
7257   Label ok;
7258   if (check_equal) { z_bre(ok); }
7259   else             { z_brne(ok); }
7260   stop_static(msg, id);
7261   bind(ok);
7262 }
7263 
7264 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
7265                                           Register mem_base, const char* msg, int id) {
7266   switch (size) {
7267     case 4:
7268       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
7269       break;
7270     case 8:
7271       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
7272       break;
7273     default:
7274       ShouldNotReachHere();
7275   }
7276   if (allow_relocation) { asm_assert(check_equal, msg, id); }
7277   else                  { asm_assert_static(check_equal, msg, id); }
7278 }
7279 
7280 // Check the condition
7281 //   expected_size == FP - SP
7282 // after transformation:
7283 //   expected_size - FP + SP == 0
7284 // Destroys Register expected_size if no tmp register is passed.
7285 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;
  } else if (tmp != expected_size) {
    z_lgr(tmp, expected_size);
  }
  z_algr(tmp, Z_SP);
  z_slg(tmp, 0, Z_R0, Z_SP);
  asm_assert_eq(msg, id);
7296 }
7297 #endif // !PRODUCT
7298 
7299 void MacroAssembler::verify_thread() {
7300   if (VerifyThread) {
7301     unimplemented("", 117);
7302   }
7303 }
7304 
7305 // Plausibility check for oops.
7306 void MacroAssembler::verify_oop(Register oop, const char* msg) {
7307   if (!VerifyOops) return;
7308 
7309   BLOCK_COMMENT("verify_oop {");
7310   Register tmp = Z_R0;
7311   unsigned int nbytes_save = 5*BytesPerWord;
7312   address entry = StubRoutines::verify_oop_subroutine_entry_address();
7313 
7314   save_return_pc();
7315   push_frame_abi160(nbytes_save);
7316   z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
7317 
7318   z_lgr(Z_ARG2, oop);
7319   load_const(Z_ARG1, (address) msg);
7320   load_const(Z_R1, entry);
7321   z_lg(Z_R1, 0, Z_R1);
7322   call_c(Z_R1);
7323 
7324   z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
7325   pop_frame();
7326   restore_return_pc();
7327 
7328   BLOCK_COMMENT("} verify_oop ");
7329 }
7330 
7331 const char* MacroAssembler::stop_types[] = {
7332   "stop",
7333   "untested",
7334   "unimplemented",
7335   "shouldnotreachhere"
7336 };
7337 
7338 static void stop_on_request(const char* tp, const char* msg) {
7339   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
7340   guarantee(false, "Z assembly code requires stop: %s", msg);
7341 }
7342 
7343 void MacroAssembler::stop(int type, const char* msg, int id) {
7344   BLOCK_COMMENT(err_msg("stop: %s {", msg));
7345 
7346   // Setup arguments.
7347   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
7348   load_const(Z_ARG2, (void*) msg);
7349   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
7350   save_return_pc();  // Saves return pc Z_R14.
7351   push_frame_abi160(0);
7352   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7353   // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps ensures the disassembler finds
7355   // the proper beginning of the next instruction.
7356   z_illtrap(); // Illegal instruction.
7357   z_illtrap(); // Illegal instruction.
7358 
7359   BLOCK_COMMENT(" } stop");
7360 }
7361 
7362 // Special version of stop() for code size reduction.
7363 // Reuses the previously generated call sequence, if any.
7364 // Generates the call sequence on its own, if necessary.
7365 // Note: This code will work only in non-relocatable code!
7366 //       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
7368 //       should be given for "hand-written" code, if all chain calls are in the same code blob.
7369 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
7370 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
7371   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
7372 
7373   // Setup arguments.
7374   if (allow_relocation) {
7375     // Relocatable version (for comparison purposes). Remove after some time.
7376     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
7377     load_const(Z_ARG2, (void*) msg);
7378   } else {
7379     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
7380     load_absolute_address(Z_ARG2, (address)msg);
7381   }
7382   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
7383     BLOCK_COMMENT("branch to reentry point:");
7384     z_brc(bcondAlways, reentry);
7385   } else {
7386     BLOCK_COMMENT("reentry point:");
7387     reentry = pc();      // Re-entry point for subsequent stop calls.
7388     save_return_pc();    // Saves return pc Z_R14.
7389     push_frame_abi160(0);
7390     if (allow_relocation) {
7391       reentry = NULL;    // Prevent reentry if code relocation is allowed.
7392       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7393     } else {
7394       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7395     }
7396     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
7397   }
7398   BLOCK_COMMENT(" } stop_chain");
7399 
7400   return reentry;
7401 }
7402 
7403 // Special version of stop() for code size reduction.
7404 // Assumes constant relative addresses for data and runtime call.
7405 void MacroAssembler::stop_static(int type, const char* msg, int id) {
7406   stop_chain(NULL, type, msg, id, false);
7407 }
7408 
7409 void MacroAssembler::stop_subroutine() {
7410   unimplemented("stop_subroutine", 710);
7411 }
7412 
// Prints msg to stdout from within generated code.
7414 void MacroAssembler::warn(const char* msg) {
7415   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
7416   load_absolute_address(Z_R1, (address) warning);
7417   load_absolute_address(Z_ARG1, (address) msg);
7418   (void) call(Z_R1);
7419   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
7420 }
7421 
7422 #ifndef PRODUCT
7423 
7424 // Write pattern 0x0101010101010101 in region [low-before, high+after].
7425 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
7426   if (!ZapEmptyStackFields) return;
7427   BLOCK_COMMENT("zap memory region {");
7428   load_const_optimized(val, 0x0101010101010101);
7429   int size = before + after;
7430   if (low == high && size < 5 && size > 0) {
7431     int offset = -before*BytesPerWord;
7432     for (int i = 0; i < size; ++i) {
7433       z_stg(val, Address(low, offset));
      offset += BytesPerWord;
7435     }
7436   } else {
7437     add2reg(addr, -before*BytesPerWord, low);
7438     if (after) {
7439 #ifdef ASSERT
7440       jlong check = after * BytesPerWord;
7441       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
7442 #endif
7443       add2reg(high, after * BytesPerWord);
7444     }
7445     NearLabel loop;
7446     bind(loop);
7447     z_stg(val, Address(addr));
7448     add2reg(addr, 8);
7449     compare64_and_branch(addr, high, bcondNotHigh, loop);
7450     if (after) {
7451       add2reg(high, -after * BytesPerWord);
7452     }
7453   }
7454   BLOCK_COMMENT("} zap memory region");
7455 }
7456 #endif // !PRODUCT
7457 
7458 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
7459   _masm = masm;
7460   _masm->load_absolute_address(_rscratch, (address)flag_addr);
7461   _masm->load_and_test_int(_rscratch, Address(_rscratch));
7462   if (value) {
7463     _masm->z_brne(_label); // Skip if true, i.e. != 0.
7464   } else {
7465     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
7466   }
7467 }
7468 
7469 SkipIfEqual::~SkipIfEqual() {
7470   _masm->bind(_label);
7471 }
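
// Usage sketch for SkipIfEqual (the flag name below is hypothetical): the code emitted
// inside the scope is skipped at run time whenever *flag_addr == value, i.e. it only
// executes when the flag differs from 'value'.
//   {
//     SkipIfEqual skip(_masm, &UseSomeFeatureFlag, false, Z_R1_scratch);
//     // ... code emitted here runs only if UseSomeFeatureFlag is true at run time ...
//   }  // The destructor binds the skip target here.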