1 /*
   2  * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2018, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/barrierSetAssembler.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "interpreter/interpreter.hpp"
  34 #include "gc/shared/cardTableBarrierSet.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.hpp"
  37 #include "oops/accessDecorators.hpp"
  38 #include "oops/compressedOops.inline.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #include "opto/compile.hpp"
  41 #include "opto/intrinsicnode.hpp"
  42 #include "opto/matcher.hpp"
  43 #include "prims/methodHandles.hpp"
  44 #include "registerSaver_s390.hpp"
  45 #include "runtime/biasedLocking.hpp"
  46 #include "runtime/icache.hpp"
  47 #include "runtime/interfaceSupport.inline.hpp"
  48 #include "runtime/objectMonitor.hpp"
  49 #include "runtime/os.hpp"
  50 #include "runtime/safepoint.hpp"
  51 #include "runtime/safepointMechanism.hpp"
  52 #include "runtime/sharedRuntime.hpp"
  53 #include "runtime/stubRoutines.hpp"
  54 #include "utilities/events.hpp"
  55 #include "utilities/macros.hpp"
  56 
  57 #include <ucontext.h>
  58 
  59 #define BLOCK_COMMENT(str) block_comment(str)
  60 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  61 
  62 // Move 32-bit register if destination and source are different.
  63 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  64   if (rs != rd) { z_lr(rd, rs); }
  65 }
  66 
  67 // Move register if destination and source are different.
  68 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  69   if (rs != rd) { z_lgr(rd, rs); }
  70 }
  71 
  72 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  73 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  74   if (rs != rd) { z_llgfr(rd, rs); }
  75 }
  76 
  77 // Move float register if destination and source are different.
  78 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  79   if (rs != rd) { z_ldr(rd, rs); }
  80 }
  81 
  82 // Move integer register if destination and source are different.
  83 // It is assumed that shorter-than-int types are already
  84 // appropriately sign-extended.
  85 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  86                                         BasicType src_type) {
  87   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  88   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  89 
  90   if (dst_type == src_type) {
  91     lgr_if_needed(dst, src); // Just move all 64 bits.
  92     return;
  93   }
  94 
  95   switch (dst_type) {
  96     // Do not support these types for now.
  97     //  case T_BOOLEAN:
  98     case T_BYTE:  // signed byte
  99       switch (src_type) {
 100         case T_INT:
 101           z_lgbr(dst, src);
 102           break;
 103         default:
 104           ShouldNotReachHere();
 105       }
 106       return;
 107 
 108     case T_CHAR:
 109     case T_SHORT:
 110       switch (src_type) {
 111         case T_INT:
 112           if (dst_type == T_CHAR) {
 113             z_llghr(dst, src);
 114           } else {
 115             z_lghr(dst, src);
 116           }
 117           break;
 118         default:
 119           ShouldNotReachHere();
 120       }
 121       return;
 122 
 123     case T_INT:
 124       switch (src_type) {
 125         case T_BOOLEAN:
 126         case T_BYTE:
 127         case T_CHAR:
 128         case T_SHORT:
 129         case T_INT:
 130         case T_LONG:
 131         case T_OBJECT:
 132         case T_ARRAY:
 133         case T_VOID:
 134         case T_ADDRESS:
 135           lr_if_needed(dst, src);
 136           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 137           return;
 138 
 139         default:
 140           assert(false, "non-integer src type");
 141           return;
 142       }
 143     case T_LONG:
 144       switch (src_type) {
 145         case T_BOOLEAN:
 146         case T_BYTE:
 147         case T_CHAR:
 148         case T_SHORT:
 149         case T_INT:
 150           z_lgfr(dst, src); // sign extension
 151           return;
 152 
 153         case T_LONG:
 154         case T_OBJECT:
 155         case T_ARRAY:
 156         case T_VOID:
 157         case T_ADDRESS:
 158           lgr_if_needed(dst, src);
 159           return;
 160 
 161         default:
 162           assert(false, "non-integer src type");
 163           return;
 164       }
 165       return;
 166     case T_OBJECT:
 167     case T_ARRAY:
 168     case T_VOID:
 169     case T_ADDRESS:
 170       switch (src_type) {
 171         // These types don't make sense to be converted to pointers:
 172         //      case T_BOOLEAN:
 173         //      case T_BYTE:
 174         //      case T_CHAR:
 175         //      case T_SHORT:
 176 
 177         case T_INT:
 178           z_llgfr(dst, src); // zero extension
 179           return;
 180 
 181         case T_LONG:
 182         case T_OBJECT:
 183         case T_ARRAY:
 184         case T_VOID:
 185         case T_ADDRESS:
 186           lgr_if_needed(dst, src);
 187           return;
 188 
 189         default:
 190           assert(false, "non-integer src type");
 191           return;
 192       }
 193       return;
 194     default:
 195       assert(false, "non-integer dst type");
 196       return;
 197   }
 198 }
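// Illustrative call sites for move_reg_if_needed (not from the original sources; register choices are hypothetical):
//   move_reg_if_needed(Z_R3, T_LONG, Z_R2, T_INT);   // emits LGFR  Z_R3,Z_R2 (sign extension).
//   move_reg_if_needed(Z_R3, T_CHAR, Z_R2, T_INT);   // emits LLGHR Z_R3,Z_R2 (zero-extend halfword).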
 199 
 200 // Move float register if destination and source are different.
 201 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 202                                          FloatRegister src, BasicType src_type) {
 203   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 204   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 205   if (dst_type == src_type) {
 206     ldr_if_needed(dst, src); // Just move all 64 bits.
 207   } else {
 208     switch (dst_type) {
 209       case T_FLOAT:
 210         assert(src_type == T_DOUBLE, "invalid float type combination");
 211         z_ledbr(dst, src);
 212         return;
 213       case T_DOUBLE:
 214         assert(src_type == T_FLOAT, "invalid float type combination");
 215         z_ldebr(dst, src);
 216         return;
 217       default:
 218         assert(false, "non-float dst type");
 219         return;
 220     }
 221   }
 222 }
 223 
 224 // Optimized emitter for reg to mem operations.
 225 // Uses modern instructions if running on modern hardware, classic instructions
 226 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 227 // Data register (reg) cannot be used as work register.
 228 //
 229 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 230 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 231 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 232                                   int64_t       disp,
 233                                   Register      index,
 234                                   Register      base,
 235                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 236                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 237                                   Register      scratch) {
 238   index = (index == noreg) ? Z_R0 : index;
 239   if (Displacement::is_shortDisp(disp)) {
 240     (this->*classic)(reg, disp, index, base);
 241   } else {
 242     if (Displacement::is_validDisp(disp)) {
 243       (this->*modern)(reg, disp, index, base);
 244     } else {
 245       if (scratch != Z_R0 && scratch != Z_R1) {
 246         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 247       } else {
 248         if (scratch != Z_R0) {   // scratch == Z_R1
 249           if ((scratch == index) || (index == base)) {
 250             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 251           } else {
 252             add2reg(scratch, disp, base);
 253             (this->*classic)(reg, 0, index, scratch);
 254             if (base == scratch) {
 255               add2reg(base, -disp);  // Restore base.
 256             }
 257           }
 258         } else {   // scratch == Z_R0
 259           z_lgr(scratch, base);
 260           add2reg(base, disp);
 261           (this->*classic)(reg, 0, index, base);
 262           z_lgr(base, scratch);      // Restore base.
 263         }
 264       }
 265     }
 266   }
 267 }
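// Illustrative displacement handling in freg2mem_opt (hypothetical operands, following the code above):
//   disp =    100 (uimm12)       -> classic form, e.g. STD  reg,100(index,base)
//   disp =   8000 (simm20)       -> modern  form, e.g. STDY reg,8000(index,base)
//   disp = 600000 (out of range) -> base is temporarily adjusted via the scratch register path.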
 268 
 269 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 270   if (is_double) {
 271     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 272   } else {
 273     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 274   }
 275 }
 276 
 277 // Optimized emitter for mem to reg operations.
 278 // Uses modern instructions if running on modern hardware, classic instructions
 279 // otherwise. Prefers (usually shorter) classic instructions if applicable.
// Data register (reg) cannot be used as work register.
 281 //
 282 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 283 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 284 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 285                                   int64_t       disp,
 286                                   Register      index,
 287                                   Register      base,
 288                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 289                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 290                                   Register      scratch) {
 291   index = (index == noreg) ? Z_R0 : index;
 292   if (Displacement::is_shortDisp(disp)) {
 293     (this->*classic)(reg, disp, index, base);
 294   } else {
 295     if (Displacement::is_validDisp(disp)) {
 296       (this->*modern)(reg, disp, index, base);
 297     } else {
 298       if (scratch != Z_R0 && scratch != Z_R1) {
 299         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 300       } else {
 301         if (scratch != Z_R0) {   // scratch == Z_R1
 302           if ((scratch == index) || (index == base)) {
 303             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 304           } else {
 305             add2reg(scratch, disp, base);
 306             (this->*classic)(reg, 0, index, scratch);
 307             if (base == scratch) {
 308               add2reg(base, -disp);  // Restore base.
 309             }
 310           }
 311         } else {   // scratch == Z_R0
 312           z_lgr(scratch, base);
 313           add2reg(base, disp);
 314           (this->*classic)(reg, 0, index, base);
 315           z_lgr(base, scratch);      // Restore base.
 316         }
 317       }
 318     }
 319   }
 320 }
 321 
 322 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 323   if (is_double) {
 324     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 325   } else {
 326     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 327   }
 328 }
 329 
 330 // Optimized emitter for reg to mem operations.
 331 // Uses modern instructions if running on modern hardware, classic instructions
 332 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 333 // Data register (reg) cannot be used as work register.
 334 //
// Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
// CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 338 void MacroAssembler::reg2mem_opt(Register reg,
 339                                  int64_t  disp,
 340                                  Register index,
 341                                  Register base,
 342                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 343                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 344                                  Register scratch) {
 345   index = (index == noreg) ? Z_R0 : index;
 346   if (Displacement::is_shortDisp(disp)) {
 347     (this->*classic)(reg, disp, index, base);
 348   } else {
 349     if (Displacement::is_validDisp(disp)) {
 350       (this->*modern)(reg, disp, index, base);
 351     } else {
 352       if (scratch != Z_R0 && scratch != Z_R1) {
 353         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 354       } else {
 355         if (scratch != Z_R0) {   // scratch == Z_R1
 356           if ((scratch == index) || (index == base)) {
 357             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 358           } else {
 359             add2reg(scratch, disp, base);
 360             (this->*classic)(reg, 0, index, scratch);
 361             if (base == scratch) {
 362               add2reg(base, -disp);  // Restore base.
 363             }
 364           }
 365         } else {   // scratch == Z_R0
 366           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 367             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 368           } else {
 369             z_lgr(scratch, base);
 370             add2reg(base, disp);
 371             (this->*classic)(reg, 0, index, base);
 372             z_lgr(base, scratch);    // Restore base.
 373           }
 374         }
 375       }
 376     }
 377   }
 378 }
 379 
 380 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 381   int store_offset = offset();
 382   if (is_double) {
 383     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 384   } else {
 385     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 386   }
 387   return store_offset;
 388 }
 389 
 390 // Optimized emitter for mem to reg operations.
 391 // Uses modern instructions if running on modern hardware, classic instructions
 392 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 393 // Data register (reg) will be used as work register where possible.
 394 void MacroAssembler::mem2reg_opt(Register reg,
 395                                  int64_t  disp,
 396                                  Register index,
 397                                  Register base,
 398                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 399                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 400   index = (index == noreg) ? Z_R0 : index;
 401   if (Displacement::is_shortDisp(disp)) {
 402     (this->*classic)(reg, disp, index, base);
 403   } else {
 404     if (Displacement::is_validDisp(disp)) {
 405       (this->*modern)(reg, disp, index, base);
 406     } else {
 407       if ((reg == index) && (reg == base)) {
 408         z_sllg(reg, reg, 1);
 409         add2reg(reg, disp);
 410         (this->*classic)(reg, 0, noreg, reg);
 411       } else if ((reg == index) && (reg != Z_R0)) {
 412         add2reg(reg, disp);
 413         (this->*classic)(reg, 0, reg, base);
 414       } else if (reg == base) {
 415         add2reg(reg, disp);
 416         (this->*classic)(reg, 0, index, reg);
 417       } else if (reg != Z_R0) {
 418         add2reg(reg, disp, base);
 419         (this->*classic)(reg, 0, index, reg);
 420       } else { // reg == Z_R0 && reg != base here
 421         add2reg(base, disp);
 422         (this->*classic)(reg, 0, index, base);
 423         add2reg(base, -disp);
 424       }
 425     }
 426   }
 427 }
 428 
 429 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 430   if (is_double) {
 431     z_lg(reg, a);
 432   } else {
 433     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 434   }
 435 }
 436 
 437 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 438   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 439 }
 440 
 441 void MacroAssembler::and_imm(Register r, long mask,
 442                              Register tmp /* = Z_R0 */,
 443                              bool wide    /* = false */) {
 444   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 445 
 446   if (!wide) {
 447     z_nilf(r, mask);
 448     return;
 449   }
 450 
 451   assert(r != tmp, " need a different temporary register !");
 452   load_const_optimized(tmp, mask);
 453   z_ngr(r, tmp);
 454 }
 455 
 456 // Calculate the 1's complement.
 457 // Note: The condition code is neither preserved nor correctly set by this code!!!
 458 // Note: (wide == false) does not protect the high order half of the target register
//       from alteration. It only serves as an optimization hint for 32-bit results.
 460 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 461 
 462   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 463     z_xilf(r1, -1);
 464     if (wide) {
 465       z_xihf(r1, -1);
 466     }
 467   } else { // Distinct src and dst registers.
 468     if (VM_Version::has_DistinctOpnds()) {
 469       load_const_optimized(r1, -1);
 470       z_xgrk(r1, r2, r1);
 471     } else {
 472       if (wide) {
 473         z_lgr(r1, r2);
 474         z_xilf(r1, -1);
 475         z_xihf(r1, -1);
 476       } else {
 477         z_lr(r1, r2);
 478         z_xilf(r1, -1);
 479       }
 480     }
 481   }
 482 }
 483 
 484 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 485   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 486   assert(rBitPos <= 63,      "63   is rightmost bit position");
 487   assert(lBitPos <= rBitPos, "inverted selection interval");
 488   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 489 }
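// Worked examples (bit 0 is the leftmost bit position, as asserted above):
//   create_mask( 0, 63) == 0xffffffffffffffff
//   create_mask(48, 63) == 0x000000000000ffff
//   create_mask( 0, 15) == 0xffff000000000000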
 490 
 491 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 492 // Rotate src, then mask register contents such that only bits in range survive.
 493 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 494 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 495 // The caller must ensure that the selected range only contains bits with defined value.
 496 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 497                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 498   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 499   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 500   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 501   //  Pre-determine which parts of dst will be zero after shift/rotate.
 502   bool llZero  =  sll4rll && (nRotate >= 16);
 503   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 504   bool lfZero  = llZero && lhZero;
 505   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 506   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 507   bool hfZero  = hlZero && hhZero;
 508 
 509   // rotate then mask src operand.
 510   // if oneBits == true,  all bits outside selected range are 1s.
 511   // if oneBits == false, all bits outside selected range are 0s.
 512   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 513     if (dst32bit) {
 514       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 515     } else {
 516       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 517       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 518       else              { z_rllg(dst, src,  nRotate); }
 519     }
 520   } else {
 521     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 522     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 523     else              { z_rllg(dst, src,  nRotate); }
 524   }
 525 
 526   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 527   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 528   unsigned int   range_mask_l  = (unsigned int)range_mask;
 529   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 530   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 531   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 532   unsigned short range_mask_ll = (unsigned short)range_mask;
 533   // Works for z9 and newer H/W.
 534   if (oneBits) {
 535     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 536     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 537   } else {
 538     // All bits outside range become 0s
 539     if (((~range_mask_l) != 0) &&              !lfZero) {
 540       z_nilf(dst, range_mask_l);
 541     }
 542     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 543       z_nihf(dst, range_mask_h);
 544     }
 545   }
 546 }
 547 
 548 // Rotate src, then insert selected range from rotated src into dst.
 549 // Clear dst before, if requested.
 550 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 551                                         int nRotate, bool clear_dst) {
 552   // This version does not depend on src being zero-extended int2long.
 553   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 554   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 555 }
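// Illustrative use (hypothetical registers): copy the low 16 bits of src into dst,
// clearing all other dst bits:
//   rotate_then_insert(Z_R1, Z_R2, 48, 63, 0, true);  // RISBG with the "zero remaining bits" flag.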
 556 
 557 // Rotate src, then and selected range from rotated src into dst.
 558 // Set condition code only if so requested. Otherwise it is unpredictable.
 559 // See performance note in macroAssembler_s390.hpp for important information.
 560 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 561                                      int nRotate, bool test_only) {
 562   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 563   // This version does not depend on src being zero-extended int2long.
 564   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 566 }
 567 
 568 // Rotate src, then or selected range from rotated src into dst.
 569 // Set condition code only if so requested. Otherwise it is unpredictable.
 570 // See performance note in macroAssembler_s390.hpp for important information.
 571 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 572                                     int nRotate, bool test_only) {
 573   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 574   // This version does not depend on src being zero-extended int2long.
 575   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
  z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 577 }
 578 
 579 // Rotate src, then xor selected range from rotated src into dst.
 580 // Set condition code only if so requested. Otherwise it is unpredictable.
 581 // See performance note in macroAssembler_s390.hpp for important information.
 582 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 583                                      int nRotate, bool test_only) {
 584   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
  // This version does not depend on src being zero-extended int2long.
 586   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 587   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 588 }
 589 
 590 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 591   if (inc.is_register()) {
 592     z_agr(r1, inc.as_register());
 593   } else { // constant
 594     intptr_t imm = inc.as_constant();
 595     add2reg(r1, imm);
 596   }
}

// Helper function to multiply the 64bit contents of a register by a 16bit constant.
 599 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 600 // calculation and is thus rather slow.
 601 //
 602 // There is no handling for special cases, e.g. cval==0 or cval==1.
 603 //
 604 // Returns len of generated code block.
 605 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 606   int block_start = offset();
 607 
 608   bool sign_flip = cval < 0;
 609   cval = sign_flip ? -cval : cval;
 610 
 611   BLOCK_COMMENT("Reg64*Con16 {");
 612 
 613   int bit1 = cval & -cval;
 614   if (bit1 == cval) {
 615     z_sllg(rval, rval, exact_log2(bit1));
 616     if (sign_flip) { z_lcgr(rval, rval); }
 617   } else {
 618     int bit2 = (cval-bit1) & -(cval-bit1);
 619     if ((bit1+bit2) == cval) {
 620       z_sllg(work, rval, exact_log2(bit1));
 621       z_sllg(rval, rval, exact_log2(bit2));
 622       z_agr(rval, work);
 623       if (sign_flip) { z_lcgr(rval, rval); }
 624     } else {
 625       if (sign_flip) { z_mghi(rval, -cval); }
 626       else           { z_mghi(rval,  cval); }
 627     }
 628   }
 629   BLOCK_COMMENT("} Reg64*Con16");
 630 
 631   int block_end = offset();
 632   return block_end - block_start;
 633 }
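// Worked example (hypothetical registers): cval = 10 = 2 + 8 (two bits set), so
// mul_reg64_const16(Z_R2, Z_R1, 10) emits
//   SLLG Z_R1,Z_R2,1   // work = rval * 2
//   SLLG Z_R2,Z_R2,3   // rval = rval * 8
//   AGR  Z_R2,Z_R1     // rval = rval * 10
// instead of a multiply instruction.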
 634 
 635 // Generic operation r1 := r2 + imm.
 636 //
 637 // Should produce the best code for each supported CPU version.
 638 // r2 == noreg yields r1 := r1 + imm
 639 // imm == 0 emits either no instruction or r1 := r2 !
 640 // NOTES: 1) Don't use this function where fixed sized
 641 //           instruction sequences are required!!!
 642 //        2) Don't use this function if condition code
 643 //           setting is required!
 644 //        3) Despite being declared as int64_t, the parameter imm
 645 //           must be a simm_32 value (= signed 32-bit integer).
 646 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 647   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 648 
 649   if (r2 == noreg) { r2 = r1; }
 650 
 651   // Handle special case imm == 0.
 652   if (imm == 0) {
 653     lgr_if_needed(r1, r2);
 654     // Nothing else to do.
 655     return;
 656   }
 657 
 658   if (!PreferLAoverADD || (r2 == Z_R0)) {
 659     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 660 
 661     // Can we encode imm in 16 bits signed?
 662     if (Immediate::is_simm16(imm)) {
 663       if (r1 == r2) {
 664         z_aghi(r1, imm);
 665         return;
 666       }
 667       if (distinctOpnds) {
 668         z_aghik(r1, r2, imm);
 669         return;
 670       }
 671       z_lgr(r1, r2);
 672       z_aghi(r1, imm);
 673       return;
 674     }
 675   } else {
 676     // Can we encode imm in 12 bits unsigned?
 677     if (Displacement::is_shortDisp(imm)) {
 678       z_la(r1, imm, r2);
 679       return;
 680     }
 681     // Can we encode imm in 20 bits signed?
 682     if (Displacement::is_validDisp(imm)) {
 683       // Always use LAY instruction, so we don't need the tmp register.
 684       z_lay(r1, imm, r2);
 685       return;
 686     }
 687 
 688   }
 689 
 690   // Can handle it (all possible values) with long immediates.
 691   lgr_if_needed(r1, r2);
 692   z_agfi(r1, imm);
 693 }
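// Illustrative instruction selection for add2reg (assuming PreferLAoverADD is off and
// the distinct-operands facility is available; registers are hypothetical):
//   add2reg(Z_R3,      8, Z_R3)  ->  AGHI  Z_R3,8
//   add2reg(Z_R3,      8, Z_R2)  ->  AGHIK Z_R3,Z_R2,8
//   add2reg(Z_R3, 100000, Z_R2)  ->  LGR Z_R3,Z_R2 ; AGFI Z_R3,100000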
 694 
 695 // Generic operation r := b + x + d
 696 //
 697 // Addition of several operands with address generation semantics - sort of:
 698 //  - no restriction on the registers. Any register will do for any operand.
 699 //  - x == noreg: operand will be disregarded.
 700 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 701 //  - x == Z_R0:  just disregard
 702 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 703 //
 704 // The same restrictions as on add2reg() are valid!!!
 705 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 706   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 707 
 708   if (x == noreg) { x = Z_R0; }
 709   if (b == noreg) { b = r; }
 710 
 711   // Handle special case x == R0.
 712   if (x == Z_R0) {
 713     // Can simply add the immediate value to the base register.
 714     add2reg(r, d, b);
 715     return;
 716   }
 717 
 718   if (!PreferLAoverADD || (b == Z_R0)) {
 719     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 720     // Handle special case d == 0.
 721     if (d == 0) {
 722       if (b == x)        { z_sllg(r, b, 1); return; }
 723       if (r == x)        { z_agr(r, b);     return; }
 724       if (r == b)        { z_agr(r, x);     return; }
 725       if (distinctOpnds) { z_agrk(r, x, b); return; }
 726       z_lgr(r, b);
 727       z_agr(r, x);
 728     } else {
 729       if (x == b)             { z_sllg(r, x, 1); }
 730       else if (r == x)        { z_agr(r, b); }
 731       else if (r == b)        { z_agr(r, x); }
 732       else if (distinctOpnds) { z_agrk(r, x, b); }
 733       else {
 734         z_lgr(r, b);
 735         z_agr(r, x);
 736       }
 737       add2reg(r, d);
 738     }
 739   } else {
 740     // Can we encode imm in 12 bits unsigned?
 741     if (Displacement::is_shortDisp(d)) {
 742       z_la(r, d, x, b);
 743       return;
 744     }
 745     // Can we encode imm in 20 bits signed?
 746     if (Displacement::is_validDisp(d)) {
 747       z_lay(r, d, x, b);
 748       return;
 749     }
 750     z_la(r, 0, x, b);
 751     add2reg(r, d);
 752   }
 753 }
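// Illustrative instruction selection for add2reg_with_index (assuming PreferLAoverADD
// is on and all registers are distinct and different from Z_R0):
//   add2reg_with_index(Z_R1,    100, Z_R2, Z_R3)  ->  LA  Z_R1,100(Z_R2,Z_R3)
//   add2reg_with_index(Z_R1,   8000, Z_R2, Z_R3)  ->  LAY Z_R1,8000(Z_R2,Z_R3)
//   add2reg_with_index(Z_R1, 600000, Z_R2, Z_R3)  ->  LA  Z_R1,0(Z_R2,Z_R3) ; AGFI Z_R1,600000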
 754 
 755 // Generic emitter (32bit) for direct memory increment.
 756 // For optimal code, do not specify Z_R0 as temp register.
 757 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 758   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 759     z_asi(a, imm);
 760   } else {
 761     z_lgf(tmp, a);
 762     add2reg(tmp, imm);
 763     z_st(tmp, a);
 764   }
 765 }
 766 
 767 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 768   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 769     z_agsi(a, imm);
 770   } else {
 771     z_lg(tmp, a);
 772     add2reg(tmp, imm);
 773     z_stg(tmp, a);
 774   }
 775 }
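// Illustrative behavior of the add2mem emitters (hypothetical address and scratch register):
//   add2mem_64(Address(Z_R2, 16),   1, Z_R1)  ->  AGSI 16(Z_R2),1   (with the mem-with-immediate-ALU facility)
//   add2mem_64(Address(Z_R2, 16), 300, Z_R1)  ->  LG Z_R1,16(Z_R2); add2reg(Z_R1,300); STG Z_R1,16(Z_R2)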
 776 
 777 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 778   switch (size_in_bytes) {
 779     case  8: z_lg(dst, src); break;
 780     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 781     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 782     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 783     default: ShouldNotReachHere();
 784   }
 785 }
 786 
 787 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 788   switch (size_in_bytes) {
 789     case  8: z_stg(src, dst); break;
 790     case  4: z_st(src, dst); break;
 791     case  2: z_sth(src, dst); break;
 792     case  1: z_stc(src, dst); break;
 793     default: ShouldNotReachHere();
 794   }
 795 }
 796 
 797 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
 798 // a high-order summand in register tmp.
 799 //
 800 // return value: <  0: No split required, si20 actually has property uimm12.
 801 //               >= 0: Split performed. Use return value as uimm12 displacement and
 802 //                     tmp as index register.
 803 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 804   assert(Immediate::is_simm20(si20_offset), "sanity");
 805   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 806   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 807   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 808          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 809   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 810 
 811   Register work = accumulate? Z_R0 : tmp;
 812 
 813   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 814     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 815     z_slag(work, work, 12);
 816   } else {                      // Len of code = 0..10.
 817     if (ll_off == 0) { return -1; }
 818     // ll_off has 8 significant bits (at most) plus sign.
 819     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 820       z_llilh(work, ll_off >> 16);
 821       if (ll_off < 0) {                  // Sign-extension required.
 822         z_lgfr(work, work);
 823       }
 824     } else {
 825       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 826         z_llill(work, ll_off);
 827       } else {                           // Non-zero bits in both halfbytes.
 828         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 829         z_slag(work, work, 12);
 830       }
 831     }
 832   }
 833   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 834   return lg_off;
 835 }
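// Worked example: si20_offset = 0x12345 (too large for uimm12) splits into
//   ll_off = 0x12000 and lg_off = 0x345.
// With fixed_codelen and accumulate, tmp is incremented by 0x12000
// (LGHI 0x12; SLAG 12; ALGR) and the returned uimm12 displacement is 0x345.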
 836 
 837 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 838   if (Displacement::is_validDisp(si20)) {
 839     z_ley(t, si20, a);
 840   } else {
 841     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 842     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 843     // pool loads).
 844     bool accumulate    = true;
 845     bool fixed_codelen = true;
 846     Register work;
 847 
 848     if (fixed_codelen) {
 849       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 850     } else {
 851       accumulate = (a == tmp);
 852     }
 853     work = tmp;
 854 
 855     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 856     if (disp12 < 0) {
 857       z_le(t, si20, work);
 858     } else {
 859       if (accumulate) {
 860         z_le(t, disp12, work);
 861       } else {
 862         z_le(t, disp12, work, a);
 863       }
 864     }
 865   }
 866 }
 867 
 868 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 869   if (Displacement::is_validDisp(si20)) {
 870     z_ldy(t, si20, a);
 871   } else {
 872     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 873     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 874     // pool loads).
 875     bool accumulate    = true;
 876     bool fixed_codelen = true;
 877     Register work;
 878 
 879     if (fixed_codelen) {
 880       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 881     } else {
 882       accumulate = (a == tmp);
 883     }
 884     work = tmp;
 885 
 886     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 887     if (disp12 < 0) {
 888       z_ld(t, si20, work);
 889     } else {
 890       if (accumulate) {
 891         z_ld(t, disp12, work);
 892       } else {
 893         z_ld(t, disp12, work, a);
 894       }
 895     }
 896   }
 897 }
 898 
 899 // PCrelative TOC access.
 900 // Returns distance (in bytes) from current position to start of consts section.
 901 // Returns 0 (zero) if no consts section exists or if it has size zero.
 902 long MacroAssembler::toc_distance() {
 903   CodeSection* cs = code()->consts();
 904   return (long)((cs != NULL) ? cs->start()-pc() : 0);
 905 }
 906 
// The implementation on x86/sparc assumes that constant and instruction sections are
// adjacent, but this does not hold here. Two special situations may occur that we must
// be able to handle:
//   1. The const section may be located apart from the inst section.
//   2. The const section may be empty.
// In both cases, we use the const section's start address to compute the "TOC".
// This seems to occur only temporarily; in the final step we always seem to end up
// with the pc-relative variant.
//
// The PC-relative offset can be up to +/-2**32 -> use long for disp.
// Furthermore, it makes no sense to have special code for adjacent const and
// inst sections.
 919 void MacroAssembler::load_toc(Register Rtoc) {
 920   // Simply use distance from start of const section (should be patched in the end).
 921   long disp = toc_distance();
 922 
 923   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 924   relocate(rspec);
 925   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 926 }
 927 
 928 // PCrelative TOC access.
 929 // Load from anywhere pcrelative (with relocation of load instr)
 930 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 931   address          pc             = this->pc();
 932   ptrdiff_t        total_distance = dataLocation - pc;
 933   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 934 
 935   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 936   assert(total_distance != 0, "sanity");
 937 
 938   // Some extra safety net.
 939   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 940     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 941   }
 942 
 943   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 944   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 945 }
 946 
 947 
 948 // PCrelative TOC access.
// Load from anywhere pcrelative (with relocation of the load instruction).
// The loaded address has to be relocated when it is added to the constant pool.
 951 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 952   address          pc             = this->pc();
 953   ptrdiff_t        total_distance = addrLocation - pc;
 954   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 955 
 956   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 957 
 958   // Some extra safety net.
 959   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
    guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 961   }
 962 
 963   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 964   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 965 }
 966 
 967 // Generic operation: load a value from memory and test.
 968 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 969 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 970   z_lb(dst, a);
 971   z_ltr(dst, dst);
 972 }
 973 
 974 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 975   int64_t disp = a.disp20();
 976   if (Displacement::is_shortDisp(disp)) {
 977     z_lh(dst, a);
 978   } else if (Displacement::is_longDisp(disp)) {
 979     z_lhy(dst, a);
 980   } else {
 981     guarantee(false, "displacement out of range");
 982   }
 983   z_ltr(dst, dst);
 984 }
 985 
 986 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 987   z_lt(dst, a);
 988 }
 989 
 990 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 991   z_ltgf(dst, a);
 992 }
 993 
 994 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 995   z_ltg(dst, a);
 996 }
 997 
 998 // Test a bit in memory.
 999 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
1000   assert(a.index() == noreg, "no index reg allowed in testbit");
1001   if (bit <= 7) {
1002     z_tm(a.disp() + 3, a.base(), 1 << bit);
1003   } else if (bit <= 15) {
1004     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
1005   } else if (bit <= 23) {
1006     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
1007   } else if (bit <= 31) {
1008     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
1009   } else {
1010     ShouldNotReachHere();
1011   }
1012 }
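// Illustrative example: test bit 11 of a 32-bit flag word at 8(Z_R2) (bit 0 being the
// least significant bit, as implied by the byte selection above):
//   testbit(Address(Z_R2, 8), 11)  ->  TM 10(Z_R2),0x08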
1013 
1014 // Test a bit in a register. Result is reflected in CC.
1015 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1016   if (bitPos < 16) {
1017     z_tmll(r, 1U<<bitPos);
1018   } else if (bitPos < 32) {
1019     z_tmlh(r, 1U<<(bitPos-16));
1020   } else if (bitPos < 48) {
1021     z_tmhl(r, 1U<<(bitPos-32));
1022   } else if (bitPos < 64) {
1023     z_tmhh(r, 1U<<(bitPos-48));
1024   } else {
1025     ShouldNotReachHere();
1026   }
1027 }
1028 
1029 void MacroAssembler::prefetch_read(Address a) {
1030   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1031 }
1032 void MacroAssembler::prefetch_update(Address a) {
1033   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1034 }
1035 
1036 // Clear a register, i.e. load const zero into reg.
1037 // Return len (in bytes) of generated instruction(s).
1038 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1039 // set_cc:    Use instruction that sets the condition code, if true.
1040 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1041   unsigned int start_off = offset();
1042   if (whole_reg) {
1043     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1044   } else {  // Only 32bit register.
1045     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1046   }
1047   return offset() - start_off;
1048 }
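// Illustrative examples (hypothetical register):
//   clear_reg(Z_R3, true,  true)   ->  XGR Z_R3,Z_R3  (clears all 64 bits, sets CC)
//   clear_reg(Z_R3, false, false)  ->  LHI Z_R3,0     (clears the low 32 bits, CC preserved)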
1049 
1050 #ifdef ASSERT
1051 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1052   switch (pattern_len) {
1053     case 1:
1054       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
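      // Fall through.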
1055     case 2:
1056       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
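      // Fall through.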
1057     case 4:
1058       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
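      // Fall through.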
1059     case 8:
1060       return load_const_optimized_rtn_len(r, pattern, true);
1061       break;
1062     default:
1063       guarantee(false, "preset_reg: bad len");
1064   }
1065   return 0;
1066 }
1067 #endif
1068 
// addr: Address descriptor of memory to clear. The index register will not be used!
// size: Number of bytes to clear.
//    !!! DO NOT USE THIS FOR ATOMIC MEMORY CLEARING !!!
//    !!! Use store_const() instead                  !!!
1073 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1074   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1075 
1076   if (size == 1) {
1077     z_mvi(addr, 0);
1078     return;
1079   }
1080 
1081   switch (size) {
1082     case 2: z_mvhhi(addr, 0);
1083       return;
1084     case 4: z_mvhi(addr, 0);
1085       return;
1086     case 8: z_mvghi(addr, 0);
1087       return;
1088     default: ; // Fallthru to xc.
1089   }
1090 
1091   z_xc(addr, size, addr);
1092 }
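// Illustrative examples (hypothetical addresses):
//   clear_mem(Address(Z_R2, 8),  8)  ->  MVGHI 8(Z_R2),0
//   clear_mem(Address(Z_R2, 8), 16)  ->  XC of length 16, destination cleared by XORing it
//                                        with itself (not atomic, see the note above).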
1093 
1094 void MacroAssembler::align(int modulus) {
1095   while (offset() % modulus != 0) z_nop();
1096 }
1097 
// Special version for non-relocatable code if required alignment
1099 // is larger than CodeEntryAlignment.
1100 void MacroAssembler::align_address(int modulus) {
1101   while ((uintptr_t)pc() % modulus != 0) z_nop();
1102 }
1103 
1104 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1105                                          Register temp_reg,
1106                                          int64_t extra_slot_offset) {
1107   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1108   // which issues an unnecessary add instruction.
1109   int stackElementSize = Interpreter::stackElementSize;
1110   int64_t offset = extra_slot_offset * stackElementSize;
1111   const Register argbase = Z_esp;
1112   if (arg_slot.is_constant()) {
1113     offset += arg_slot.as_constant() * stackElementSize;
1114     return Address(argbase, offset);
1115   }
1116   // else
1117   assert(temp_reg != noreg, "must specify");
1118   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1119   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1120   return Address(argbase, temp_reg, offset);
1121 }
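// Illustrative example (assuming Interpreter::stackElementSize == 8):
//   argument_address(RegisterOrConstant(2), noreg, 0) yields Address(Z_esp, 16),
//   i.e. the stack element at slot 2 relative to Z_esp.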
1122 
1123 
1124 //===================================================================
1125 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1126 //===================================================================
//===            P A T C H A B L E   C O N S T A N T S            ===
1128 //===================================================================
1129 
1130 
1131 //---------------------------------------------------
1132 //  Load (patchable) constant into register
1133 //---------------------------------------------------
1134 
1135 
1136 // Load absolute address (and try to optimize).
1137 //   Note: This method is usable only for position-fixed code,
1138 //         referring to a position-fixed target location.
1139 //         If not so, relocations and patching must be used.
1140 void MacroAssembler::load_absolute_address(Register d, address addr) {
1141   assert(addr != NULL, "should not happen");
1142   BLOCK_COMMENT("load_absolute_address:");
1143   if (addr == NULL) {
1144     z_larl(d, pc()); // Dummy emit for size calc.
1145     return;
1146   }
1147 
1148   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1149     z_larl(d, addr);
1150     return;
1151   }
1152 
1153   load_const_optimized(d, (long)addr);
1154 }
1155 
1156 // Load a 64bit constant.
1157 // Patchable code sequence, but not atomically patchable.
1158 // Make sure to keep code size constant -> no value-dependent optimizations.
1159 // Do not kill condition code.
1160 void MacroAssembler::load_const(Register t, long x) {
1161   Assembler::z_iihf(t, (int)(x >> 32));
1162   Assembler::z_iilf(t, (int)(x & 0xffffffff));
1163 }
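// Illustrative example: load_const(Z_R1, 0x123456789abcdef0) emits the fixed-length pair
//   IIHF Z_R1,0x12345678
//   IILF Z_R1,0x9abcdef0
// which is what the patching and inspection routines below rely on.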
1164 
1165 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1166 // Patchable code sequence, but not atomically patchable.
1167 // Make sure to keep code size constant -> no value-dependent optimizations.
1168 // Do not kill condition code.
1169 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1170   if (sign_extend) { Assembler::z_lgfi(t, x); }
1171   else             { Assembler::z_llilf(t, x); }
1172 }
1173 
1174 // Load narrow oop constant, no decompression.
1175 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1176   assert(UseCompressedOops, "must be on to call this method");
1177   load_const_32to64(t, a, false /*sign_extend*/);
1178 }
1179 
1180 // Load narrow klass constant, compression required.
1181 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1182   assert(UseCompressedClassPointers, "must be on to call this method");
1183   narrowKlass encoded_k = Klass::encode_klass(k);
1184   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1185 }
1186 
1187 //------------------------------------------------------
1188 //  Compare (patchable) constant with register.
1189 //------------------------------------------------------
1190 
1191 // Compare narrow oop in reg with narrow oop constant, no decompression.
1192 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1193   assert(UseCompressedOops, "must be on to call this method");
1194 
1195   Assembler::z_clfi(oop1, oop2);
1196 }
1197 
1198 // Compare narrow oop in reg with narrow oop constant, no decompression.
1199 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1200   assert(UseCompressedClassPointers, "must be on to call this method");
1201   narrowKlass encoded_k = Klass::encode_klass(klass2);
1202 
1203   Assembler::z_clfi(klass1, encoded_k);
1204 }
1205 
1206 //----------------------------------------------------------
1207 //  Check which kind of load_constant we have here.
1208 //----------------------------------------------------------
1209 
1210 // Detection of CPU version dependent load_const sequence.
1211 // The detection is valid only for code sequences generated by load_const,
1212 // not load_const_optimized.
1213 bool MacroAssembler::is_load_const(address a) {
1214   unsigned long inst1, inst2;
1215   unsigned int  len1,  len2;
1216 
1217   len1 = get_instruction(a, &inst1);
1218   len2 = get_instruction(a + len1, &inst2);
1219 
1220   return is_z_iihf(inst1) && is_z_iilf(inst2);
1221 }
1222 
1223 // Detection of CPU version dependent load_const_32to64 sequence.
1224 // Mostly used for narrow oops and narrow Klass pointers.
1225 // The detection is valid only for code sequences generated by load_const_32to64.
1226 bool MacroAssembler::is_load_const_32to64(address pos) {
1227   unsigned long inst1, inst2;
1228   unsigned int len1;
1229 
1230   len1 = get_instruction(pos, &inst1);
1231   return is_z_llilf(inst1);
1232 }
1233 
1234 // Detection of compare_immediate_narrow sequence.
1235 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1236 bool MacroAssembler::is_compare_immediate32(address pos) {
1237   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1238 }
1239 
1240 // Detection of compare_immediate_narrow sequence.
1241 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1242 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1243   return is_compare_immediate32(pos);
}
1245 
1246 // Detection of compare_immediate_narrow sequence.
1247 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1248 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1249   return is_compare_immediate32(pos);
1250 }
1251 
1252 //-----------------------------------
1253 //  patch the load_constant
1254 //-----------------------------------
1255 
// CPU-version dependent patching of load_const.
1257 void MacroAssembler::patch_const(address a, long x) {
1258   assert(is_load_const(a), "not a load of a constant");
1259   set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
1260   set_imm32((address)(a + 6), (int)(x & 0xffffffff));
1261 }
1262 
1263 // Patching the value of CPU version dependent load_const_32to64 sequence.
1264 // The passed ptr MUST be in compressed format!
1265 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1266   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1267 
1268   set_imm32(pos, np);
1269   return 6;
1270 }
1271 
1272 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1273 // The passed ptr MUST be in compressed format!
1274 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1275   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1276 
1277   set_imm32(pos, np);
1278   return 6;
1279 }
1280 
1281 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1282 // The passed ptr must NOT be in compressed format!
1283 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1284   assert(UseCompressedOops, "Can only patch compressed oops");
1285 
1286   narrowOop no = CompressedOops::encode(o);
1287   return patch_load_const_32to64(pos, no);
1288 }
1289 
1290 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1291 // The passed ptr must NOT be in compressed format!
1292 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1293   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1294 
1295   narrowKlass nk = Klass::encode_klass(k);
1296   return patch_load_const_32to64(pos, nk);
1297 }
1298 
1299 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1300 // The passed ptr must NOT be in compressed format!
1301 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1302   assert(UseCompressedOops, "Can only patch compressed oops");
1303 
1304   narrowOop no = CompressedOops::encode(o);
1305   return patch_compare_immediate_32(pos, no);
1306 }
1307 
1308 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1309 // The passed ptr must NOT be in compressed format!
1310 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1311   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1312 
1313   narrowKlass nk = Klass::encode_klass(k);
1314   return patch_compare_immediate_32(pos, nk);
1315 }
1316 
1317 //------------------------------------------------------------------------
1318 //  Extract the constant from a load_constant instruction stream.
1319 //------------------------------------------------------------------------
1320 
1321 // Get constant from a load_const sequence.
1322 long MacroAssembler::get_const(address a) {
1323   assert(is_load_const(a), "not a load of a constant");
1324   unsigned long x;
1325   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1326   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1327   return (long) x;
1328 }
1329 
1330 //--------------------------------------
1331 //  Store a constant in memory.
1332 //--------------------------------------
1333 
1334 // General emitter to move a constant to memory.
1335 // The store is atomic.
1336 //  o Address must be given in RS format (no index register)
1337 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1338 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1339 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1340 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1341 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
1342 int MacroAssembler::store_const(const Address &dest, long imm,
1343                                 unsigned int lm, unsigned int lc,
1344                                 Register scratch) {
1345   int64_t  disp = dest.disp();
1346   Register base = dest.base();
1347   assert(!dest.has_index(), "not supported");
1348   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1349   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1350   assert(lm>=lc, "memory slot too small");
1351   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1352   assert(Displacement::is_validDisp(disp), "displacement out of range");
1353 
1354   bool is_shortDisp = Displacement::is_shortDisp(disp);
1355   int store_offset = -1;
1356 
1357   // For target len == 1 it's easy.
1358   if (lm == 1) {
1359     store_offset = offset();
1360     if (is_shortDisp) {
1361       z_mvi(disp, base, imm);
1362       return store_offset;
1363     } else {
1364       z_mviy(disp, base, imm);
1365       return store_offset;
1366     }
1367   }
1368 
1369   // All the "good stuff" takes an unsigned displacement.
1370   if (is_shortDisp) {
1371     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1372 
1373     store_offset = offset();
1374     switch (lm) {
1375       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1376         z_mvhhi(disp, base, imm);
1377         return store_offset;
1378       case 4:
1379         if (Immediate::is_simm16(imm)) {
1380           z_mvhi(disp, base, imm);
1381           return store_offset;
1382         }
1383         break;
1384       case 8:
1385         if (Immediate::is_simm16(imm)) {
1386           z_mvghi(disp, base, imm);
1387           return store_offset;
1388         }
1389         break;
1390       default:
1391         ShouldNotReachHere();
1392         break;
1393     }
1394   }
1395 
1396   //  Can't optimize, so load value and store it.
1397   guarantee(scratch != noreg, "need a scratch register here!");
1398   if (imm != 0) {
1399     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1400   } else {
1401     // Leave CC alone!!
1402     (void) clear_reg(scratch, true, false); // Indicate unused result.
1403   }
1404 
1405   store_offset = offset();
1406   if (is_shortDisp) {
1407     switch (lm) {
1408       case 2:
1409         z_sth(scratch, disp, Z_R0, base);
1410         return store_offset;
1411       case 4:
1412         z_st(scratch, disp, Z_R0, base);
1413         return store_offset;
1414       case 8:
1415         z_stg(scratch, disp, Z_R0, base);
1416         return store_offset;
1417       default:
1418         ShouldNotReachHere();
1419         break;
1420     }
1421   } else {
1422     switch (lm) {
1423       case 2:
1424         z_sthy(scratch, disp, Z_R0, base);
1425         return store_offset;
1426       case 4:
1427         z_sty(scratch, disp, Z_R0, base);
1428         return store_offset;
1429       case 8:
1430         z_stg(scratch, disp, Z_R0, base);
1431         return store_offset;
1432       default:
1433         ShouldNotReachHere();
1434         break;
1435     }
1436   }
1437   return -1; // should not reach here
1438 }
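// Usage sketch (illustrative only; registers, displacement, and values are hypothetical):
//   store_const(Address(Z_R2, 8), 17, 8, 2, Z_R1);
//     -> 17 fits a signed 16-bit immediate, so a single atomic MVGHI is emitted.
//   store_const(Address(Z_R2, 8), 0x12345678, 8, 4, Z_R1);
//     -> no immediate form applies, so the constant is loaded into the scratch
//        register (Z_R1) and stored with a single (still atomic) STG.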
1439 
1440 //===================================================================
1441 //===       N O T   P A T C H A B L E   C O N S T A N T S         ===
1442 //===================================================================
1443 
1444 // Load constant x into register t with a fast instruction sequence
1445 // depending on the bits in x. Preserves CC under all circumstances.
1446 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1447   if (x == 0) {
1448     int len;
1449     if (emit) {
1450       len = clear_reg(t, true, false);
1451     } else {
1452       len = 4;
1453     }
1454     return len;
1455   }
1456 
1457   if (Immediate::is_simm16(x)) {
1458     if (emit) { z_lghi(t, x); }
1459     return 4;
1460   }
1461 
1462   // 64 bit value: | part1 | part2 | part3 | part4 |
1463   // At least one part is not zero!
1464   int part1 = ((x >> 32) & 0xffff0000) >> 16;
1465   int part2 = (x >> 32) & 0x0000ffff;
1466   int part3 = (x & 0xffff0000) >> 16;
1467   int part4 = (x & 0x0000ffff);
1468 
1469   // Lower word only (unsigned).
1470   if ((part1 == 0) && (part2 == 0)) {
1471     if (part3 == 0) {
1472       if (emit) z_llill(t, part4);
1473       return 4;
1474     }
1475     if (part4 == 0) {
1476       if (emit) z_llilh(t, part3);
1477       return 4;
1478     }
1479     if (emit) z_llilf(t, (int)(x & 0xffffffff));
1480     return 6;
1481   }
1482 
1483   // Upper word only.
1484   if ((part3 == 0) && (part4 == 0)) {
1485     if (part1 == 0) {
1486       if (emit) z_llihl(t, part2);
1487       return 4;
1488     }
1489     if (part2 == 0) {
1490       if (emit) z_llihh(t, part1);
1491       return 4;
1492     }
1493     if (emit) z_llihf(t, (int)(x >> 32));
1494     return 6;
1495   }
1496 
1497   // Lower word only (signed).
1498   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1499     if (emit) z_lgfi(t, (int)(x & 0xffffffff));
1500     return 6;
1501   }
1502 
1503   int len = 0;
1504 
1505   if ((part1 == 0) || (part2 == 0)) {
1506     if (part1 == 0) {
1507       if (emit) z_llihl(t, part2);
1508       len += 4;
1509     } else {
1510       if (emit) z_llihh(t, part1);
1511       len += 4;
1512     }
1513   } else {
1514     if (emit) z_llihf(t, (int)(x >> 32));
1515     len += 6;
1516   }
1517 
1518   if ((part3 == 0) || (part4 == 0)) {
1519     if (part3 == 0) {
1520       if (emit) z_iill(t, part4);
1521       len += 4;
1522     } else {
1523       if (emit) z_iilh(t, part3);
1524       len += 4;
1525     }
1526   } else {
1527     if (emit) z_iilf(t, (int)(x & 0xffffffff));
1528     len += 6;
1529   }
1530   return len;
1531 }
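// Worked example (illustrative): for x = 0x0000000100008000 the 16-bit parts are
// part1 = 0x0000, part2 = 0x0001, part3 = 0x0000, part4 = 0x8000. Neither 32-bit half
// of x is zero, so the value is built in two steps: LLIHL t,0x0001 (loads the low
// halfword of the high word and clears the rest of the register), then IILL t,0x8000
// (inserts the lowest halfword). The returned length is 4 + 4 = 8 bytes.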
1532 
1533 //=====================================================================
1534 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1535 //=====================================================================
1536 
1537 // Note: In the worst case, one of the scratch registers is destroyed!!!
1538 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1539   // Right operand is constant.
1540   if (x2.is_constant()) {
1541     jlong value = x2.as_constant();
1542     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1543     return;
1544   }
1545 
1546   // Right operand is in register.
1547   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1548 }
1549 
1550 // Note: In the worst case, one of the scratch registers is destroyed!!!
1551 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1552   // Right operand is constant.
1553   if (x2.is_constant()) {
1554     jlong value = x2.as_constant();
1555     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1556     return;
1557   }
1558 
1559   // Right operand is in register.
1560   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1561 }
1562 
1563 // Note: In the worst case, one of the scratch registers is destroyed!!!
1564 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1565   // Right operand is constant.
1566   if (x2.is_constant()) {
1567     jlong value = x2.as_constant();
1568     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1569     return;
1570   }
1571 
1572   // Right operand is in register.
1573   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1574 }
1575 
1576 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1577   // Right operand is constant.
1578   if (x2.is_constant()) {
1579     jlong value = x2.as_constant();
1580     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1581     return;
1582   }
1583 
1584   // Right operand is in register.
1585   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1586 }
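// Usage sketch (illustrative; label and register choice are hypothetical):
//   NearLabel done;
//   compareU64_and_branch(Z_R2, RegisterOrConstant((intptr_t)0), bcondNotEqual, done);
//   // ... code executed only if Z_R2 (interpreted as unsigned 64-bit) is zero ...
//   bind(done);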
1587 
1588 // Generate an optimal branch to the branch target.
1589 // Optimal means that a relative branch (brc or brcl) is used if the
1590 // branch distance is short enough. Loading the target address into a
1591 // register and branching via reg is used as fallback only.
1592 //
1593 // Used registers:
1594 //   Z_R1 - work reg. Holds branch target address.
1595 //          Used in fallback case only.
1596 //
1597 // This version of branch_optimized is good for cases where the target address is known
1598 // and constant, i.e. is never changed (no relocation, no patching).
1599 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1600   address branch_origin = pc();
1601 
1602   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1603     z_brc(cond, branch_addr);
1604   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1605     z_brcl(cond, branch_addr);
1606   } else {
1607     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1608     z_bcr(cond, Z_R1);
1609   }
1610 }
1611 
1612 // This version of branch_optimized is good for cases where the target address
1613 // is potentially not yet known at the time the code is emitted.
1614 //
1615 // One very common case, handled here, is a branch to an unbound label.
1616 // The caller might know (or hope) that the branch distance is short enough
1617 // to be encoded in a 16-bit relative address. In that case, the caller passes a
1618 // NearLabel branch_target.
1619 // Care must be taken with unbound labels. Each call to target(label) creates
1620 // an entry in the patch queue for that label to patch all references of the label
1621 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1622 // an assertion fires at patch time.
1623 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1624   if (branch_target.is_bound()) {
1625     address branch_addr = target(branch_target);
1626     branch_optimized(cond, branch_addr);
1627   } else if (branch_target.is_near()) {
1628     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1629   } else {
1630     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1631   }
1632 }
1633 
1634 // Generate an optimal compare and branch to the branch target.
1635 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1636 // branch distance is short enough. Loading the target address into a
1637 // register and branching via reg is used as fallback only.
1638 //
1639 // Input:
1640 //   r1 - left compare operand
1641 //   r2 - right compare operand
1642 void MacroAssembler::compare_and_branch_optimized(Register r1,
1643                                                   Register r2,
1644                                                   Assembler::branch_condition cond,
1645                                                   address  branch_addr,
1646                                                   bool     len64,
1647                                                   bool     has_sign) {
1648   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
1649 
1650   address branch_origin = pc();
1651   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1652     switch (casenum) {
1653       case 0: z_crj( r1, r2, cond, branch_addr); break;
1654       case 1: z_clrj (r1, r2, cond, branch_addr); break;
1655       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1656       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1657       default: ShouldNotReachHere(); break;
1658     }
1659   } else {
1660     switch (casenum) {
1661       case 0: z_cr( r1, r2); break;
1662       case 1: z_clr(r1, r2); break;
1663       case 2: z_cgr(r1, r2); break;
1664       case 3: z_clgr(r1, r2); break;
1665       default: ShouldNotReachHere(); break;
1666     }
1667     branch_optimized(cond, branch_addr);
1668   }
1669 }
1670 
1671 // Generate an optimal compare and branch to the branch target.
1672 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1673 // branch distance is short enough. Loading the target address into a
1674 // register and branching via reg is used as fallback only.
1675 //
1676 // Input:
1677 //   r1 - left compare operand (in register)
1678 //   x2 - right compare operand (immediate)
1679 void MacroAssembler::compare_and_branch_optimized(Register r1,
1680                                                   jlong    x2,
1681                                                   Assembler::branch_condition cond,
1682                                                   Label&   branch_target,
1683                                                   bool     len64,
1684                                                   bool     has_sign) {
1685   address      branch_origin = pc();
1686   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1687   bool         is_RelAddr16  = branch_target.is_near() ||
1688                                (branch_target.is_bound() &&
1689                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1690   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
1691 
1692   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1693     switch (casenum) {
1694       case 0: z_cij( r1, x2, cond, branch_target); break;
1695       case 1: z_clij(r1, x2, cond, branch_target); break;
1696       case 2: z_cgij(r1, x2, cond, branch_target); break;
1697       case 3: z_clgij(r1, x2, cond, branch_target); break;
1698       default: ShouldNotReachHere(); break;
1699     }
1700     return;
1701   }
1702 
1703   if (x2 == 0) {
1704     switch (casenum) {
1705       case 0: z_ltr(r1, r1); break;
1706       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1707       case 2: z_ltgr(r1, r1); break;
1708       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1709       default: ShouldNotReachHere(); break;
1710     }
1711   } else {
1712     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1713       switch (casenum) {
1714         case 0: z_chi(r1, x2); break;
1715         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1716         case 2: z_cghi(r1, x2); break;
1717         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1718         default: break;
1719       }
1720     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1721       switch (casenum) {
1722         case 0: z_cfi( r1, x2); break;
1723         case 1: z_clfi(r1, x2); break;
1724         case 2: z_cgfi(r1, x2); break;
1725         case 3: z_clgfi(r1, x2); break;
1726         default: ShouldNotReachHere(); break;
1727       }
1728     } else {
1729       // No instruction with immediate operand possible, so load into register.
1730       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1731       load_const_optimized(scratch, x2);
1732       switch (casenum) {
1733         case 0: z_cr( r1, scratch); break;
1734         case 1: z_clr(r1, scratch); break;
1735         case 2: z_cgr(r1, scratch); break;
1736         case 3: z_clgr(r1, scratch); break;
1737         default: ShouldNotReachHere(); break;
1738       }
1739     }
1740   }
1741   branch_optimized(cond, branch_target);
1742 }
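// Selection sketch (illustrative, assuming len64 = true, has_sign = true, and a near,
// bound target on a CPU with the compare-and-branch facility):
//   x2 = 100             -> fits a signed 8-bit immediate: one CGIJ compares and branches.
//   x2 = 100000          -> fits 32 bits but not 16: CGFI followed by a relative branch.
//   x2 = 0x123456789abcL -> too wide for any immediate: the constant goes to a scratch
//                           register (Z_R0, or Z_R1 if r1 == Z_R0), then CGR plus a branch.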
1743 
1744 // Generate an optimal compare and branch to the branch target.
1745 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1746 // branch distance is short enough. Loading the target address into a
1747 // register and branching via reg is used as fallback only.
1748 //
1749 // Input:
1750 //   r1 - left compare operand
1751 //   r2 - right compare operand
1752 void MacroAssembler::compare_and_branch_optimized(Register r1,
1753                                                   Register r2,
1754                                                   Assembler::branch_condition cond,
1755                                                   Label&   branch_target,
1756                                                   bool     len64,
1757                                                   bool     has_sign) {
1758   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1759 
1760   if (branch_target.is_bound()) {
1761     address branch_addr = target(branch_target);
1762     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1763   } else {
1764     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1765       switch (casenum) {
1766         case 0: z_crj(  r1, r2, cond, branch_target); break;
1767         case 1: z_clrj( r1, r2, cond, branch_target); break;
1768         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1769         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1770         default: ShouldNotReachHere(); break;
1771       }
1772     } else {
1773       switch (casenum) {
1774         case 0: z_cr( r1, r2); break;
1775         case 1: z_clr(r1, r2); break;
1776         case 2: z_cgr(r1, r2); break;
1777         case 3: z_clgr(r1, r2); break;
1778         default: ShouldNotReachHere(); break;
1779       }
1780       branch_optimized(cond, branch_target);
1781     }
1782   }
1783 }
1784 
1785 //===========================================================================
1786 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1787 //===========================================================================
1788 
1789 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1790   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1791   int index = oop_recorder()->allocate_metadata_index(obj);
1792   RelocationHolder rspec = metadata_Relocation::spec(index);
1793   return AddressLiteral((address)obj, rspec);
1794 }
1795 
1796 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1797   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1798   int index = oop_recorder()->find_index(obj);
1799   RelocationHolder rspec = metadata_Relocation::spec(index);
1800   return AddressLiteral((address)obj, rspec);
1801 }
1802 
1803 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1804   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1805   int oop_index = oop_recorder()->allocate_oop_index(obj);
1806   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1807 }
1808 
1809 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1810   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1811   int oop_index = oop_recorder()->find_index(obj);
1812   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1813 }
1814 
1815 // NOTE: destroys r
1816 void MacroAssembler::c2bool(Register r, Register t) {
1817   z_lcr(t, r);   // t = -r
1818   z_or(r, t);    // r = -r OR r
1819   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1820 }
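// Worked example (32-bit arithmetic): for r = 5, t becomes -5 and (-5 | 5) has the
// sign bit set, so the logical shift right by 31 yields 1. For r = 0, (0 | 0) = 0
// and the shift yields 0.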
1821 
1822 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1823                                                       Register tmp,
1824                                                       int offset) {
1825   intptr_t value = *delayed_value_addr;
1826   if (value != 0) {
1827     return RegisterOrConstant(value + offset);
1828   }
1829 
1830   BLOCK_COMMENT("delayed_value {");
1831   // Load indirectly to solve generation ordering problem.
1832   load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1833   z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1834 
1835 #ifdef ASSERT
1836   NearLabel L;
1837   compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1838   z_illtrap();
1839   bind(L);
1840 #endif
1841 
1842   if (offset != 0) {
1843     z_agfi(tmp, offset);               // tmp = tmp + offset;
1844   }
1845 
1846   BLOCK_COMMENT("} delayed_value");
1847   return RegisterOrConstant(tmp);
1848 }
1849 
1850 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1851 // and return the resulting instruction.
1852 // Dest_pos and inst_pos are 32 bit only. These parms can only designate
1853 // relative positions.
1854 // Use correct argument types. Do not pre-calculate distance.
1855 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1856   int c = 0;
1857   unsigned long patched_inst = 0;
1858   if (is_call_pcrelative_short(inst) ||
1859       is_branch_pcrelative_short(inst) ||
1860       is_branchoncount_pcrelative_short(inst) ||
1861       is_branchonindex32_pcrelative_short(inst)) {
1862     c = 1;
1863     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1864     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1865     patched_inst = (inst & ~m) | v;
1866   } else if (is_compareandbranch_pcrelative_short(inst)) {
1867     c = 2;
1868     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1869     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1870     patched_inst = (inst & ~m) | v;
1871   } else if (is_branchonindex64_pcrelative_short(inst)) {
1872     c = 3;
1873     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1874     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1875     patched_inst = (inst & ~m) | v;
1876   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1877     c = 4;
1878     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1879     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1880     patched_inst = (inst & ~m) | v;
1881   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1882     c = 5;
1883     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1884     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1885     patched_inst = (inst & ~m) | v;
1886   } else {
1887     print_dbg_msg(tty, inst, "not a relative branch", 0);
1888     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1889     ShouldNotReachHere();
1890   }
1891 
1892   long new_off = get_pcrel_offset(patched_inst);
1893   if (new_off != (dest_pos-inst_pos)) {
1894     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1895     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1896     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1897 #ifdef LUCY_DBG
1898     VM_Version::z_SIGSEGV();
1899 #endif
1900     ShouldNotReachHere();
1901   }
1902   return patched_inst;
1903 }
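// Example (illustrative): for a BRASL at inst_pos targeting dest_pos, the "c = 4" case
// above replaces the low 32 bits of the 48-bit encoding with the signed halfword
// distance (dest_pos - inst_pos) / 2. The verification block then recomputes the byte
// distance from the patched instruction and checks it against dest_pos - inst_pos.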
1904 
1905 // Only called when binding labels (share/vm/asm/assembler.cpp)
1906 // Pass arguments as intended. Do not pre-calculate distance.
1907 void MacroAssembler::pd_patch_instruction(address branch, address target) {
1908   unsigned long stub_inst;
1909   int           inst_len = get_instruction(branch, &stub_inst);
1910 
1911   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1912 }
1913 
1914 
1915 // Extract relative address (aka offset).
1916 // inv_simm16 works for 4-byte instructions only.
1917 // Compare-and-branch instructions are 6 bytes long and carry their 16-bit offset "in the middle".
1918 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1919 
1920   if (MacroAssembler::is_pcrelative_short(inst)) {
1921     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1922       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1923     } else {
1924       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1925     }
1926   }
1927 
1928   if (MacroAssembler::is_pcrelative_long(inst)) {
1929     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1930   }
1931 
1932   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1933 #ifdef LUCY_DBG
1934   VM_Version::z_SIGSEGV();
1935 #else
1936   ShouldNotReachHere();
1937 #endif
1938   return -1;
1939 }
1940 
1941 long MacroAssembler::get_pcrel_offset(address pc) {
1942   unsigned long inst;
1943   unsigned int  len = get_instruction(pc, &inst);
1944 
1945 #ifdef ASSERT
1946   long offset;
1947   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1948     offset = get_pcrel_offset(inst);
1949   } else {
1950     offset = -1;
1951   }
1952 
1953   if (offset == -1) {
1954     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1955 #ifdef LUCY_DBG
1956     VM_Version::z_SIGSEGV();
1957 #else
1958     ShouldNotReachHere();
1959 #endif
1960   }
1961   return offset;
1962 #else
1963   return get_pcrel_offset(inst);
1964 #endif // ASSERT
1965 }
1966 
1967 // Get target address from pc-relative instructions.
1968 address MacroAssembler::get_target_addr_pcrel(address pc) {
1969   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1970   return pc + get_pcrel_offset(pc);
1971 }
1972 
1973 // Patch pc relative load address.
1974 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1975   unsigned long inst;
1976   // Offset is +/- 2**32 -> use long.
1977   ptrdiff_t distance = con - pc;
1978 
1979   get_instruction(pc, &inst);
1980 
1981   if (is_pcrelative_short(inst)) {
1982     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1983 
1984     // Some extra safety net.
1985     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1986       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1987       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1988       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1989     }
1990     return;
1991   }
1992 
1993   if (is_pcrelative_long(inst)) {
1994     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1995 
1996     // Some extra safety net.
1997     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1998       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1999       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
2000       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
2001     }
2002     return;
2003   }
2004 
2005   guarantee(false, "not a pcrelative instruction to patch!");
2006 }
2007 
2008 // "Current PC" here means the address just behind the basr instruction.
2009 address MacroAssembler::get_PC(Register result) {
2010   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2011   return pc();
2012 }
2013 
2014 // Get current PC + offset.
2015 // Offset given in bytes, must be even!
2016 // "Current PC" here means the address of the larl instruction plus the given offset.
2017 address MacroAssembler::get_PC(Register result, int64_t offset) {
2018   address here = pc();
2019   z_larl(result, offset/2); // Save target instruction address in result.
2020   return here + offset;
2021 }
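// Example: get_PC(Z_R1, 8) emits "larl r1,4". LARL adds twice its immediate to the
// address of the LARL instruction itself, so Z_R1 ends up holding the address of the
// LARL plus 8, which is also the value this function returns.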
2022 
2023 void MacroAssembler::instr_size(Register size, Register pc) {
2024   // Extract 2 most significant bits of current instruction.
2025   z_llgc(size, Address(pc));
2026   z_srl(size, 6);
2027   // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
2028   z_ahi(size, 3);
2029   z_nill(size, 6);
2030 }
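// Worked example: on z/Architecture the two leftmost bits of the first instruction byte
// encode the instruction length (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes).
// For an LG instruction (first byte 0xE3) those bits are 0b11, so size = (3+3)&6 = 6.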
2031 
2032 // Resize_frame with SP(new) = SP(old) - [offset].
2033 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2034 {
2035   assert_different_registers(offset, fp, Z_SP);
2036   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2037 
2038   z_sgr(Z_SP, offset);
2039   z_stg(fp, _z_abi(callers_sp), Z_SP);
2040 }
2041 
2042 // Resize_frame with SP(new) = [newSP] + offset.
2043 //   This emitter is useful if we already have calculated a pointer
2044 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2045 //   but need some additional space, e.g. for spilling.
2046 //   newSP    is the pre-calculated pointer. It must not be modified.
2047 //   fp       holds, or is filled with, the frame pointer.
2048 //   offset   is the additional increment which is added to addr to form the new SP.
2049 //            Note: specify a negative value to reserve more space!
2050 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2051 //                    It does not guarantee that fp contains the frame pointer at the end.
2052 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2053   assert_different_registers(newSP, fp, Z_SP);
2054 
2055   if (load_fp) {
2056     z_lg(fp, _z_abi(callers_sp), Z_SP);
2057   }
2058 
2059   add2reg(Z_SP, offset, newSP);
2060   z_stg(fp, _z_abi(callers_sp), Z_SP);
2061 }
2062 
2063 // Resize_frame with SP(new) = [newSP].
2064 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2065 //                    It does not guarantee that fp contains the frame pointer at the end.
2066 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2067   assert_different_registers(newSP, fp, Z_SP);
2068 
2069   if (load_fp) {
2070     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2071   }
2072 
2073   z_lgr(Z_SP, newSP);
2074   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2075     z_stg(fp, _z_abi(callers_sp), newSP);
2076   } else {
2077     z_stg(fp, _z_abi(callers_sp), Z_SP);
2078   }
2079 }
2080 
2081 // Resize_frame with SP(new) = SP(old) + offset.
2082 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2083   assert_different_registers(fp, Z_SP);
2084 
2085   if (load_fp) {
2086     z_lg(fp, _z_abi(callers_sp), Z_SP);
2087   }
2088   add64(Z_SP, offset);
2089   z_stg(fp, _z_abi(callers_sp), Z_SP);
2090 }
2091 
2092 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2093 #ifdef ASSERT
2094   assert_different_registers(bytes, old_sp, Z_SP);
2095   if (!copy_sp) {
2096     z_cgr(old_sp, Z_SP);
2097     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2098   }
2099 #endif
2100   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2101   if (bytes_with_inverted_sign) {
2102     z_agr(Z_SP, bytes);
2103   } else {
2104     z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2105   }
2106   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2107 }
2108 
2109 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2110   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2111   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2112   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2113 
2114   // We must not write outside the current stack bounds (given by Z_SP).
2115   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2116   // We rely on Z_R0 by default to be available as scratch.
2117   z_lgr(scratch, Z_SP);
2118   add2reg(Z_SP, -offset);
2119   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2120 #ifdef ASSERT
2121   // Just make sure nobody uses the value in the default scratch register.
2122   // When another register is used, the caller might rely on it containing the frame pointer.
2123   if (scratch == Z_R0) {
2124     z_iihf(scratch, 0xbaadbabe);
2125     z_iilf(scratch, 0xdeadbeef);
2126   }
2127 #endif
2128   return offset;
2129 }
2130 
2131 // Push a frame of size `bytes' plus abi160 on top.
2132 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2133   BLOCK_COMMENT("push_frame_abi160 {");
2134   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2135   BLOCK_COMMENT("} push_frame_abi160");
2136   return res;
2137 }
2138 
2139 // Pop current C frame.
2140 void MacroAssembler::pop_frame() {
2141   BLOCK_COMMENT("pop_frame:");
2142   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2143 }
2144 
2145 // Pop current C frame and restore return PC register (Z_R14).
2146 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2147   BLOCK_COMMENT("pop_frame_restore_retPC:");
2148   int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2149   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2150   if (Displacement::is_validDisp(retPC_offset)) {
2151     z_lg(Z_R14, retPC_offset, Z_SP);
2152     add2reg(Z_SP, frame_size_in_bytes);
2153   } else {
2154     add2reg(Z_SP, frame_size_in_bytes);
2155     restore_return_pc();
2156   }
2157 }
2158 
2159 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2160   if (allow_relocation) {
2161     call_c(entry_point);
2162   } else {
2163     call_c_static(entry_point);
2164   }
2165 }
2166 
2167 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2168   bool allow_relocation = true;
2169   call_VM_leaf_base(entry_point, allow_relocation);
2170 }
2171 
2172 void MacroAssembler::call_VM_base(Register oop_result,
2173                                   Register last_java_sp,
2174                                   address  entry_point,
2175                                   bool     allow_relocation,
2176                                   bool     check_exceptions) { // Defaults to true.
2177   // If allow_relocation is true, the generated code must be fit for code
2178   // relocation or referenced-data relocation. In other words, all addresses
2179   // must be considered variable; PC-relative addressing is not possible
2180   // in that case.
2181   // If allow_relocation is false, addresses and offsets may be considered
2182   // stable, enabling us to take advantage of some PC-relative addressing
2183   // tweaks. These might improve performance and reduce code size.
2184 
2185   // Determine last_java_sp register.
2186   if (!last_java_sp->is_valid()) {
2187     last_java_sp = Z_SP;  // Load Z_SP as SP.
2188   }
2189 
2190   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2191 
2192   // ARG1 must hold thread address.
2193   z_lgr(Z_ARG1, Z_thread);
2194 
2195   address return_pc = NULL;
2196   if (allow_relocation) {
2197     return_pc = call_c(entry_point);
2198   } else {
2199     return_pc = call_c_static(entry_point);
2200   }
2201 
2202   reset_last_Java_frame(allow_relocation);
2203 
2204   // C++ interp handles this in the interpreter.
2205   check_and_handle_popframe(Z_thread);
2206   check_and_handle_earlyret(Z_thread);
2207 
2208   // Check for pending exceptions.
2209   if (check_exceptions) {
2210     // Check for pending exceptions (java_thread is set upon return).
2211     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2212 
2213     // This used to jump conditionally to forward_exception. However, after
2214     // relocation the conditional branch might not reach its target anymore,
2215     // so we branch around an unconditional call, which always reaches.
2216 
2217     Label ok;
2218     z_bre(ok); // Bcondequal is the same as bcondZero.
2219     call_stub(StubRoutines::forward_exception_entry());
2220     bind(ok);
2221   }
2222 
2223   // Get oop result if there is one and reset the value in the thread.
2224   if (oop_result->is_valid()) {
2225     get_vm_result(oop_result);
2226   }
2227 
2228   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2229 }
2230 
2231 void MacroAssembler::call_VM_base(Register oop_result,
2232                                   Register last_java_sp,
2233                                   address  entry_point,
2234                                   bool     check_exceptions) { // Defaults to true.
2235   bool allow_relocation = true;
2236   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2237 }
2238 
2239 // VM calls without explicit last_java_sp.
2240 
2241 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2242   // Call takes possible detour via InterpreterMacroAssembler.
2243   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2244 }
2245 
2246 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2247   // Z_ARG1 is reserved for the thread.
2248   lgr_if_needed(Z_ARG2, arg_1);
2249   call_VM(oop_result, entry_point, check_exceptions);
2250 }
2251 
2252 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2253   // Z_ARG1 is reserved for the thread.
2254   lgr_if_needed(Z_ARG2, arg_1);
2255   assert(arg_2 != Z_ARG2, "smashed argument");
2256   lgr_if_needed(Z_ARG3, arg_2);
2257   call_VM(oop_result, entry_point, check_exceptions);
2258 }
2259 
2260 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2261                              Register arg_3, bool check_exceptions) {
2262   // Z_ARG1 is reserved for the thread.
2263   lgr_if_needed(Z_ARG2, arg_1);
2264   assert(arg_2 != Z_ARG2, "smashed argument");
2265   lgr_if_needed(Z_ARG3, arg_2);
2266   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2267   lgr_if_needed(Z_ARG4, arg_3);
2268   call_VM(oop_result, entry_point, check_exceptions);
2269 }
2270 
2271 // VM static calls without explicit last_java_sp.
2272 
2273 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2274   // Call takes possible detour via InterpreterMacroAssembler.
2275   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2276 }
2277 
2278 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2279                                     Register arg_3, bool check_exceptions) {
2280   // Z_ARG1 is reserved for the thread.
2281   lgr_if_needed(Z_ARG2, arg_1);
2282   assert(arg_2 != Z_ARG2, "smashed argument");
2283   lgr_if_needed(Z_ARG3, arg_2);
2284   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2285   lgr_if_needed(Z_ARG4, arg_3);
2286   call_VM_static(oop_result, entry_point, check_exceptions);
2287 }
2288 
2289 // VM calls with explicit last_java_sp.
2290 
2291 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2292   // Call takes possible detour via InterpreterMacroAssembler.
2293   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2294 }
2295 
2296 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2297    // Z_ARG1 is reserved for the thread.
2298    lgr_if_needed(Z_ARG2, arg_1);
2299    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2300 }
2301 
2302 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2303                              Register arg_2, bool check_exceptions) {
2304    // Z_ARG1 is reserved for the thread.
2305    lgr_if_needed(Z_ARG2, arg_1);
2306    assert(arg_2 != Z_ARG2, "smashed argument");
2307    lgr_if_needed(Z_ARG3, arg_2);
2308    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2309 }
2310 
2311 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2312                              Register arg_2, Register arg_3, bool check_exceptions) {
2313   // Z_ARG1 is reserved for the thread.
2314   lgr_if_needed(Z_ARG2, arg_1);
2315   assert(arg_2 != Z_ARG2, "smashed argument");
2316   lgr_if_needed(Z_ARG3, arg_2);
2317   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2318   lgr_if_needed(Z_ARG4, arg_3);
2319   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2320 }
2321 
2322 // VM leaf calls.
2323 
2324 void MacroAssembler::call_VM_leaf(address entry_point) {
2325   // Call takes possible detour via InterpreterMacroAssembler.
2326   call_VM_leaf_base(entry_point, true);
2327 }
2328 
2329 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2330   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2331   call_VM_leaf(entry_point);
2332 }
2333 
2334 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2335   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2336   assert(arg_2 != Z_ARG1, "smashed argument");
2337   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2338   call_VM_leaf(entry_point);
2339 }
2340 
2341 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2342   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2343   assert(arg_2 != Z_ARG1, "smashed argument");
2344   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2345   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2346   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2347   call_VM_leaf(entry_point);
2348 }
2349 
2350 // Static VM leaf calls.
2351 // Really static VM leaf calls are never patched.
2352 
2353 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2354   // Call takes possible detour via InterpreterMacroAssembler.
2355   call_VM_leaf_base(entry_point, false);
2356 }
2357 
2358 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2359   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2360   call_VM_leaf_static(entry_point);
2361 }
2362 
2363 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2364   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2365   assert(arg_2 != Z_ARG1, "smashed argument");
2366   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2367   call_VM_leaf_static(entry_point);
2368 }
2369 
2370 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2371   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2372   assert(arg_2 != Z_ARG1, "smashed argument");
2373   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2374   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2375   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2376   call_VM_leaf_static(entry_point);
2377 }
2378 
2379 // Don't use detour via call_c(reg).
2380 address MacroAssembler::call_c(address function_entry) {
2381   load_const(Z_R1, function_entry);
2382   return call(Z_R1);
2383 }
2384 
2385 // Variant for really static (non-relocatable) calls which are never patched.
2386 address MacroAssembler::call_c_static(address function_entry) {
2387   load_absolute_address(Z_R1, function_entry);
2388 #if 0 // def ASSERT
2389   // Verify that call site did not move.
2390   load_const_optimized(Z_R0, function_entry);
2391   z_cgr(Z_R1, Z_R0);
2392   z_brc(bcondEqual, 3);
2393   z_illtrap(0xba);
2394 #endif
2395   return call(Z_R1);
2396 }
2397 
2398 address MacroAssembler::call_c_opt(address function_entry) {
2399   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2400   _last_calls_return_pc = success ? pc() : NULL;
2401   return _last_calls_return_pc;
2402 }
2403 
2404 // Identify a call_far_patchable instruction: LARL + LG + BASR
2405 //
2406 //    nop                   ; optionally, if required for alignment
2407 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2408 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2409 //
2410 // Code pattern will eventually get patched into variant2 (see below for detection code).
2411 //
2412 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2413   address iaddr = instruction_addr;
2414 
2415   // Check for the actual load instruction.
2416   if (!is_load_const_from_toc(iaddr)) { return false; }
2417   iaddr += load_const_from_toc_size();
2418 
2419   // Check for the call (BASR) instruction, finally.
2420   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2421   return is_call_byregister(iaddr);
2422 }
2423 
2424 // Identify a call_far_patchable instruction: BRASL
2425 //
2426 // Code pattern suited for atomic patching:
2427 //    nop                       ; Optionally, if required for alignment.
2428 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2429 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2430 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2431 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2432   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2433 
2434   // Check for correct number of leading nops.
2435   address iaddr;
2436   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2437     if (!is_z_nop(iaddr)) { return false; }
2438   }
2439   assert(iaddr == call_addr, "sanity");
2440 
2441   // --> Check for call instruction.
2442   if (is_call_far_pcrelative(call_addr)) {
2443     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2444     return true;
2445   }
2446 
2447   return false;
2448 }
2449 
2450 // Emit a NOT mt-safely patchable 64 bit absolute call.
2451 // If toc_offset == -2, then the destination of the call (= target) is emitted
2452 //                      to the constant pool and a runtime_call relocation is added
2453 //                      to the code buffer.
2454 // If toc_offset != -2, target must already be in the constant pool at
2455 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2456 //                      from the runtime_call relocation).
2457 // Special handling of emitting to scratch buffer when there is no constant pool.
2458 // Slightly changed code pattern. We emit an additional nop if we would
2459 // not end emitting at a word aligned address. This is to ensure
2460 // an atomically patchable displacement in brasl instructions.
2461 //
2462 // A call_far_patchable comes in different flavors:
2463 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2464 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2465 //  - BRASL                  (relative address of call target coded in instruction)
2466 // All flavors occupy the same amount of space. Length differences are compensated
2467 // by leading nops, such that the instruction sequence always ends at the same
2468 // byte offset. This is required to keep the return offset constant.
2469 // Furthermore, the return address (the end of the instruction sequence) is forced
2470 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2471 // need to patch the call target of the BRASL flavor.
2472 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2473 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2474   // Get current pc and ensure word alignment for end of instr sequence.
2475   const address start_pc = pc();
2476   const intptr_t       start_off = offset();
2477   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2478   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2479   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2480   const bool emit_relative_call  = !emit_target_to_pool &&
2481                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2482                                    ReoptimizeCallSequences &&
2483                                    !code_section()->scratch_emit();
2484 
2485   if (emit_relative_call) {
2486     // Add padding to get the same size as below.
2487     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2488     unsigned int current_padding;
2489     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2490     assert(current_padding == padding, "sanity");
2491 
2492     // relative call: len = 2(nop) + 6 (brasl)
2493     // CodeBlob resize cannot occur in this case because
2494     // this call is emitted into pre-existing space.
2495     z_nop(); // Prepend each BRASL with a nop.
2496     z_brasl(Z_R14, target);
2497   } else {
2498     // absolute call: Get address from TOC.
2499     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2500     if (emit_target_to_pool) {
2501       // When emitting the call for the first time, we do not need to use
2502       // the pc-relative version. It will be patched anyway, when the code
2503       // buffer is copied.
2504       // Relocation is not needed when !ReoptimizeCallSequences.
2505       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2506       AddressLiteral dest(target, rt);
2507       // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills
2508       // inst_mark(). Reset if possible.
2509       bool reset_mark = (inst_mark() == pc());
2510       tocOffset = store_oop_in_toc(dest);
2511       if (reset_mark) { set_inst_mark(); }
2512       if (tocOffset == -1) {
2513         return false; // Couldn't create constant pool entry.
2514       }
2515     }
2516     assert(offset() == start_off, "emit no code before this point!");
2517 
2518     address tocPos = pc() + tocOffset;
2519     if (emit_target_to_pool) {
2520       tocPos = code()->consts()->start() + tocOffset;
2521     }
2522     load_long_pcrelative(Z_R14, tocPos);
2523     z_basr(Z_R14, Z_R14);
2524   }
2525 
2526 #ifdef ASSERT
2527   // Assert that we can identify the emitted call.
2528   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2529   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2530 
2531   if (emit_target_to_pool) {
2532     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2533            "wrong encoding of dest address");
2534   }
2535 #endif
2536   return true; // success
2537 }
2538 
2539 // Identify a call_far_patchable instruction.
2540 // For more detailed information see header comment of call_far_patchable.
2541 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2542   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2543          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2544 }
2545 
2546 // Does the call_far_patchable instruction use a pc-relative encoding
2547 // of the call destination?
2548 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2549   // Variant 2 is pc-relative.
2550   return is_call_far_patchable_variant2_at(instruction_addr);
2551 }
2552 
2553 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2554   // Prepend each BRASL with a nop.
2555   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2556 }
2557 
2558 // Set destination address of a call_far_patchable instruction.
2559 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2560   ResourceMark rm;
2561 
2562   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2563   int code_size = MacroAssembler::call_far_patchable_size();
2564   CodeBuffer buf(instruction_addr, code_size);
2565   MacroAssembler masm(&buf);
2566   masm.call_far_patchable(dest, tocOffset);
2567   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2568 }
2569 
2570 // Get dest address of a call_far_patchable instruction.
2571 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2572   // Dynamic TOC: absolute address in constant pool.
2573   // Check variant2 first, it is more frequent.
2574 
2575   // Relative address encoded in call instruction.
2576   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2577     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2578 
2579   // Absolute address in constant pool.
2580   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2581     address iaddr = instruction_addr;
2582 
2583     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2584     address tocLoc    = iaddr + tocOffset;
2585     return *(address *)(tocLoc);
2586   } else {
2587     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2588     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2589             *(unsigned long*)instruction_addr,
2590             *(unsigned long*)(instruction_addr+8),
2591             call_far_patchable_size());
2592     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2593     ShouldNotReachHere();
2594     return NULL;
2595   }
2596 }
2597 
2598 void MacroAssembler::align_call_far_patchable(address pc) {
2599   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2600 }
2601 
2602 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2603 }
2604 
2605 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2606 }
2607 
2608 // Read from the polling page.
2609 // Use TM or TMY instruction, depending on read offset.
2610 //   offset = 0: Use TM, safepoint polling.
2611 //   offset < 0: Use TMY, profiling safepoint polling.
2612 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2613   if (Immediate::is_uimm12(offset)) {
2614     z_tm(offset, polling_page_address, mask_safepoint);
2615   } else {
2616     z_tmy(offset, polling_page_address, mask_profiling);
2617   }
2618 }
2619 
2620 // Check whether z_instruction is a read access to the polling page
2621 // which was emitted by load_from_polling_page(..).
2622 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2623   unsigned long z_instruction;
2624   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2625 
2626   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2627 
2628   if (ilen == 4) {
2629     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2630 
2631     int ms = inv_mask(z_instruction,8,32);  // mask
2632     int ra = inv_reg(z_instruction,16,32);  // base register
2633     int ds = inv_uimm12(z_instruction);     // displacement
2634 
2635     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2636       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2637     }
2638 
2639   } else { /* if (ilen == 6) */
2640 
2641     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2642 
2643     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2644 
2645     int ms = inv_mask(z_instruction,8,48);  // mask
2646     int ra = inv_reg(z_instruction,16,48);  // base register
2647     int ds = inv_simm20(z_instruction);     // displacement
2648   }
2649 
2650   return true;
2651 }
2652 
2653 // Extract poll address from instruction and ucontext.
2654 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2655   assert(ucontext != NULL, "must have ucontext");
2656   ucontext_t* uc = (ucontext_t*) ucontext;
2657   unsigned long z_instruction;
2658   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2659 
2660   if (ilen == 4 && is_z_tm(z_instruction)) {
2661     int ra = inv_reg(z_instruction, 16, 32);  // base register
2662     int ds = inv_uimm12(z_instruction);       // displacement
2663     address addr = (address)uc->uc_mcontext.gregs[ra];
2664     return addr + ds;
2665   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2666     int ra = inv_reg(z_instruction, 16, 48);  // base register
2667     int ds = inv_simm20(z_instruction);       // displacement
2668     address addr = (address)uc->uc_mcontext.gregs[ra];
2669     return addr + ds;
2670   }
2671 
2672   ShouldNotReachHere();
2673   return NULL;
2674 }
2675 
2676 // Extract poll register from instruction.
2677 uint MacroAssembler::get_poll_register(address instr_loc) {
2678   unsigned long z_instruction;
2679   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2680 
2681   if (ilen == 4 && is_z_tm(z_instruction)) {
2682     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2683   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2684     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2685   }
2686 
2687   ShouldNotReachHere();
2688   return 0;
2689 }
2690 
2691 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
2692   ShouldNotCallThis();
2693   return false;
2694 }
2695 
2696 // Write the serialization page so the VM thread can do a pseudo remote membar.
2697 // We use the current thread pointer to calculate a thread-specific
2698 // offset to write to within the page. This minimizes bus traffic
2699 // due to cache line collisions.
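     // The stored value (Z_R0) is irrelevant; only the write access to the serialization
     // page matters, since the VM thread (un)protects that page to force a fault-based
     // synchronization with mutator threads.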
2700 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2701   assert_different_registers(tmp1, tmp2);
2702   z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
2703   load_const_optimized(tmp1, (long) os::get_memory_serialize_page());
2704 
2705   int mask = os::get_serialize_page_mask();
2706   if (Immediate::is_uimm16(mask)) {
2707     z_nill(tmp2, mask);
2708     z_llghr(tmp2, tmp2);
2709   } else {
2710     z_nilf(tmp2, mask);
2711     z_llgfr(tmp2, tmp2);
2712   }
2713 
2714   z_release();
2715   z_st(Z_R0, 0, tmp2, tmp1);
2716 }
2717 
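     // Emit a safepoint poll. With thread-local handshakes, test the poll byte in the
     // thread structure; otherwise, compare the global safepoint state against
     // _not_synchronized and branch to slow_path if a safepoint is pending.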
2718 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
2719   if (SafepointMechanism::uses_thread_local_poll()) {
2720     const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
2721     // Armed page has poll_bit set.
2722     z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
2723     z_brnaz(slow_path);
2724   } else {
2725     load_const_optimized(temp_reg, SafepointSynchronize::address_of_state());
2726     z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized);
2727     z_brne(slow_path);
2728   }
2729 }
2730 
2731 // Don't rely on register locking, always use Z_R1 as scratch register instead.
2732 void MacroAssembler::bang_stack_with_offset(int offset) {
2733   // Stack grows down, caller passes positive offset.
2734   assert(offset > 0, "must bang with positive offset");
2735   if (Displacement::is_validDisp(-offset)) {
2736     z_tmy(-offset, Z_SP, mask_stackbang);
2737   } else {
2738     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2739     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2740   }
2741 }
2742 
2743 void MacroAssembler::reserved_stack_check(Register return_pc) {
2744   // Test if reserved zone needs to be enabled.
2745   Label no_reserved_zone_enabling;
2746   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2747   BLOCK_COMMENT("reserved_stack_check {");
2748 
2749   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2750   z_brl(no_reserved_zone_enabling);
2751 
2752   // Enable reserved zone again, throw stack overflow exception.
2753   save_return_pc();
2754   push_frame_abi160(0);
2755   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2756   pop_frame();
2757   restore_return_pc();
2758 
2759   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2760   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2761   z_br(Z_R1);
2762 
2763   should_not_reach_here();
2764 
2765   bind(no_reserved_zone_enabling);
2766   BLOCK_COMMENT("} reserved_stack_check");
2767 }
2768 
2769 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
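     // Allocation sketch:
     //   obj = thread->tlab_top; end = obj + size;
     //   if (end > thread->tlab_end) goto slow_case;
     //   thread->tlab_top = end;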
2770 void MacroAssembler::tlab_allocate(Register obj,
2771                                    Register var_size_in_bytes,
2772                                    int con_size_in_bytes,
2773                                    Register t1,
2774                                    Label& slow_case) {
2775   assert_different_registers(obj, var_size_in_bytes, t1);
2776   Register end = t1;
2777   Register thread = Z_thread;
2778 
2779   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2780   if (var_size_in_bytes == noreg) {
2781     z_lay(end, Address(obj, con_size_in_bytes));
2782   } else {
2783     z_lay(end, Address(obj, var_size_in_bytes));
2784   }
2785   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2786   branch_optimized(bcondHigh, slow_case);
2787 
2788   // Update the tlab top pointer.
2789   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2790 
2791   // Recover var_size_in_bytes if necessary.
2792   if (var_size_in_bytes == end) {
2793     z_sgr(var_size_in_bytes, obj);
2794   }
2795 }
2796 
2797 // Emitter for interface method lookup.
2798 //   input: recv_klass, intf_klass, itable_index
2799 //   output: method_result
2800 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2801 // TODO: temp2_reg is unused. We may want to use this emitter in the itable stubs as well.
2802 //       If the register is still not needed by then, remove it.
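     // Informal sketch of the lookup performed below:
     //   entry = recv_klass + vtable_start_offset + vtable_length * vtableEntry_size;
     //   while (entry->interface != NULL && entry->interface != intf_klass) entry++;
     //   if (entry->interface == NULL) goto no_such_interface;  // IncompatibleClassChangeError
     //   if (return_method)
     //     method_result = *(recv_klass + entry->offset + itable_index * itableMethodEntry_size
     //                       + method_offset);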
2803 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2804                                              Register           intf_klass,
2805                                              RegisterOrConstant itable_index,
2806                                              Register           method_result,
2807                                              Register           temp1_reg,
2808                                              Label&             no_such_interface,
2809                                              bool               return_method) {
2810 
2811   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2812   const Register itable_entry_addr = Z_R1_scratch;
2813   const Register itable_interface = Z_R0_scratch;
2814 
2815   BLOCK_COMMENT("lookup_interface_method {");
2816 
2817   // Load start of itable entries into itable_entry_addr.
2818   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2819   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2820 
2821   // Loop over all itable entries until desired interfaceOop(Rinterface) found.
2822   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2823 
2824   add2reg_with_index(itable_entry_addr,
2825                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2826                      recv_klass, vtable_len);
2827 
2828   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2829   Label     search;
2830 
2831   bind(search);
2832 
2833   // Handle IncompatibleClassChangeError.
2834   // If the entry is NULL then we've reached the end of the table
2835   // without finding the expected interface, so throw an exception.
2836   load_and_test_long(itable_interface, Address(itable_entry_addr));
2837   z_bre(no_such_interface);
2838 
2839   add2reg(itable_entry_addr, itable_offset_search_inc);
2840   z_cgr(itable_interface, intf_klass);
2841   z_brne(search);
2842 
2843   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2844   if (return_method) {
2845     const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2846                                       itableOffsetEntry::interface_offset_in_bytes()) -
2847                                      itable_offset_search_inc;
2848 
2849     // Compute itableMethodEntry and get method and entry point.
2850     // We use addressing with index and displacement, since the formula
2851     // for computing the entry's offset has a fixed and a dynamic part;
2852     // the latter depends on the matched interface entry and on whether
2853     // the itable index has been passed as a register or as a constant value.
2854     int method_offset = itableMethodEntry::method_offset_in_bytes();
2855                              // Fixed part (displacement), common operand.
2856     Register itable_offset = method_result;  // Dynamic part (index register).
2857 
2858     if (itable_index.is_register()) {
2859        // Compute the method's offset in that register, for the formula, see the
2860        // else-clause below.
2861        z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
2862        z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2863     } else {
2864       // Displacement increases.
2865       method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2866 
2867       // Load index from itable.
2868       z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2869     }
2870 
2871     // Finally load the method's oop.
2872     z_lg(method_result, method_offset, itable_offset, recv_klass);
2873   }
2874   BLOCK_COMMENT("} lookup_interface_method");
2875 }
2876 
2877 // Lookup for virtual method invocation.
2878 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2879                                            RegisterOrConstant vtable_index,
2880                                            Register           method_result) {
2881   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2882   assert(vtableEntry::size() * wordSize == wordSize,
2883          "else adjust the scaling in the code below");
2884 
2885   BLOCK_COMMENT("lookup_virtual_method {");
2886 
2887   const int base = in_bytes(Klass::vtable_start_offset());
2888 
2889   if (vtable_index.is_constant()) {
2890     // Load with base + disp.
2891     Address vtable_entry_addr(recv_klass,
2892                               vtable_index.as_constant() * wordSize +
2893                               base +
2894                               vtableEntry::method_offset_in_bytes());
2895 
2896     z_lg(method_result, vtable_entry_addr);
2897   } else {
2898     // Shift index properly and load with base + index + disp.
2899     Register vindex = vtable_index.as_register();
2900     Address  vtable_entry_addr(recv_klass, vindex,
2901                                base + vtableEntry::method_offset_in_bytes());
2902 
2903     z_sllg(vindex, vindex, exact_log2(wordSize));
2904     z_lg(method_result, vtable_entry_addr);
2905   }
2906   BLOCK_COMMENT("} lookup_virtual_method");
2907 }
2908 
2909 // Factor out code to call ic_miss_handler.
2910 // Generate code to call the inline cache miss handler.
2911 //
2912 // In most cases, this code will be generated out-of-line.
2913 // The method parameters are intended to provide some variability.
2914 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2915 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2916 //                  Any value except 0x00 is supported.
2917 //                  = 0x00 - do not generate illtrap instructions,
2918 //                           use nops to fill unused space.
2919 //   requiredSize - required size of the generated code. If the actually
2920 //                  generated code is smaller, use padding instructions to fill up.
2921 //                  = 0 - no size requirement, no padding.
2922 //   scratch      - scratch register to hold branch target address.
2923 //
2924 //  The method returns the code offset of the bound label.
2925 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2926   intptr_t startOffset = offset();
2927 
2928   // Prevent entry at content_begin().
2929   if (trapMarker != 0) {
2930     z_illtrap(trapMarker);
2931   }
2932 
2933   // Load address of inline cache miss code into scratch register
2934   // and branch to cache miss handler.
2935   BLOCK_COMMENT("IC miss handler {");
2936   BIND(ICM);
2937   unsigned int   labelOffset = offset();
2938   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2939 
2940   load_const_optimized(scratch, icmiss);
2941   z_br(scratch);
2942 
2943   // Fill unused space.
2944   if (requiredSize > 0) {
2945     while ((offset() - startOffset) < requiredSize) {
2946       if (trapMarker == 0) {
2947         z_nop();
2948       } else {
2949         z_illtrap(trapMarker);
2950       }
2951     }
2952   }
2953   BLOCK_COMMENT("} IC miss handler");
2954   return labelOffset;
2955 }
2956 
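     // Unverified Entry Point (UEP) of an nmethod: optionally null-check the receiver,
     // then compare the inline cache klass against the receiver's klass and branch to
     // ic_miss on mismatch.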
2957 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2958   Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
2959   int      klass_offset = oopDesc::klass_offset_in_bytes();
2960   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2961     if (VM_Version::has_CompareBranch()) {
2962       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2963     } else {
2964       z_ltgr(Z_ARG1, Z_ARG1);
2965       z_bre(ic_miss);
2966     }
2967   }
2968   // Compare cached class against klass from receiver.
2969   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2970   z_brne(ic_miss);
2971 }
2972 
2973 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2974                                                    Register   super_klass,
2975                                                    Register   temp1_reg,
2976                                                    Label*     L_success,
2977                                                    Label*     L_failure,
2978                                                    Label*     L_slow_path,
2979                                                    RegisterOrConstant super_check_offset) {
2980 
2981   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2982   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2983 
2984   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2985   bool need_slow_path = (must_load_sco ||
2986                          super_check_offset.constant_or_zero() == sc_offset);
2987 
2988   // Input registers must not overlap.
2989   assert_different_registers(sub_klass, super_klass, temp1_reg);
2990   if (super_check_offset.is_register()) {
2991     assert_different_registers(sub_klass, super_klass,
2992                                super_check_offset.as_register());
2993   } else if (must_load_sco) {
2994     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2995   }
2996 
2997   const Register Rsuper_check_offset = temp1_reg;
2998 
2999   NearLabel L_fallthrough;
3000   int label_nulls = 0;
3001   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
3002   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
3003   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
3004   assert(label_nulls <= 1 ||
3005          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
3006          "at most one NULL in the batch, usually");
3007 
3008   BLOCK_COMMENT("check_klass_subtype_fast_path {");
3009   // If the pointers are equal, we are done (e.g., String[] elements).
3010   // This self-check enables sharing of secondary supertype arrays among
3011   // non-primary types such as array-of-interface. Otherwise, each such
3012   // type would need its own customized secondary supers array (SSA).
3013   // We move this check to the front of the fast path because many
3014   // type checks are in fact trivially successful in this manner,
3015   // so we get a nicely predicted branch right at the start of the check.
3016   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
3017 
3018   // Check the supertype display, which is uint.
3019   if (must_load_sco) {
3020     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
3021     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
3022   }
3023   Address super_check_addr(sub_klass, super_check_offset, 0);
3024   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
3025 
3026   // This check has worked decisively for primary supers.
3027   // Secondary supers are sought in the super_cache ('super_cache_addr').
3028   // (Secondary supers are interfaces and very deeply nested subtypes.)
3029   // This works in the same check above because of a tricky aliasing
3030   // between the super_cache and the primary super display elements.
3031   // (The 'super_check_addr' can address either, as the case requires.)
3032   // Note that the cache is updated below if it does not help us find
3033   // what we need immediately.
3034   // So if it was a primary super, we can just fail immediately.
3035   // Otherwise, it's the slow path for us (no success at this point).
3036 
3037   // Hacked jmp, which may only be used just before L_fallthrough.
3038 #define final_jmp(label)                                                \
3039   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3040   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
3041 
3042   if (super_check_offset.is_register()) {
3043     branch_optimized(Assembler::bcondEqual, *L_success);
3044     z_cfi(super_check_offset.as_register(), sc_offset);
3045     if (L_failure == &L_fallthrough) {
3046       branch_optimized(Assembler::bcondEqual, *L_slow_path);
3047     } else {
3048       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3049       final_jmp(*L_slow_path);
3050     }
3051   } else if (super_check_offset.as_constant() == sc_offset) {
3052     // Need a slow path; fast failure is impossible.
3053     if (L_slow_path == &L_fallthrough) {
3054       branch_optimized(Assembler::bcondEqual, *L_success);
3055     } else {
3056       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3057       final_jmp(*L_success);
3058     }
3059   } else {
3060     // No slow path; it's a fast decision.
3061     if (L_failure == &L_fallthrough) {
3062       branch_optimized(Assembler::bcondEqual, *L_success);
3063     } else {
3064       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3065       final_jmp(*L_success);
3066     }
3067   }
3068 
3069   bind(L_fallthrough);
3070 #undef local_brc
3071 #undef final_jmp
3072   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3073   // fallthru (to slow path)
3074 }
3075 
3076 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3077                                                    Register Rsuperklass,
3078                                                    Register Rarray_ptr,  // tmp
3079                                                    Register Rlength,     // tmp
3080                                                    Label* L_success,
3081                                                    Label* L_failure) {
3082   // Input registers must not overlap.
3083   // Also check for R1 which is explicitly used here.
3084   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3085   NearLabel L_fallthrough, L_loop;
3086   int label_nulls = 0;
3087   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3088   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3089   assert(label_nulls <= 1, "at most one NULL in the batch");
3090 
3091   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3092   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3093 
3094   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3095   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3096 
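     // The code below scans the secondary supers array (an Array<Klass*>) of Rsubklass
     // for Rsuperklass. On a hit, the secondary super cache is updated and control
     // transfers to L_success, otherwise to L_failure.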
3097   // Hacked jmp, which may only be used just before L_fallthrough.
3098 #define final_jmp(label)                                                \
3099   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3100   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3101 
3102   NearLabel loop_iterate, loop_count, match;
3103 
3104   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3105   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3106 
3107   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3108   branch_optimized(Assembler::bcondZero, *L_failure);
3109 
3110   // Oops in table are NO LONGER compressed.
3111   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3112   z_bre(match);                               // Shortcut for array length = 1.
3113 
3114   // No match yet, so we must walk the array's elements.
3115   z_lngfr(Rlength, Rlength);
3116   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3117   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3118   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3119   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3120   z_bru(loop_count);
3121 
3122   BIND(loop_iterate);
3123   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3124   z_bre(match);
3125   BIND(loop_count);
3126   z_brxlg(Rlength, Z_R1, loop_iterate);
3127 
3128   // Rsuperklass not found among secondary super classes -> failure.
3129   branch_optimized(Assembler::bcondAlways, *L_failure);
3130 
3131   // Got a hit. Return success (zero result). Set cache.
3132   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3133 
3134   BIND(match);
3135 
3136   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3137 
3138   final_jmp(*L_success);
3139 
3140   // Exit to the surrounding code.
3141   BIND(L_fallthrough);
3142 #undef local_brc
3143 #undef final_jmp
3144   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3145 }
3146 
3147 // Emitter for combining fast and slow path.
3148 void MacroAssembler::check_klass_subtype(Register sub_klass,
3149                                          Register super_klass,
3150                                          Register temp1_reg,
3151                                          Register temp2_reg,
3152                                          Label&   L_success) {
3153   NearLabel failure;
3154   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3155   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3156                                 &L_success, &failure, NULL);
3157   check_klass_subtype_slow_path(sub_klass, super_klass,
3158                                 temp1_reg, temp2_reg, &L_success, NULL);
3159   BIND(failure);
3160   BLOCK_COMMENT("} check_klass_subtype");
3161 }
3162 
3163 // Increment a counter at counter_address when the eq condition code is
3164 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3165 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3166   Label l;
3167   z_brne(l);
3168   load_const(tmp1_reg, counter_address);
3169   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3170   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3171   bind(l);
3172 }
3173 
3174 // Semantics are dependent on the slow_case label:
3175 //   If the slow_case label is not NULL, failure to biased-lock the object
3176 //   transfers control to the location of the slow_case label. If the
3177 //   object could be biased-locked, control is transferred to the done label.
3178 //   The condition code is unpredictable.
3179 //
3180 //   If the slow_case label is NULL, failure to biased-lock the object results
3181 //   in a transfer of control to the done label with a condition code of not_equal.
3182 //   If the biased lock could be successfully obtained, control is transferred to
3183 //   the done label with a condition code of equal.
3184 //   It is mandatory to act on the condition code at the done label.
3185 //
3186 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3187                                           Register  mark_reg,
3188                                           Register  temp_reg,
3189                                           Register  temp2_reg,    // May be Z_R0!
3190                                           Label    &done,
3191                                           Label    *slow_case) {
3192   assert(UseBiasedLocking, "why call this otherwise?");
3193   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3194 
3195   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3196 
3197   BLOCK_COMMENT("biased_locking_enter {");
3198 
3199   // Biased locking
3200   // See whether the lock is currently biased toward our thread and
3201   // whether the epoch is still valid.
3202   // Note that the runtime guarantees sufficient alignment of JavaThread
3203   // pointers to allow age to be placed into low bits.
3204   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3205          "biased locking makes assumptions about bit layout");
3206   z_lr(temp_reg, mark_reg);
3207   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3208   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3209   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3210 
3211   load_prototype_header(temp_reg, obj_reg);
3212   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3213 
3214   z_ogr(temp_reg, Z_thread);
3215   z_xgr(temp_reg, mark_reg);
3216   z_ngr(temp_reg, temp2_reg);
3217   if (PrintBiasedLockingStatistics) {
3218     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3219     // Restore mark_reg.
3220     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3221   }
3222   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3223 
3224   Label try_revoke_bias;
3225   Label try_rebias;
3226   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3227 
3228   //----------------------------------------------------------------------------
3229   // At this point we know that the header has the bias pattern and
3230   // that we are not the bias owner in the current epoch. We need to
3231   // figure out more details about the state of the header in order to
3232   // know what operations can be legally performed on the object's
3233   // header.
3234 
3235   // If the low three bits in the xor result aren't clear, that means
3236   // the prototype header is no longer biased and we have to revoke
3237   // the bias on this object.
3238   z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3239   z_brnaz(try_revoke_bias);
3240 
3241   // Biasing is still enabled for this data type. See whether the
3242   // epoch of the current bias is still valid, meaning that the epoch
3243   // bits of the mark word are equal to the epoch bits of the
3244   // prototype header. (Note that the prototype header's epoch bits
3245   // only change at a safepoint.) If not, attempt to rebias the object
3246   // toward the current thread. Note that we must be absolutely sure
3247   // that the current epoch is invalid in order to do this because
3248   // otherwise the manipulations it performs on the mark word are
3249   // illegal.
3250   z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3251   z_brnaz(try_rebias);
3252 
3253   //----------------------------------------------------------------------------
3254   // The epoch of the current bias is still valid but we know nothing
3255   // about the owner; it might be set or it might be clear. Try to
3256   // acquire the bias of the object using an atomic operation. If this
3257   // fails we will go in to the runtime to revoke the object's bias.
3258   // Note that we first construct the presumed unbiased header so we
3259   // don't accidentally blow away another thread's valid bias.
3260   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3261          markOopDesc::epoch_mask_in_place);
3262   z_lgr(temp_reg, Z_thread);
3263   z_llgfr(mark_reg, mark_reg);
3264   z_ogr(temp_reg, mark_reg);
3265 
3266   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3267 
3268   z_csg(mark_reg, temp_reg, 0, obj_reg);
3269 
3270   // If the biasing toward our thread failed, this means that
3271   // another thread succeeded in biasing it toward itself and we
3272   // need to revoke that bias. The revocation will occur in the
3273   // interpreter runtime in the slow case.
3274 
3275   if (PrintBiasedLockingStatistics) {
3276     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3277                          temp_reg, temp2_reg);
3278   }
3279   if (slow_case != NULL) {
3280     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3281   }
3282   branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3283 
3284   //----------------------------------------------------------------------------
3285   bind(try_rebias);
3286   // At this point we know the epoch has expired, meaning that the
3287   // current "bias owner", if any, is actually invalid. Under these
3288   // circumstances _only_, we are allowed to use the current header's
3289   // value as the comparison value when doing the cas to acquire the
3290   // bias in the current epoch. In other words, we allow transfer of
3291   // the bias from one thread to another directly in this situation.
3292 
3293   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
3294   load_prototype_header(temp_reg, obj_reg);
3295   z_llgfr(mark_reg, mark_reg);
3296 
3297   z_ogr(temp_reg, Z_thread);
3298 
3299   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3300 
3301   z_csg(mark_reg, temp_reg, 0, obj_reg);
3302 
3303   // If the biasing toward our thread failed, this means that
3304   // another thread succeeded in biasing it toward itself and we
3305   // need to revoke that bias. The revocation will occur in the
3306   // interpreter runtime in the slow case.
3307 
3308   if (PrintBiasedLockingStatistics) {
3309     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3310   }
3311   if (slow_case != NULL) {
3312     branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3313   }
3314   z_bru(done);           // Biased lock status given in condition code.
3315 
3316   //----------------------------------------------------------------------------
3317   bind(try_revoke_bias);
3318   // The prototype mark in the klass doesn't have the bias bit set any
3319   // more, indicating that objects of this data type are not supposed
3320   // to be biased any more. We are going to try to reset the mark of
3321   // this object to the prototype value and fall through to the
3322   // CAS-based locking scheme. Note that if our CAS fails, it means
3323   // that another thread raced us for the privilege of revoking the
3324   // bias of this particular object, so it's okay to continue in the
3325   // normal locking code.
3326   load_prototype_header(temp_reg, obj_reg);
3327 
3328   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3329 
3330   z_csg(mark_reg, temp_reg, 0, obj_reg);
3331 
3332   // Fall through to the normal CAS-based lock, because no matter what
3333   // the result of the above CAS, some thread must have succeeded in
3334   // removing the bias bit from the object's header.
3335   if (PrintBiasedLockingStatistics) {
3336     // z_cgr(mark_reg, temp2_reg);
3337     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3338   }
3339 
3340   bind(cas_label);
3341   BLOCK_COMMENT("} biased_locking_enter");
3342 }
3343 
3344 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3345   // Check for biased locking unlock case, which is a no-op
3346   // Note: we do not have to check the thread ID for two reasons.
3347   // First, the interpreter checks for IllegalMonitorStateException at
3348   // a higher level. Second, if the bias was revoked while we held the
3349   // lock, the object could not be rebiased toward another thread, so
3350   // the bias bit would be clear.
3351   BLOCK_COMMENT("biased_locking_exit {");
3352 
3353   z_lg(temp_reg, 0, mark_addr);
3354   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3355 
3356   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3357   z_bre(done);
3358   BLOCK_COMMENT("} biased_locking_exit");
3359 }
3360 
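     // Fast path for monitorenter as emitted by the compiler:
     //   1) optionally try biased locking,
     //   2) if the object is unlocked, CAS the displaced header from the box into the mark word,
     //   3) if that fails but the mark word points into our own stack, treat it as a recursive lock,
     //   4) otherwise (inflated monitor) try to CAS the monitor's owner from NULL to the current thread.
     // On exit, CC == EQ indicates success; otherwise the caller branches into the runtime.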
3361 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3362   Register displacedHeader = temp1;
3363   Register currentHeader = temp1;
3364   Register temp = temp2;
3365   NearLabel done, object_has_monitor;
3366 
3367   BLOCK_COMMENT("compiler_fast_lock_object {");
3368 
3369   // Load markOop from oop into mark.
3370   z_lg(displacedHeader, 0, oop);
3371 
3372   if (try_bias) {
3373     biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3374   }
3375 
3376   // Handle existing monitor.
3377   // The object has an existing monitor iff (mark & monitor_value) != 0.
3378   guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3379   z_lr(temp, displacedHeader);
3380   z_nill(temp, markOopDesc::monitor_value);
3381   z_brne(object_has_monitor);
3382 
3383   // Set mark to markOop | markOopDesc::unlocked_value.
3384   z_oill(displacedHeader, markOopDesc::unlocked_value);
3385 
3386   // Load Compare Value application register.
3387 
3388   // Initialize the box (must happen before we update the object mark).
3389   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3390 
3391   // Memory Fence (in cmpxchgd)
3392   // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
3393 
3394   // If the compare-and-swap succeeded, then we found an unlocked object and we
3395   // have now locked it.
3396   z_csg(displacedHeader, box, 0, oop);
3397   assert(currentHeader==displacedHeader, "must be same register"); // Both names alias the same register, see above.
3398   z_bre(done);
3399 
3400   // We did not see an unlocked object so try the fast recursive case.
3401 
3402   z_sgr(currentHeader, Z_SP);
3403   load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3404 
3405   z_ngr(currentHeader, temp);
3406   //   z_brne(done);
3407   //   z_release();
3408   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3409 
3410   z_bru(done);
3411 
3412   Register zero = temp;
3413   Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
3414   bind(object_has_monitor);
3415   // The object's monitor m is unlocked iff m->owner == NULL,
3416   // otherwise m->owner may contain a thread or a stack address.
3417   //
3418   // Try to CAS m->owner from NULL to current thread.
3419   z_lghi(zero, 0);
3420   // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3421   z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3422   // Store a non-null value into the box.
3423   z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3424 #ifdef ASSERT
3425   z_brne(done);
3426   // We've acquired the monitor, check some invariants.
3427   // Invariant 1: _recursions should be 0.
3428   asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3429                           "monitor->_recursions should be 0", -1);
3430   z_ltgr(zero, zero); // Set CR=EQ.
3431 #endif
3432   bind(done);
3433 
3434   BLOCK_COMMENT("} compiler_fast_lock_object");
3435   // If locking was successful, CR should indicate 'EQ'.
3436   // The compiler or the native wrapper generates a branch to the runtime call
3437   // _complete_monitor_locking_Java.
3438 }
3439 
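     // Fast path for monitorexit as emitted by the compiler. It mirrors
     // compiler_fast_lock_object(): biased unlock, recursive unlock (displaced header == 0),
     // CAS of the displaced header back into the mark word for the lightweight case, and an
     // inflated-monitor path. CC == EQ indicates success, CC == NE failure.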
3440 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3441   Register displacedHeader = temp1;
3442   Register currentHeader = temp2;
3443   Register temp = temp1;
3444   Register monitor = temp2;
3445 
3446   Label done, object_has_monitor;
3447 
3448   BLOCK_COMMENT("compiler_fast_unlock_object {");
3449 
3450   if (try_bias) {
3451     biased_locking_exit(oop, currentHeader, done);
3452   }
3453 
3454   // Find the lock address and load the displaced header from the stack.
3455   // if the displaced header is zero, we have a recursive unlock.
3456   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3457   z_bre(done);
3458 
3459   // Handle existing monitor.
3460   // The object has an existing monitor iff (mark & monitor_value) != 0.
3461   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3462   guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3463   z_nill(currentHeader, markOopDesc::monitor_value);
3464   z_brne(object_has_monitor);
3465 
3466   // Check if it is still a lightweight lock; this is true if we see
3467   // the stack address of the basicLock in the markOop of the object.
3468   // Copy box to currentHeader such that csg does not kill it.
3469   z_lgr(currentHeader, box);
3470   z_csg(currentHeader, displacedHeader, 0, oop);
3471   z_bru(done); // Csg sets CR as desired.
3472 
3473   // Handle existing monitor.
3474   bind(object_has_monitor);
3475   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3476   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3477   z_brne(done);
3478   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3479   z_brne(done);
3480   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3481   z_brne(done);
3482   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3483   z_brne(done);
3484   z_release();
3485   z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3486 
3487   bind(done);
3488 
3489   BLOCK_COMMENT("} compiler_fast_unlock_object");
3490   // flag == EQ indicates success
3491   // flag == NE indicates failure
3492 }
3493 
3494 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3495   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3496   bs->resolve_jobject(this, value, tmp1, tmp2);
3497 }
3498 
3499 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3500 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3501   BLOCK_COMMENT("set_last_Java_frame {");
3502 
3503   // Always set last_Java_pc and flags first because once last_Java_sp
3504   // is visible, has_last_Java_frame is true and users will look at the
3505   // rest of the fields. (Note: flags should always be zero before we
3506   // get here, so they don't need to be set.)
3507 
3508   // Verify that last_Java_pc was zeroed on return to Java.
3509   if (allow_relocation) {
3510     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3511                             Z_thread,
3512                             "last_Java_pc not zeroed before leaving Java",
3513                             0x200);
3514   } else {
3515     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3516                                    Z_thread,
3517                                    "last_Java_pc not zeroed before leaving Java",
3518                                    0x200);
3519   }
3520 
3521   // When returning from calling out from Java mode the frame anchor's
3522   // last_Java_pc will always be set to NULL. It is set here so that
3523   // if we are doing a call to native (not VM) that we capture the
3524   // known pc and don't have to rely on the native call having a
3525   // standard frame linkage where we can find the pc.
3526   if (last_Java_pc!=noreg) {
3527     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3528   }
3529 
3530   // This membar release is not required on z/Architecture, since the sequence of stores
3531   // is maintained. Nevertheless, we leave it in to document the required ordering.
3532   // The implementation of z_release() should be empty.
3533   // z_release();
3534 
3535   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3536   BLOCK_COMMENT("} set_last_Java_frame");
3537 }
3538 
3539 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3540   BLOCK_COMMENT("reset_last_Java_frame {");
3541 
3542   if (allow_relocation) {
3543     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3544                                Z_thread,
3545                                "SP was not set, still zero",
3546                                0x202);
3547   } else {
3548     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3549                                       Z_thread,
3550                                       "SP was not set, still zero",
3551                                       0x202);
3552   }
3553 
3554   // _last_Java_sp = 0
3555   // Clearing storage must be atomic here, so don't use clear_mem()!
3556   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3557 
3558   // _last_Java_pc = 0
3559   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3560 
3561   BLOCK_COMMENT("} reset_last_Java_frame");
3562   return;
3563 }
3564 
3565 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3566   assert_different_registers(sp, tmp1);
3567 
3568   // We cannot trust that code generated by the C++ compiler saves R14
3569   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3570   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3571   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3572   // it into the frame anchor.
3573   get_PC(tmp1);
3574   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3575 }
3576 
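     // Publish a new thread state. The release barrier orders all preceding accesses
     // before the state change becomes visible (z_release() should be empty on
     // z/Architecture, but documents the intended ordering).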
3577 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3578   z_release();
3579 
3580   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3581   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3582   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3583 }
3584 
3585 void MacroAssembler::get_vm_result(Register oop_result) {
3586   verify_thread();
3587 
3588   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3589   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3590 
3591   verify_oop(oop_result);
3592 }
3593 
3594 void MacroAssembler::get_vm_result_2(Register result) {
3595   verify_thread();
3596 
3597   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3598   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3599 }
3600 
3601 // We require that C code which does not return a value in vm_result will
3602 // leave it undisturbed.
3603 void MacroAssembler::set_vm_result(Register oop_result) {
3604   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3605 }
3606 
3607 // Explicit null checks (used for method handle code).
3608 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3609   if (!ImplicitNullChecks) {
3610     NearLabel ok;
3611 
3612     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3613 
3614     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3615     address exception_entry = Interpreter::throw_NullPointerException_entry();
3616     load_absolute_address(reg, exception_entry);
3617     z_br(reg);
3618 
3619     bind(ok);
3620   } else {
3621     if (needs_explicit_null_check((intptr_t)offset)) {
3622       // Provoke OS NULL exception if reg = NULL by
3623       // accessing M[reg] w/o changing any registers.
3624       z_lg(tmp, 0, reg);
3625     }
3626     // else
3627       // Nothing to do, (later) access of M[reg + offset]
3628       // will provoke OS NULL exception if reg = NULL.
3629   }
3630 }
3631 
3632 //-------------------------------------
3633 //  Compressed Klass Pointers
3634 //-------------------------------------
3635 
3636 // Klass oop manipulations if compressed.
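     // Encoding: narrow_klass = (klass_ptr - narrow_klass_base) >> narrow_klass_shift.
     // Decoding (further below) is the inverse operation.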
3637 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3638   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3639   address  base    = Universe::narrow_klass_base();
3640   int      shift   = Universe::narrow_klass_shift();
3641   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3642 
3643   BLOCK_COMMENT("cKlass encoder {");
3644 
3645 #ifdef ASSERT
3646   Label ok;
3647   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3648   z_brc(Assembler::bcondAllZero, ok);
3649   // The plain disassembler does not recognize illtrap. It instead displays
3650   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3651   // the proper beginning of the next instruction.
3652   z_illtrap(0xee);
3653   z_illtrap(0xee);
3654   bind(ok);
3655 #endif
3656 
3657   if (base != NULL) {
3658     unsigned int base_h = ((unsigned long)base)>>32;
3659     unsigned int base_l = (unsigned int)((unsigned long)base);
3660     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3661       lgr_if_needed(dst, current);
3662       z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3663     } else if ((base_h == 0) && (base_l != 0)) {
3664       lgr_if_needed(dst, current);
3665       z_agfi(dst, -(int)base_l);
3666     } else {
3667       load_const(Z_R0, base);
3668       lgr_if_needed(dst, current);
3669       z_sgr(dst, Z_R0);
3670     }
3671     current = dst;
3672   }
3673   if (shift != 0) {
3674     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3675     z_srlg(dst, current, shift);
3676     current = dst;
3677   }
3678   lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0).
3679 
3680   BLOCK_COMMENT("} cKlass encoder");
3681 }
3682 
3683 // This function calculates the size of the code generated by
3684 //   decode_klass_not_null(register dst, Register src)
3685 // when (Universe::heap() != NULL). Hence, if the instructions
3686 // it generates change, then this method needs to be updated.
3687 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3688   address  base    = Universe::narrow_klass_base();
3689   int shift_size   = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
3690   int addbase_size = 0;
3691   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3692 
3693   if (base != NULL) {
3694     unsigned int base_h = ((unsigned long)base)>>32;
3695     unsigned int base_l = (unsigned int)((unsigned long)base);
3696     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3697       addbase_size += 6; /* aih */
3698     } else if ((base_h == 0) && (base_l != 0)) {
3699       addbase_size += 6; /* algfi */
3700     } else {
3701       addbase_size += load_const_size();
3702       addbase_size += 4; /* algr */
3703     }
3704   }
3705 #ifdef ASSERT
3706   addbase_size += 10;
3707   addbase_size += 2; // Extra sigill.
3708 #endif
3709   return addbase_size + shift_size;
3710 }
3711 
3712 // !!! If the instructions that get generated here change
3713 //     then function instr_size_for_decode_klass_not_null()
3714 //     needs to get updated.
3715 // This variant of decode_klass_not_null() must generate predictable code!
3716 // The code must only depend on globally known parameters.
3717 void MacroAssembler::decode_klass_not_null(Register dst) {
3718   address  base    = Universe::narrow_klass_base();
3719   int      shift   = Universe::narrow_klass_shift();
3720   int      beg_off = offset();
3721   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3722 
3723   BLOCK_COMMENT("cKlass decoder (const size) {");
3724 
3725   if (shift != 0) { // Shift required?
3726     z_sllg(dst, dst, shift);
3727   }
3728   if (base != NULL) {
3729     unsigned int base_h = ((unsigned long)base)>>32;
3730     unsigned int base_l = (unsigned int)((unsigned long)base);
3731     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3732       z_aih(dst, base_h);     // Base has no set bits in lower half.
3733     } else if ((base_h == 0) && (base_l != 0)) {
3734       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3735     } else {
3736       load_const(Z_R0, base); // Base has set bits everywhere.
3737       z_algr(dst, Z_R0);
3738     }
3739   }
3740 
3741 #ifdef ASSERT
3742   Label ok;
3743   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3744   z_brc(Assembler::bcondAllZero, ok);
3745   // The plain disassembler does not recognize illtrap. It instead displays
3746   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3747   // the proper beginning of the next instruction.
3748   z_illtrap(0xd1);
3749   z_illtrap(0xd1);
3750   bind(ok);
3751 #endif
3752   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3753 
3754   BLOCK_COMMENT("} cKlass decoder (const size)");
3755 }
3756 
3757 // This variant of decode_klass_not_null() is for cases where
3758 //  1) the size of the generated instructions may vary
3759 //  2) the result is (potentially) stored in a register different from the source.
3760 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3761   address base  = Universe::narrow_klass_base();
3762   int     shift = Universe::narrow_klass_shift();
3763   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3764 
3765   BLOCK_COMMENT("cKlass decoder {");
3766 
3767   if (src == noreg) src = dst;
3768 
3769   if (shift != 0) { // Shift or at least move required?
3770     z_sllg(dst, src, shift);
3771   } else {
3772     lgr_if_needed(dst, src);
3773   }
3774 
3775   if (base != NULL) {
3776     unsigned int base_h = ((unsigned long)base)>>32;
3777     unsigned int base_l = (unsigned int)((unsigned long)base);
3778     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3779       z_aih(dst, base_h);     // Base has no set bits in lower half.
3780     } else if ((base_h == 0) && (base_l != 0)) {
3781       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3782     } else {
3783       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3784       z_algr(dst, Z_R0);
3785     }
3786   }
3787 
3788 #ifdef ASSERT
3789   Label ok;
3790   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3791   z_brc(Assembler::bcondAllZero, ok);
3792   // The plain disassembler does not recognize illtrap. It instead displays
3793   // a 32-bit value. Issuing two illtraps assures the disassembler finds
3794   // the proper beginning of the next instruction.
3795   z_illtrap(0xd2);
3796   z_illtrap(0xd2);
3797   bind(ok);
3798 #endif
3799   BLOCK_COMMENT("} cKlass decoder");
3800 }
3801 
3802 void MacroAssembler::load_klass(Register klass, Address mem) {
3803   if (UseCompressedClassPointers) {
3804     z_llgf(klass, mem);
3805     // Attention: no null check here!
3806     decode_klass_not_null(klass);
3807   } else {
3808     z_lg(klass, mem);
3809   }
3810 }
3811 
3812 void MacroAssembler::load_klass(Register klass, Register src_oop) {
3813   if (UseCompressedClassPointers) {
3814     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3815     // Attention: no null check here!
3816     decode_klass_not_null(klass);
3817   } else {
3818     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3819   }
3820 }
3821 
3822 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
3823   assert_different_registers(Rheader, Rsrc_oop);
3824   load_klass(Rheader, Rsrc_oop);
3825   z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
3826 }
3827 
3828 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
3829   if (UseCompressedClassPointers) {
3830     assert_different_registers(dst_oop, klass, Z_R0);
3831     if (ck == noreg) ck = klass;
3832     encode_klass_not_null(ck, klass);
3833     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3834   } else {
3835     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3836   }
3837 }
3838 
3839 void MacroAssembler::store_klass_gap(Register s, Register d) {
3840   if (UseCompressedClassPointers) {
3841     assert(s != d, "not enough registers");
3842     // Support s = noreg.
3843     if (s != noreg) {
3844       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
3845     } else {
3846       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
3847     }
3848   }
3849 }
3850 
3851 // Compare klass ptr in memory against klass ptr in register.
3852 //
3853 // Rop1            - klass in register, always uncompressed.
3854 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
3855 // Rbase           - Base address of cKlass in memory.
3856 // maybeNULL       - True if Rop1 possibly is a NULL.
3857 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
3858 
3859   BLOCK_COMMENT("compare klass ptr {");
3860 
3861   if (UseCompressedClassPointers) {
3862     const int shift = Universe::narrow_klass_shift();
3863     address   base  = Universe::narrow_klass_base();
3864 
3865     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
3866     assert_different_registers(Rop1, Z_R0);
3867     assert_different_registers(Rop1, Rbase, Z_R1);
3868 
3869     // First encode register oop and then compare with cOop in memory.
3870     // This sequence saves an unnecessary cOop load and decode.
3871     if (base == NULL) {
3872       if (shift == 0) {
3873         z_cl(Rop1, disp, Rbase);     // Unscaled
3874       } else {
3875         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
3876         z_cl(Z_R0, disp, Rbase);
3877       }
3878     } else {                         // HeapBased
3879 #ifdef ASSERT
3880       bool     used_R0 = true;
3881       bool     used_R1 = true;
3882 #endif
3883       Register current = Rop1;
3884       Label    done;
3885 
3886       if (maybeNULL) {       // NULL ptr must be preserved!
3887         z_ltgr(Z_R0, current);
3888         z_bre(done);
3889         current = Z_R0;
3890       }
3891 
3892       unsigned int base_h = ((unsigned long)base)>>32;
3893       unsigned int base_l = (unsigned int)((unsigned long)base);
3894       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3895         lgr_if_needed(Z_R0, current);
3896         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
3897       } else if ((base_h == 0) && (base_l != 0)) {
3898         lgr_if_needed(Z_R0, current);
3899         z_agfi(Z_R0, -(int)base_l);
3900       } else {
3901         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3902         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
3903       }
3904 
3905       if (shift != 0) {
3906         z_srlg(Z_R0, Z_R0, shift);
3907       }
3908       bind(done);
3909       z_cl(Z_R0, disp, Rbase);
3910 #ifdef ASSERT
3911       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3912       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3913 #endif
3914     }
3915   } else {
3916     z_clg(Rop1, disp, Z_R0, Rbase);
3917   }
3918   BLOCK_COMMENT("} compare klass ptr");
3919 }
3920 
3921 //---------------------------
3922 //  Compressed oops
3923 //---------------------------
3924 
3925 void MacroAssembler::encode_heap_oop(Register oop) {
3926   oop_encoder(oop, oop, true /*maybe null*/);
3927 }
3928 
3929 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
3930   oop_encoder(oop, oop, false /*not null*/);
3931 }
3932 
3933 // Called with something derived from the oop base. e.g. oop_base>>3.
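     // Illustrative example (values assumed for exposition): oop_base = 0x00000007_fffff000.
     //   16-bit parts: ll=0xf000, lh=0xffff, hl=0x0007, hh=0x0000 -> three non-zero parts.
     //   pow2_offset = 0x10000 - 0xf000 = 0x1000; oop_base + 0x1000 = 0x00000008_00000000 has a
     //   single non-zero part, so -0x1000 is returned and the adjusted base is cheap to load.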
3934 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
3935   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
3936   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
3937   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
3938   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
3939   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
3940                                + (oop_base_lh == 0 ? 0:1)
3941                                + (oop_base_hl == 0 ? 0:1)
3942                                + (oop_base_hh == 0 ? 0:1);
3943 
3944   assert(oop_base != 0, "This is for HeapBased cOops only");
3945 
3946   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
3947     uint64_t pow2_offset = 0x10000 - oop_base_ll;
3948     if (pow2_offset < 0x8000) {  // This might not be necessary.
3949       uint64_t oop_base2 = oop_base + pow2_offset;
3950 
3951       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
3952       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
3953       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
3954       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
3955       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
3956                         (oop_base_lh == 0 ? 0:1) +
3957                         (oop_base_hl == 0 ? 0:1) +
3958                         (oop_base_hh == 0 ? 0:1);
3959       if (n_notzero_parts == 1) {
3960         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
3961         return -pow2_offset;
3962       }
3963     }
3964   }
3965   return 0;
3966 }
3967 
3968 // If base address is offset from a straight power of two by just a few pages,
3969 // return this offset to the caller for a possible later composite add.
3970 // TODO/FIX: will only work correctly for 4k pages.
3971 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
3972   int pow2_offset = get_oop_base_pow2_offset(oop_base);
3973 
3974   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
3975 
3976   return pow2_offset;
3977 }
3978 
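     // Load the negated (pow2-adjusted) oop base into Rbase and return the negated pow2 offset.
     // Adding Rbase plus the returned offset to an oop then yields oop - oop_base without a subtraction.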
3979 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
3980   int offset = get_oop_base(Rbase, oop_base);
3981   z_lcgr(Rbase, Rbase);
3982   return -offset;
3983 }
3984 
3985 // Compare compressed oop in memory against oop in register.
3986 // Rop1            - Oop in register.
3987 // mem             - Address of the cOop in memory, given as base register, optional index
3988 //                   register, and displacement.
3989 // maybeNULL       - True if Rop1 possibly is a NULL. The NULL value must be preserved;
3990 //                   if Rop1 is NULL, it is compared without being encoded.
3991 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
3992   Register Rbase  = mem.baseOrR0();
3993   Register Rindex = mem.indexOrR0();
3994   int64_t  disp   = mem.disp();
3995 
3996   const int shift = Universe::narrow_oop_shift();
3997   address   base  = Universe::narrow_oop_base();
3998 
3999   assert(UseCompressedOops, "must be on to call this method");
4000   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4001   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4002   assert_different_registers(Rop1, Z_R0);
4003   assert_different_registers(Rop1, Rbase, Z_R1);
4004   assert_different_registers(Rop1, Rindex, Z_R1);
4005 
4006   BLOCK_COMMENT("compare heap oop {");
4007 
4008   // First encode register oop and then compare with cOop in memory.
4009   // This sequence saves an unnecessary cOop load and decode.
4010   if (base == NULL) {
4011     if (shift == 0) {
4012       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4013     } else {
4014       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4015       z_cl(Z_R0, disp, Rindex, Rbase);
4016     }
4017   } else {                              // HeapBased
4018 #ifdef ASSERT
4019     bool  used_R0 = true;
4020     bool  used_R1 = true;
4021 #endif
4022     Label done;
4023     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4024 
4025     if (maybeNULL) {       // NULL ptr must be preserved!
4026       z_ltgr(Z_R0, Rop1);
4027       z_bre(done);
4028     }
4029 
4030     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4031     z_srlg(Z_R0, Z_R0, shift);
4032 
4033     bind(done);
4034     z_cl(Z_R0, disp, Rindex, Rbase);
4035 #ifdef ASSERT
4036     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4037     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4038 #endif
4039   }
4040   BLOCK_COMMENT("} compare heap oop");
4041 }
4042 
4043 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4044                                      const Address& addr, Register val,
4045                                      Register tmp1, Register tmp2, Register tmp3) {
4046   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
4047                          ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
4048   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4049   decorators = AccessInternal::decorator_fixup(decorators);
4050   bool as_raw = (decorators & AS_RAW) != 0;
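       // AS_RAW requests a plain access: dispatch statically to the base BarrierSetAssembler
       // implementation, bypassing any GC-specific barrier code.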
4051   if (as_raw) {
4052     bs->BarrierSetAssembler::store_at(this, decorators, type,
4053                                       addr, val,
4054                                       tmp1, tmp2, tmp3);
4055   } else {
4056     bs->store_at(this, decorators, type,
4057                  addr, val,
4058                  tmp1, tmp2, tmp3);
4059   }
4060 }
4061 
4062 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4063                                     const Address& addr, Register dst,
4064                                     Register tmp1, Register tmp2, Label *is_null) {
4065   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
4066                          ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
4067   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4068   decorators = AccessInternal::decorator_fixup(decorators);
4069   bool as_raw = (decorators & AS_RAW) != 0;
4070   if (as_raw) {
4071     bs->BarrierSetAssembler::load_at(this, decorators, type,
4072                                      addr, dst,
4073                                      tmp1, tmp2, is_null);
4074   } else {
4075     bs->load_at(this, decorators, type,
4076                 addr, dst,
4077                 tmp1, tmp2, is_null);
4078   }
4079 }
4080 
4081 void MacroAssembler::load_heap_oop(Register dest, const Address &a,
4082                                    Register tmp1, Register tmp2,
4083                                    DecoratorSet decorators, Label *is_null) {
4084   access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);
4085 }
4086 
4087 void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
4088                                     Register tmp1, Register tmp2, Register tmp3,
4089                                     DecoratorSet decorators) {
4090   access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);
4091 }
4092 
4093 //-------------------------------------------------
4094 // Encode compressed oop. Generally usable encoder.
4095 //-------------------------------------------------
4096 // Rsrc - contains regular oop on entry. It remains unchanged.
4097 // Rdst - contains compressed oop on exit.
4098 // Rdst and Rsrc may indicate the same register; in that case, Rsrc is modified.
4099 //
4100 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4101 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4102 //
4103 // only32bitValid is set if later code uses only the lower 32 bits. In this
4104 // case we need not clear the upper 32 bits.
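     // Illustrative example (values assumed for exposition): with oop_base = 0x00000007_00000000
     // and oop_shift = 3, Rsrc = 0x00000007_00001238 encodes to Rdst = (Rsrc - oop_base) >> 3 = 0x247.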
4105 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4106                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4107 
4108   const address oop_base  = Universe::narrow_oop_base();
4109   const int     oop_shift = Universe::narrow_oop_shift();
4110   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4111 
4112   assert(UseCompressedOops, "must be on to call this method");
4113   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4114   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4115 
4116   if (disjoint || (oop_base == NULL)) {
4117     BLOCK_COMMENT("cOop encoder zeroBase {");
4118     if (oop_shift == 0) {
4119       if (oop_base != NULL && !only32bitValid) {
4120         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4121       } else {
4122         lgr_if_needed(Rdst, Rsrc);
4123       }
4124     } else {
4125       z_srlg(Rdst, Rsrc, oop_shift);
4126       if (oop_base != NULL && !only32bitValid) {
4127         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4128       }
4129     }
4130     BLOCK_COMMENT("} cOop encoder zeroBase");
4131     return;
4132   }
4133 
4134   bool used_R0 = false;
4135   bool used_R1 = false;
4136 
4137   BLOCK_COMMENT("cOop encoder general {");
4138   assert_different_registers(Rdst, Z_R1);
4139   assert_different_registers(Rsrc, Rbase);
4140   if (maybeNULL) {
4141     Label done;
4142     // We reorder shifting and subtracting, so that we can compare
4143     // and shift in parallel:
4144     //
4145     // cycle 0:  potential LoadN, base = <const>
4146     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4147     // cycle 2:  if (cr) br,      dst = dst + base + offset
4148 
4149     // Get oop_base components.
4150     if (pow2_offset == -1) {
4151       if (Rdst == Rbase) {
4152         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4153           Rbase = Z_R0;
4154           used_R0 = true;
4155         } else {
4156           Rdst = Z_R1;
4157           used_R1 = true;
4158         }
4159       }
4160       if (Rbase == Z_R1) {
4161         used_R1 = true;
4162       }
4163       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4164     }
4165     assert_different_registers(Rdst, Rbase);
4166 
4167     // Check for NULL oop (must be left alone) and shift.
4168     if (oop_shift != 0) {  // Shift out alignment bits
4169       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4170         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4171       } else {
4172         z_srlg(Rdst, Rsrc, oop_shift);
4173         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4174         // Using z_cghi(Rsrc, 0) instead would avoid writing a register,
4175         // but it did not prove to be faster.
4176       }
4177     } else {
4178       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4179     }
4180     z_bre(done);
4181 
4182     // Subtract oop_base components.
4183     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4184       z_algr(Rdst, Rbase);
4185       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4186     } else {
4187       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4188     }
4189     if (!only32bitValid) {
4190       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4191     }
4192     bind(done);
4193 
4194   } else {  // not null
4195     // Get oop_base components.
4196     if (pow2_offset == -1) {
4197       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4198     }
4199 
4200     // Subtract oop_base components and shift.
4201     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4202       // Don't use lay instruction.
4203       if (Rdst == Rsrc) {
4204         z_algr(Rdst, Rbase);
4205       } else {
4206         lgr_if_needed(Rdst, Rbase);
4207         z_algr(Rdst, Rsrc);
4208       }
4209       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4210     } else {
4211       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4212     }
4213     if (oop_shift != 0) {   // Shift out alignment bits.
4214       z_srlg(Rdst, Rdst, oop_shift);
4215     }
4216     if (!only32bitValid) {
4217       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4218     }
4219   }
4220 #ifdef ASSERT
4221   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4222   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4223 #endif
4224   BLOCK_COMMENT("} cOop encoder general");
4225 }
4226 
4227 //-------------------------------------------------
4228 // decode compressed oop. Generally usable decoder.
4229 //-------------------------------------------------
4230 // Rsrc - contains compressed oop on entry.
4231 // Rdst - contains regular oop on exit.
4232 // Rdst and Rsrc may indicate same register.
4233 // Rdst must not be the same register as Rbase if Rbase was preloaded (before the call).
4234 // If Rbase is not preloaded, Rdst can equal Rbase; then either Z_R0 or Z_R1 must be available as scratch.
4235 // Rbase - register to use for the base
4236 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4237 // For performance, it is good to
4238 //  - avoid Z_R0 for any of the argument registers.
4239 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4240 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
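     // Illustrative example (values assumed for exposition): with oop_base = 0x00000007_00000000
     // and oop_shift = 3, Rsrc = 0x247 decodes to Rdst = (Rsrc << 3) + oop_base = 0x00000007_00001238.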
4241 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4242 
4243   const address oop_base  = Universe::narrow_oop_base();
4244   const int     oop_shift = Universe::narrow_oop_shift();
4245   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4246 
4247   assert(UseCompressedOops, "must be on to call this method");
4248   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4249   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4250          "cOop encoder detected bad shift");
4251 
4252   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4253 
4254   if (oop_base != NULL) {
4255     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4256     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4257     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4258     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4259       BLOCK_COMMENT("cOop decoder disjointBase {");
4260       // We do not need to load the base. Instead, we can install the upper bits
4261       // with an OR instead of an ADD.
4262       Label done;
4263 
4264       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4265       if (maybeNULL) {  // NULL ptr must be preserved!
4266         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4267         z_bre(done);
4268       } else {
4269         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4270       }
4271       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4272         z_oihf(Rdst, oop_base_hf);
4273       } else if (oop_base_hl != 0) {
4274         z_oihl(Rdst, oop_base_hl);
4275       } else {
4276         assert(oop_base_hh != 0, "not heapbased mode");
4277         z_oihh(Rdst, oop_base_hh);
4278       }
4279       bind(done);
4280       BLOCK_COMMENT("} cOop decoder disjointBase");
4281     } else {
4282       BLOCK_COMMENT("cOop decoder general {");
4283       // There are three decode steps:
4284       //   scale oop offset (shift left)
4285       //   get base (in reg) and pow2_offset (constant)
4286       //   add base, pow2_offset, and oop offset
4287       // The following register overlap situations may exist:
4288       // Rdst == Rsrc,  Rbase any other
4289       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4290       //   Loading Rbase does not impact the scaled offset.
4291       // Rdst == Rbase, Rsrc  any other
4292       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4293       //   would destroy the scaled offset.
4294       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4295       //           use Rbase_tmp if base has to be loaded.
4296       // Rsrc == Rbase, Rdst  any other
4297       //   Only possible without preloaded Rbase.
4298       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4299       // Rsrc == Rbase, Rdst == Rbase
4300       //   Only possible without preloaded Rbase.
4301       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4302       //   Remedy: use Rbase_tmp.
4303       //
4304       Label    done;
4305       Register Rdst_tmp       = Rdst;
4306       Register Rbase_tmp      = Rbase;
4307       bool     used_R0        = false;
4308       bool     used_R1        = false;
4309       bool     base_preloaded = pow2_offset >= 0;
4310       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4311       assert(oop_shift != 0, "room for optimization");
4312 
4313       // Check if we need to use scratch registers.
4314       if (Rdst == Rbase) {
4315         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4316         if (Rdst != Rsrc) {
4317           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4318           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4319         } else {
4320           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4321         }
4322       }
4323       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4324 
4325       // Scale oop and check for NULL.
4326       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4327       if (maybeNULL) {  // NULL ptr must be preserved!
4328         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4329         z_bre(done);
4330       } else {
4331         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4332       }
4333 
4334       // Get oop_base components.
4335       if (!base_preloaded) {
4336         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4337       }
4338 
4339       // Add up all components.
4340       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4341         z_algr(Rdst_tmp, Rbase_tmp);
4342         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4343       } else {
4344         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4345       }
4346 
4347       bind(done);
4348       lgr_if_needed(Rdst, Rdst_tmp);
4349 #ifdef ASSERT
4350       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4351       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4352 #endif
4353       BLOCK_COMMENT("} cOop decoder general");
4354     }
4355   } else {
4356     BLOCK_COMMENT("cOop decoder zeroBase {");
4357     if (oop_shift == 0) {
4358       lgr_if_needed(Rdst, Rsrc);
4359     } else {
4360       z_sllg(Rdst, Rsrc, oop_shift);
4361     }
4362     BLOCK_COMMENT("} cOop decoder zeroBase");
4363   }
4364 }
4365 
4366 // ((OopHandle)result).resolve();
4367 void MacroAssembler::resolve_oop_handle(Register result) {
4368   // OopHandle::resolve is an indirection.
4369   z_lg(result, 0, result);
4370 }
4371 
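     // Load the java mirror (java.lang.Class instance) of the method's holder by chasing
     // method -> ConstMethod -> ConstantPool -> pool_holder (Klass) -> java_mirror (OopHandle) -> oop.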
4372 void MacroAssembler::load_mirror(Register mirror, Register method) {
4373   mem2reg_opt(mirror, Address(method, Method::const_offset()));
4374   mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
4375   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4376   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4377   resolve_oop_handle(mirror);
4378 }
4379 
4380 //---------------------------------------------------------------
4381 //---  Operations on arrays.
4382 //---------------------------------------------------------------
4383 
4384 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4385 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4386 // work registers anyway.
4387 // Actually, only r0, r1, and r5 are killed.
4388 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
4389   // Src_addr is evenReg.
4390   // Src_len is odd_Reg.
4391 
4392   int      block_start = offset();
4393   Register tmp_reg  = src_len; // Holds target instr addr for EX.
4394   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4395   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4396 
4397   Label doXC, doMVCLE, done;
4398 
4399   BLOCK_COMMENT("Clear_Array {");
4400 
4401   // Check for zero len and convert to long.
4402   z_ltgfr(src_len, cnt_arg);      // Sign-extend len to 64 bit (kept in src_len for later use); sets the condition code.
4403   z_bre(done);                    // Nothing to do if len == 0.
4404 
4405   // Prefetch data to be cleared.
4406   if (VM_Version::has_Prefetch()) {
4407     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4408     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4409   }
4410 
4411   z_sllg(dst_len, src_len, 3);    // #bytes to clear.
4412   z_cghi(src_len, 32);            // Check for len <= 256 bytes (<=32 DW).
4413   z_brnh(doXC);                   // If so, use executed XC to clear.
4414 
4415   // MVCLE: initialize long arrays (general case).
4416   bind(doMVCLE);
4417   z_lgr(dst_addr, base_pointer_arg);
4418   clear_reg(src_len, true, false); // Src len of MVCLE is zero.
4419 
4420   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4421   z_bru(done);
4422 
4423   // XC: initialize short arrays.
4424   Label XC_template; // Instr template, never exec directly!
4425     bind(XC_template);
4426     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
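         // XC of a storage operand with itself zeroes the operand. The template above encodes
         // length 0; EX/EXRL below merges the real length (dst_len = #bytes-1) from the register
         // into the executed instruction.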
4427 
4428   bind(doXC);
4429     add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
4430     if (VM_Version::has_ExecuteExtensions()) {
4431       z_exrl(dst_len, XC_template);   // Execute XC with var. len.
4432     } else {
4433       z_larl(tmp_reg, XC_template);
4434       z_ex(dst_len,0,Z_R0,tmp_reg);   // Execute XC with var. len.
4435     }
4436     // z_bru(done);      // fallthru
4437 
4438   bind(done);
4439 
4440   BLOCK_COMMENT("} Clear_Array");
4441 
4442   int block_end = offset();
4443   return block_end - block_start;
4444 }
4445 
4446 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4447 // Emitter does not KILL any arguments nor work registers.
4448 // Emitter generates up to 16 XC instructions, depending on the array length.
4449 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4450   int  block_start    = offset();
4451   int  off;
4452   int  lineSize_Bytes = AllocatePrefetchStepSize;
4453   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4454   bool doPrefetch     = VM_Version::has_Prefetch();
4455   int  XC_maxlen      = 256;
4456   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
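       // Example: cnt = 40 doublewords -> 320 bytes -> numXCInstr = (320-1)/256+1 = 2
       // (one full 256-byte XC plus a final 64-byte XC for the remainder).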
4457 
4458   BLOCK_COMMENT("Clear_Array_Const {");
4459   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4460 
4461   // Do less prefetching for very short arrays.
4462   if (numXCInstr > 0) {
4463     // Prefetch only some cache lines, then begin clearing.
4464     if (doPrefetch) {
4465       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4466         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4467       } else {
4468         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4469         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4470           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4471         }
4472       }
4473     }
4474 
4475     for (off=0; off<(numXCInstr-1); off++) {
4476       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4477 
4478       // Prefetch some cache lines in advance.
4479       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4480         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4481       }
4482     }
4483     if (off*XC_maxlen < cnt*BytesPerWord) {
4484       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4485     }
4486   }
4487   BLOCK_COMMENT("} Clear_Array_Const");
4488 
4489   int block_end = offset();
4490   return block_end - block_start;
4491 }
4492 
4493 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4494 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4495 // work registers anyway.
4496 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
4497 //
4498 // For very large arrays, exploit MVCLE H/W support.
4499 // MVCLE instruction automatically exploits H/W-optimized page mover.
4500 // - Bytes up to next page boundary are cleared with a series of XC to self.
4501 // - All full pages are cleared with the page mover H/W assist.
4502 // - Remaining bytes are again cleared by a series of XC to self.
4503 //
4504 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
4505   // Src_addr is evenReg.
4506   // Src_len is odd_Reg.
4507 
4508   int      block_start = offset();
4509   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4510   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4511 
4512   BLOCK_COMMENT("Clear_Array_Const_Big {");
4513 
4514   // Get len to clear.
4515   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4516 
4517   // Prepare other args to MVCLE.
4518   z_lgr(dst_addr, base_pointer_arg);
4519   // Indicate unused result.
4520   (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.
4521 
4522   // Clear.
4523   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4524   BLOCK_COMMENT("} Clear_Array_Const_Big");
4525 
4526   int block_end = offset();
4527   return block_end - block_start;
4528 }
4529 
4530 // Allocator.
4531 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4532                                                            Register cnt_reg,
4533                                                            Register tmp1_reg, Register tmp2_reg) {
4534   // Tmp1 is oddReg.
4535   // Tmp2 is evenReg.
4536 
4537   int block_start = offset();
4538   Label doMVC, doMVCLE, done, MVC_template;
4539 
4540   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4541 
4542   // Check for zero len and convert to long.
4543   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend len to 64 bit; sets the condition code for the zero check.
4544   z_bre(done);                    // Nothing to do if len == 0.
4545 
4546   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4547 
4548   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4549   z_brnh(doMVC);                  // If so, use executed MVC to clear.
4550 
4551   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4552   // Prep dest reg pair.
4553   z_lgr(Z_R0, dst_reg);           // dst addr
4554   // Dst len already in Z_R1.
4555   // Prep src reg pair.
4556   z_lgr(tmp2_reg, src_reg);       // src addr
4557   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4558 
4559   // Do the copy.
4560   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4561   z_bru(done);                         // All done.
4562 
4563   bind(MVC_template);             // Just some data (not more than 256 bytes).
4564   z_mvc(0, 0, dst_reg, 0, src_reg);
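       // The MVC template above encodes length 0; EX/EXRL below merges the real length
       // (byte count - 1) from the register into the executed instruction.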
4565 
4566   bind(doMVC);
4567 
4568   if (VM_Version::has_ExecuteExtensions()) {
4569     add2reg(Z_R1, -1);
4570   } else {
4571     add2reg(tmp1_reg, -1, Z_R1);
4572     z_larl(Z_R1, MVC_template);
4573   }
4574 
4575   if (VM_Version::has_Prefetch()) {
4576     z_pfd(1,  0,Z_R0,src_reg);
4577     z_pfd(2,  0,Z_R0,dst_reg);
4578     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4579     //    z_pfd(2,256,Z_R0,dst_reg);
4580   }
4581 
4582   if (VM_Version::has_ExecuteExtensions()) {
4583     z_exrl(Z_R1, MVC_template);
4584   } else {
4585     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4586   }
4587 
4588   bind(done);
4589 
4590   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4591 
4592   int block_end = offset();
4593   return block_end - block_start;
4594 }
4595 
4596 //------------------------------------------------------
4597 //   Special String Intrinsics. Implementation
4598 //------------------------------------------------------
4599 
4600 // Intrinsics for CompactStrings
4601 
4602 // Compress char[] to byte[].
4603 //   Restores: src, dst
4604 //   Uses:     cnt
4605 //   Kills:    tmp, Z_R0, Z_R1.
4606 //   Early clobber: result.
4607 // Note:
4608 //   cnt is signed int. Do not rely on high word!
4609 //       counts # characters, not bytes.
4610 // The result is the number of characters copied before the first incompatible character was found.
4611 // If precise is true, the processing stops exactly at this point. Otherwise, the result may be off
4612 // by a few bytes. The result always indicates the number of copied characters.
4613 // When used as a character index, the returned value points to the first incompatible character.
4614 //
4615 // Note: On failure, does not behave exactly like the package-private java.lang.StringUTF16.compress() implementation:
4616 // - A different number of characters may have been written to the dead array (if precise is false).
4617 // - Returns a number < cnt instead of 0. (The result gets compared with cnt.)
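     // Illustrative example (values assumed for exposition): cnt = 3, src = { 0x0041, 0x0141, 0x0042 }.
     // The second char has a non-zero high byte, so compression stops there: result = 1, and only
     // 0x41 has been stored to dst (the short string is handled by the scalar path).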
4618 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
4619                                              Register tmp,    bool precise) {
4620   assert_different_registers(Z_R0, Z_R1, result, src, dst, cnt, tmp);
4621 
4622   if (precise) {
4623     BLOCK_COMMENT("encode_iso_array {");
4624   } else {
4625     BLOCK_COMMENT("string_compress {");
4626   }
4627   int  block_start = offset();
4628 
4629   Register       Rsrc  = src;
4630   Register       Rdst  = dst;
4631   Register       Rix   = tmp;
4632   Register       Rcnt  = cnt;
4633   Register       Rmask = result;  // holds incompatibility check mask until result value is stored.
4634   Label          ScalarShortcut, AllDone;
4635 
4636   z_iilf(Rmask, 0xFF00FF00);
4637   z_iihf(Rmask, 0xFF00FF00);
4638 
4639 #if 0  // Sacrifice shortcuts for code compactness
4640   {
4641     //---<  shortcuts for short strings (very frequent)   >---
4642     //   Strings with 4 and 8 characters were found to occur very frequently.
4643     //   Therefore, we handle them right away with minimal overhead.
4644     Label     skipShortcut, skip4Shortcut, skip8Shortcut;
4645     Register  Rout = Z_R0;
4646     z_chi(Rcnt, 4);
4647     z_brne(skip4Shortcut);                 // 4 characters are very frequent
4648       z_lg(Z_R0, 0, Rsrc);                 // Treat exactly 4 characters specially.
4649       if (VM_Version::has_DistinctOpnds()) {
4650         Rout = Z_R0;
4651         z_ngrk(Rix, Z_R0, Rmask);
4652       } else {
4653         Rout = Rix;
4654         z_lgr(Rix, Z_R0);
4655         z_ngr(Z_R0, Rmask);
4656       }
4657       z_brnz(skipShortcut);
4658       z_stcmh(Rout, 5, 0, Rdst);
4659       z_stcm(Rout,  5, 2, Rdst);
4660       z_lgfr(result, Rcnt);
4661       z_bru(AllDone);
4662     bind(skip4Shortcut);
4663 
4664     z_chi(Rcnt, 8);
4665     z_brne(skip8Shortcut);                 // There's more to do...
4666       z_lmg(Z_R0, Z_R1, 0, Rsrc);          // Treat exactly 8 characters specially.
4667       if (VM_Version::has_DistinctOpnds()) {
4668         Rout = Z_R0;
4669         z_ogrk(Rix, Z_R0, Z_R1);
4670         z_ngr(Rix, Rmask);
4671       } else {
4672         Rout = Rix;
4673         z_lgr(Rix, Z_R0);
4674         z_ogr(Z_R0, Z_R1);
4675         z_ngr(Z_R0, Rmask);
4676       }
4677       z_brnz(skipShortcut);
4678       z_stcmh(Rout, 5, 0, Rdst);
4679       z_stcm(Rout,  5, 2, Rdst);
4680       z_stcmh(Z_R1, 5, 4, Rdst);
4681       z_stcm(Z_R1,  5, 6, Rdst);
4682       z_lgfr(result, Rcnt);
4683       z_bru(AllDone);
4684 
4685     bind(skip8Shortcut);
4686     clear_reg(Z_R0, true, false);          // #characters already processed (none). Precond for scalar loop.
4687     z_brl(ScalarShortcut);                 // Just a few characters
4688 
4689     bind(skipShortcut);
4690   }
4691 #endif
4692   clear_reg(Z_R0);                         // make sure register is properly initialized.
4693 
4694   if (VM_Version::has_VectorFacility()) {
4695     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
4696                                            // Otherwise just do nothing in vector mode.
4697                                            // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
4698     const int  log_min_vcnt = exact_log2(min_vcnt);
4699     Label      VectorLoop, VectorDone, VectorBreak;
4700 
4701     VectorRegister Vtmp1      = Z_V16;
4702     VectorRegister Vtmp2      = Z_V17;
4703     VectorRegister Vmask      = Z_V18;
4704     VectorRegister Vzero      = Z_V19;
4705     VectorRegister Vsrc_first = Z_V20;
4706     VectorRegister Vsrc_last  = Z_V23;
4707 
4708     assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
4709     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
4710     z_srak(Rix, Rcnt, log_min_vcnt);       // # vector loop iterations
4711     z_brz(VectorDone);                     // not enough data for vector loop
4712 
4713     z_vzero(Vzero);                        // all zeroes
4714     z_vgmh(Vmask, 0, 7);                   // generate 0xff00 mask for all 2-byte elements
4715     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
4716 
4717     bind(VectorLoop);
4718       z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
4719       add2reg(Rsrc, min_vcnt*2);
4720 
4721       //---<  check for incompatible character  >---
4722       z_vo(Vtmp1, Z_V20, Z_V21);
4723       z_vo(Vtmp2, Z_V22, Z_V23);
4724       z_vo(Vtmp1, Vtmp1, Vtmp2);
4725       z_vn(Vtmp1, Vtmp1, Vmask);
4726       z_vceqhs(Vtmp1, Vtmp1, Vzero);       // high half of all chars must be zero for successful compress.
4727       z_bvnt(VectorBreak);                 // break vector loop if not all vector elements compare eq -> incompatible character found.
4728                                            // re-process data from current iteration in break handler.
4729 
4730       //---<  pack & store characters  >---
4731       z_vpkh(Vtmp1, Z_V20, Z_V21);         // pack (src1, src2) -> tmp1
4732       z_vpkh(Vtmp2, Z_V22, Z_V23);         // pack (src3, src4) -> tmp2
4733       z_vstm(Vtmp1, Vtmp2, 0, Rdst);       // store packed string
4734       add2reg(Rdst, min_vcnt);
4735 
4736       z_brct(Rix, VectorLoop);
4737 
4738     z_bru(VectorDone);
4739 
4740     bind(VectorBreak);
4741       add2reg(Rsrc, -min_vcnt*2);          // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
4742       z_sll(Rix, log_min_vcnt);            // # chars processed so far in VectorLoop, excl. current iteration.
4743       z_sr(Z_R0, Rix);                     // correct # chars processed in total.
4744 
4745     bind(VectorDone);
4746   }
4747 
4748   {
4749     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled loop.
4750                                            // Otherwise just do nothing in unrolled loop.
4751                                            // Must be multiple of 8.
4752     const int  log_min_cnt = exact_log2(min_cnt);
4753     Label      UnrolledLoop, UnrolledDone, UnrolledBreak;
4754 
4755     if (VM_Version::has_DistinctOpnds()) {
4756       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
4757     } else {
4758       z_lr(Rix, Rcnt);
4759       z_sr(Rix, Z_R0);
4760     }
4761     z_sra(Rix, log_min_cnt);             // unrolled loop count
4762     z_brz(UnrolledDone);
4763 
4764     bind(UnrolledLoop);
4765       z_lmg(Z_R0, Z_R1, 0, Rsrc);
4766       if (precise) {
4767         z_ogr(Z_R1, Z_R0);                 // check all 8 chars for incompatibility
4768         z_ngr(Z_R1, Rmask);
4769         z_brnz(UnrolledBreak);
4770 
4771         z_lg(Z_R1, 8, Rsrc);               // reload destroyed register
4772         z_stcmh(Z_R0, 5, 0, Rdst);
4773         z_stcm(Z_R0,  5, 2, Rdst);
4774       } else {
4775         z_stcmh(Z_R0, 5, 0, Rdst);
4776         z_stcm(Z_R0,  5, 2, Rdst);
4777 
4778         z_ogr(Z_R0, Z_R1);
4779         z_ngr(Z_R0, Rmask);
4780         z_brnz(UnrolledBreak);
4781       }
4782       z_stcmh(Z_R1, 5, 4, Rdst);
4783       z_stcm(Z_R1,  5, 6, Rdst);
4784 
4785       add2reg(Rsrc, min_cnt*2);
4786       add2reg(Rdst, min_cnt);
4787       z_brct(Rix, UnrolledLoop);
4788 
4789     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop.
4790     z_nilf(Z_R0, ~(min_cnt-1));
4791     z_tmll(Rcnt, min_cnt-1);
4792     z_brnaz(ScalarShortcut);               // remainder bits set: handle leftover chars in the scalar code.
4793                                            // Rix == 0 in all cases.
4794     z_sllg(Z_R1, Rcnt, 1);                 // # src bytes already processed. Only lower 32 bits are valid!
4795                                            //   Z_R1 contents must be treated as unsigned operand! For huge strings,
4796                                            //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
4797     z_lgfr(result, Rcnt);                  // all characters processed.
4798     z_slgfr(Rdst, Rcnt);                   // restore ptr
4799     z_slgfr(Rsrc, Z_R1);                   // restore ptr, double the element count for Rsrc restore
4800     z_bru(AllDone);
4801 
4802     bind(UnrolledBreak);
4803     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop
4804     z_nilf(Z_R0, ~(min_cnt-1));
4805     z_sll(Rix, log_min_cnt);               // # chars not yet processed in UnrolledLoop (due to break), broken iteration not included.
4806     z_sr(Z_R0, Rix);                       // fix # chars processed OK so far.
4807     if (!precise) {
4808       z_lgfr(result, Z_R0);
4809       z_sllg(Z_R1, Z_R0, 1);               // # src bytes already processed. Only lower 32 bits are valid!
4810                                            //   Z_R1 contents must be treated as unsigned operand! For huge strings,
4811                                            //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
4812       z_aghi(result, min_cnt/2);           // min_cnt/2 characters have already been written
4813                                            // but ptrs were not updated yet.
4814       z_slgfr(Rdst, Z_R0);                 // restore ptr
4815       z_slgfr(Rsrc, Z_R1);                 // restore ptr, double the element count for Rsrc restore
4816       z_bru(AllDone);
4817     }
4818     bind(UnrolledDone);
4819   }
4820 
4821   {
4822     Label     ScalarLoop, ScalarDone, ScalarBreak;
4823 
4824     bind(ScalarShortcut);
4825     z_ltgfr(result, Rcnt);
4826     z_brz(AllDone);
4827 
4828 #if 0  // Sacrifice shortcuts for code compactness
4829     {
4830       //---<  Special treatment for very short strings (one or two characters)  >---
4831       //   For these strings, we are sure that the above code was skipped.
4832       //   Thus, no registers were modified, register restore is not required.
4833       Label     ScalarDoit, Scalar2Char;
4834       z_chi(Rcnt, 2);
4835       z_brh(ScalarDoit);
4836       z_llh(Z_R1,  0, Z_R0, Rsrc);
4837       z_bre(Scalar2Char);
4838       z_tmll(Z_R1, 0xff00);
4839       z_lghi(result, 0);                   // cnt == 1, first char invalid, no chars successfully processed
4840       z_brnaz(AllDone);
4841       z_stc(Z_R1,  0, Z_R0, Rdst);
4842       z_lghi(result, 1);
4843       z_bru(AllDone);
4844 
4845       bind(Scalar2Char);
4846       z_llh(Z_R0,  2, Z_R0, Rsrc);
4847       z_tmll(Z_R1, 0xff00);
4848       z_lghi(result, 0);                   // cnt == 2, first char invalid, no chars successfully processed
4849       z_brnaz(AllDone);
4850       z_stc(Z_R1,  0, Z_R0, Rdst);
4851       z_tmll(Z_R0, 0xff00);
4852       z_lghi(result, 1);                   // cnt == 2, second char invalid, one char successfully processed
4853       z_brnaz(AllDone);
4854       z_stc(Z_R0,  1, Z_R0, Rdst);
4855       z_lghi(result, 2);
4856       z_bru(AllDone);
4857 
4858       bind(ScalarDoit);
4859     }
4860 #endif
4861 
4862     if (VM_Version::has_DistinctOpnds()) {
4863       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
4864     } else {
4865       z_lr(Rix, Rcnt);
4866       z_sr(Rix, Z_R0);
4867     }
4868     z_lgfr(result, Rcnt);                  // # processed characters (if all runs ok).
4869     z_brz(ScalarDone);                     // uses CC from Rix calculation
4870 
4871     bind(ScalarLoop);
4872       z_llh(Z_R1, 0, Z_R0, Rsrc);
4873       z_tmll(Z_R1, 0xff00);
4874       z_brnaz(ScalarBreak);
4875       z_stc(Z_R1, 0, Z_R0, Rdst);
4876       add2reg(Rsrc, 2);
4877       add2reg(Rdst, 1);
4878       z_brct(Rix, ScalarLoop);
4879 
4880     z_bru(ScalarDone);
4881 
4882     bind(ScalarBreak);
4883     z_sr(result, Rix);
4884 
4885     bind(ScalarDone);
4886     z_sgfr(Rdst, result);                  // restore ptr
4887     z_sgfr(Rsrc, result);                  // restore ptr; Rsrc advances 2 bytes per char,
4888     z_sgfr(Rsrc, result);                  // so the char count is subtracted twice.
4889   }
4890   bind(AllDone);
4891 
4892   if (precise) {
4893     BLOCK_COMMENT("} encode_iso_array");
4894   } else {
4895     BLOCK_COMMENT("} string_compress");
4896   }
4897   return offset() - block_start;
4898 }
4899 
4900 // Inflate byte[] to char[].
4901 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
4902   int block_start = offset();
4903 
4904   BLOCK_COMMENT("string_inflate {");
4905 
4906   Register stop_char = Z_R0;
4907   Register table     = Z_R1;
4908   Register src_addr  = tmp;
4909 
4910   assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
4911   assert(dst->encoding()%2 == 0, "must be even reg");
4912   assert(cnt->encoding()%2 == 1, "must be odd reg");
4913   assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
4914 
4915   StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
4916   clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
4917   lgr_if_needed(src_addr, src);
4918   z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.
4919 
4920   translate_ot(dst, src_addr, /* mask = */ 0x0001);
4921 
4922   BLOCK_COMMENT("} string_inflate");
4923 
4924   return offset() - block_start;
4925 }
4926 
4927 // Inflate byte[] to char[].
4928 //   Restores: src, dst
4929 //   Uses:     cnt
4930 //   Kills:    tmp, Z_R0, Z_R1.
4931 // Note:
4932 //   cnt is signed int. Do not rely on high word!
4933 //       counts # characters, not bytes.
4934 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
4935   assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);
4936 
4937   BLOCK_COMMENT("string_inflate {");
4938   int block_start = offset();
4939 
4940   Register   Rcnt = cnt;   // # characters (src: bytes, dst: char (2-byte)), remaining after current loop.
4941   Register   Rix  = tmp;   // loop index
4942   Register   Rsrc = src;   // addr(src array)
4943   Register   Rdst = dst;   // addr(dst array)
4944   Label      ScalarShortcut, AllDone;
4945 
4946 #if 0  // Sacrifice shortcuts for code compactness
4947   {
4948     //---<  shortcuts for short strings (very frequent)   >---
4949     Label   skipShortcut, skip4Shortcut;
4950     z_ltr(Rcnt, Rcnt);                     // absolutely nothing to do for strings of len == 0.
4951     z_brz(AllDone);
4952     clear_reg(Z_R0);                       // make sure registers are properly initialized.
4953     clear_reg(Z_R1);
4954     z_chi(Rcnt, 4);
4955     z_brne(skip4Shortcut);                 // 4 characters are very frequent
4956       z_icm(Z_R0, 5,    0, Rsrc);          // Treat exactly 4 characters specially.
4957       z_icm(Z_R1, 5,    2, Rsrc);
4958       z_stm(Z_R0, Z_R1, 0, Rdst);
4959       z_bru(AllDone);
4960     bind(skip4Shortcut);
4961 
4962     z_chi(Rcnt, 8);
4963     z_brh(skipShortcut);                   // There's a lot to do...
4964     z_lgfr(Z_R0, Rcnt);                    // remaining #characters (<= 8). Precond for scalar loop.
4965                                            // This does not destroy the "register cleared" state of Z_R0.
4966     z_brl(ScalarShortcut);                 // Just a few characters
4967       z_icmh(Z_R0, 5, 0, Rsrc);            // Treat exactly 8 characters specially.
4968       z_icmh(Z_R1, 5, 4, Rsrc);
4969       z_icm(Z_R0,  5, 2, Rsrc);
4970       z_icm(Z_R1,  5, 6, Rsrc);
4971       z_stmg(Z_R0, Z_R1, 0, Rdst);
4972       z_bru(AllDone);
4973     bind(skipShortcut);
4974   }
4975 #endif
4976   clear_reg(Z_R0);                         // make sure register is properly initialized.
4977 
4978   if (VM_Version::has_VectorFacility()) {
4979     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
4980                                            // Otherwise just do nothing in vector mode.
4981                                            // Must be multiple of vector register length (16 bytes = 128 bits).
4982     const int  log_min_vcnt = exact_log2(min_vcnt);
4983     Label      VectorLoop, VectorDone;
4984 
4985     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
4986     z_srak(Rix, Rcnt, log_min_vcnt);       // calculate # vector loop iterations
4987     z_brz(VectorDone);                     // skip if none
4988 
4989     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
4990 
4991     bind(VectorLoop);
4992       z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
4993       add2reg(Rsrc, min_vcnt);
4994 
4995       z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
4996       z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
4997       z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
4998       z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
4999       z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
5000       add2reg(Rdst, min_vcnt*2);
5001 
5002       z_brct(Rix, VectorLoop);
5003 
5004     bind(VectorDone);
5005   }
5006 
5007   const int  min_cnt     =  8;             // Minimum #characters required to use unrolled scalar loop.
5008                                            // Otherwise just do nothing in unrolled scalar mode.
5009                                            // Must be multiple of 8.
5010   {
5011     const int  log_min_cnt = exact_log2(min_cnt);
5012     Label      UnrolledLoop, UnrolledDone;
5013 
5014 
5015     if (VM_Version::has_DistinctOpnds()) {
5016       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to process in unrolled loop
5017     } else {
5018       z_lr(Rix, Rcnt);
5019       z_sr(Rix, Z_R0);
5020     }
5021     z_sra(Rix, log_min_cnt);               // unrolled loop count
5022     z_brz(UnrolledDone);
5023 
5024     clear_reg(Z_R0);
5025     clear_reg(Z_R1);
5026 
5027     bind(UnrolledLoop);
5028       z_icmh(Z_R0, 5, 0, Rsrc);
5029       z_icmh(Z_R1, 5, 4, Rsrc);
5030       z_icm(Z_R0,  5, 2, Rsrc);
5031       z_icm(Z_R1,  5, 6, Rsrc);
5032       add2reg(Rsrc, min_cnt);
5033 
5034       z_stmg(Z_R0, Z_R1, 0, Rdst);
5035 
5036       add2reg(Rdst, min_cnt*2);
5037       z_brct(Rix, UnrolledLoop);
5038 
5039     bind(UnrolledDone);
5040     z_lgfr(Z_R0, Rcnt);                    // # chars left over after unrolled loop.
5041     z_nilf(Z_R0, min_cnt-1);
5042     z_brnz(ScalarShortcut);                // if the remainder is non-zero, handle leftover chars in the scalar code.
5043                                            // Rix == 0 in all cases.
5044     z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
5045     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5046     z_agr(Rdst, Z_R0);
5047     z_agr(Rsrc, Z_R0);                     // restore ptr.
5048     z_bru(AllDone);
5049   }
5050 
5051   {
5052     bind(ScalarShortcut);
5053     // Z_R0 must contain remaining # characters as 64-bit signed int here.
5054     //      The register contents are preserved over scalar processing (for register fixup).
5055 
5056 #if 0  // Sacrifice shortcuts for code compactness
5057     {
5058       Label      ScalarDefault;
5059       z_chi(Rcnt, 2);
5060       z_brh(ScalarDefault);
5061       z_llc(Z_R0,  0, Z_R0, Rsrc);     // 6 bytes
5062       z_sth(Z_R0,  0, Z_R0, Rdst);     // 4 bytes
5063       z_brl(AllDone);
5064       z_llc(Z_R0,  1, Z_R0, Rsrc);     // 6 bytes
5065       z_sth(Z_R0,  2, Z_R0, Rdst);     // 4 bytes
5066       z_bru(AllDone);
5067       bind(ScalarDefault);
5068     }
5069 #endif
5070 
5071     Label   CodeTable;
5072     // Some comments on Rix calculation:
5073     //  - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
5074     //  - high word of both Rix and Rcnt may contain garbage
5075     //  - the final lngfr takes care of that garbage, extending the sign to high word
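         // Example: 3 leftover characters -> branch to CodeTable - 3*10 bytes, i.e. to the llc/sth
         // pair for source offset 2; the pairs for offsets 1 and 0 then execute by fall-through.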
5076     z_sllg(Rix, Z_R0, 2);                // calculate Rix = 10*cnt as (4*cnt + cnt)*2, with cnt in Z_R0
5077     z_ar(Rix, Z_R0);
5078     z_larl(Z_R1, CodeTable);
5079     z_sll(Rix, 1);
5080     z_lngfr(Rix, Rix);      // ix range: [0..7]; after mult & negation: [-(7*10)..0] (each llc/sth pair is 10 bytes).
5081     z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);
5082 
5083     z_llc(Z_R1,  6, Z_R0, Rsrc);  // 6 bytes
5084     z_sth(Z_R1, 12, Z_R0, Rdst);  // 4 bytes
5085 
5086     z_llc(Z_R1,  5, Z_R0, Rsrc);
5087     z_sth(Z_R1, 10, Z_R0, Rdst);
5088 
5089     z_llc(Z_R1,  4, Z_R0, Rsrc);
5090     z_sth(Z_R1,  8, Z_R0, Rdst);
5091 
5092     z_llc(Z_R1,  3, Z_R0, Rsrc);
5093     z_sth(Z_R1,  6, Z_R0, Rdst);
5094 
5095     z_llc(Z_R1,  2, Z_R0, Rsrc);
5096     z_sth(Z_R1,  4, Z_R0, Rdst);
5097 
5098     z_llc(Z_R1,  1, Z_R0, Rsrc);
5099     z_sth(Z_R1,  2, Z_R0, Rdst);
5100 
5101     z_llc(Z_R1,  0, Z_R0, Rsrc);
5102     z_sth(Z_R1,  0, Z_R0, Rdst);
5103     bind(CodeTable);
5104 
5105     z_chi(Rcnt, 8);                        // no fixup for small strings. Rdst, Rsrc were not modified.
5106     z_brl(AllDone);
5107 
5108     z_sgfr(Z_R0, Rcnt);                    // # characters the ptrs have been advanced previously.
5109     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5110     z_agr(Rdst, Z_R0);
5111     z_agr(Rsrc, Z_R0);                     // restore ptr.
5112   }
5113   bind(AllDone);
5114 
5115   BLOCK_COMMENT("} string_inflate");
5116   return offset() - block_start;
5117 }
5118 
5119 // Inflate byte[] to char[], length known at compile time.
5120 //   Restores: src, dst
5121 //   Kills:    tmp, Z_R0, Z_R1.
5122 // Note:
5123 //   len is signed int. Counts # characters, not bytes.
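     // Illustrative example (assuming the vector facility is available): len = 45 is processed as
     // one 32-char vector chunk, one 8-char unrolled chunk, and a final 5-char tail handled by the
     // switch at the end of this emitter.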
5124 unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
5125   assert_different_registers(Z_R0, Z_R1, src, dst, tmp);
5126 
5127   BLOCK_COMMENT("string_inflate_const {");
5128   int block_start = offset();
5129 
5130   Register   Rix  = tmp;   // loop index
5131   Register   Rsrc = src;   // addr(src array)
5132   Register   Rdst = dst;   // addr(dst array)
5133   Label      ScalarShortcut, AllDone;
5134   int        nprocessed = 0;
5135   int        src_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5136   int        dst_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5137   bool       restore_inputs = false;
5138   bool       workreg_clear  = false;
5139 
5140   if ((len >= 32) && VM_Version::has_VectorFacility()) {
5141     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5142                                            // Otherwise just do nothing in vector mode.
5143                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5144     const int  log_min_vcnt = exact_log2(min_vcnt);
5145     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5146     nprocessed             += iterations << log_min_vcnt;
5147     Label      VectorLoop;
5148 
5149     if (iterations == 1) {
5150       z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);  // get next 32 characters (single-byte)
5151       z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5152       z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5153       z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
5154       z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
5155       z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 bytes
5156 
5157       src_off += min_vcnt;
5158       dst_off += min_vcnt*2;
5159     } else {
5160       restore_inputs = true;
5161 
5162       z_lgfi(Rix, len>>log_min_vcnt);
5163       bind(VectorLoop);
5164         z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
5165         add2reg(Rsrc, min_vcnt);
5166 
5167         z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
5168         z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5169         z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5170         z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
5171         z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
5172         add2reg(Rdst, min_vcnt*2);
5173 
5174         z_brct(Rix, VectorLoop);
5175     }
5176   }
5177 
5178   if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
5179     const int  min_vcnt     = 16;          // Minimum #characters required to use vector instructions.
5180                                            // Otherwise just do nothing in vector mode.
5181                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5182     const int  log_min_vcnt = exact_log2(min_vcnt);
5183     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5184     nprocessed             += iterations << log_min_vcnt;
5185     assert(iterations == 1, "must be!");
5186 
5187     z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);    // get next 16 characters (single-byte)
5188     z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5189     z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5190     z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes
5191 
5192     src_off += min_vcnt;
5193     dst_off += min_vcnt*2;
5194   }
5195 
5196   if ((len-nprocessed) > 8) {
5197     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled scalar loop.
5198                                            // Otherwise just do nothing in unrolled scalar mode.
5199                                            // Must be multiple of 8.
5200     const int  log_min_cnt = exact_log2(min_cnt);
5201     const int  iterations  = (len - nprocessed) >> log_min_cnt;
5202     nprocessed     += iterations << log_min_cnt;
5203 
5204     //---<  avoid loop overhead/ptr increment for small # iterations  >---
5205     if (iterations <= 2) {
5206       clear_reg(Z_R0);
5207       clear_reg(Z_R1);
5208       workreg_clear = true;
5209 
5210       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5211       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5212       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5213       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5214       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5215 
5216       src_off += min_cnt;
5217       dst_off += min_cnt*2;
5218     }
5219 
5220     if (iterations == 2) {
5221       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5222       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5223       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5224       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5225       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5226 
5227       src_off += min_cnt;
5228       dst_off += min_cnt*2;
5229     }
5230 
5231     if (iterations > 2) {
5232       Label      UnrolledLoop;
5233       restore_inputs  = true;
5234 
5235       clear_reg(Z_R0);
5236       clear_reg(Z_R1);
5237       workreg_clear = true;
5238 
5239       z_lgfi(Rix, iterations);
5240       bind(UnrolledLoop);
5241         z_icmh(Z_R0, 5, 0, Rsrc);
5242         z_icmh(Z_R1, 5, 4, Rsrc);
5243         z_icm(Z_R0,  5, 2, Rsrc);
5244         z_icm(Z_R1,  5, 6, Rsrc);
5245         add2reg(Rsrc, min_cnt);
5246 
5247         z_stmg(Z_R0, Z_R1, 0, Rdst);
5248         add2reg(Rdst, min_cnt*2);
5249 
5250         z_brct(Rix, UnrolledLoop);
5251     }
5252   }
5253 
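       //---<  handle the remaining (at most 8) characters without a loop  >---
       //---<  (each leftover byte is zero-extended to a 2-byte char, see cases 8..1 below)  >---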
5254   if ((len-nprocessed) > 0) {
5255     switch (len-nprocessed) {
5256       case 8:
5257         if (!workreg_clear) {
5258           clear_reg(Z_R0);
5259           clear_reg(Z_R1);
5260         }
5261         z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5262         z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5263         z_icm(Z_R0,  5, 2+src_off, Rsrc);
5264         z_icm(Z_R1,  5, 6+src_off, Rsrc);
5265         z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5266         break;
5267       case 7:
5268         if (!workreg_clear) {
5269           clear_reg(Z_R0);
5270           clear_reg(Z_R1);
5271         }
5272         clear_reg(Rix);
5273         z_icm(Z_R0,  5, 0+src_off, Rsrc);
5274         z_icm(Z_R1,  5, 2+src_off, Rsrc);
5275         z_icm(Rix,   5, 4+src_off, Rsrc);
5276         z_stm(Z_R0,  Z_R1, 0+dst_off, Rdst);
5277         z_llc(Z_R0,  6+src_off, Z_R0, Rsrc);
5278         z_st(Rix,    8+dst_off, Z_R0, Rdst);
5279         z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
5280         break;
5281       case 6:
5282         if (!workreg_clear) {
5283           clear_reg(Z_R0);
5284           clear_reg(Z_R1);
5285         }
5286         clear_reg(Rix);
5287         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5288         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5289         z_icm(Rix,  5, 4+src_off, Rsrc);
5290         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5291         z_st(Rix,   8+dst_off, Z_R0, Rdst);
5292         break;
5293       case 5:
5294         if (!workreg_clear) {
5295           clear_reg(Z_R0);
5296           clear_reg(Z_R1);
5297         }
5298         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5299         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5300         z_llc(Rix,  4+src_off, Z_R0, Rsrc);
5301         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5302         z_sth(Rix,  8+dst_off, Z_R0, Rdst);
5303         break;
5304       case 4:
5305         if (!workreg_clear) {
5306           clear_reg(Z_R0);
5307           clear_reg(Z_R1);
5308         }
5309         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5310         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5311         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5312         break;
5313       case 3:
5314         if (!workreg_clear) {
5315           clear_reg(Z_R0);
5316         }
5317         z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
5318         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5319         z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
5320         z_st(Z_R0,  0+dst_off, Rdst);
5321         break;
5322       case 2:
5323         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5324         z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
5325         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5326         z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
5327         break;
5328       case 1:
5329         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5330         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5331         break;
5332       default:
5333         guarantee(false, "Impossible");
5334         break;
5335     }
5336     src_off   +=  len-nprocessed;
5337     dst_off   += (len-nprocessed)*2;
5338     nprocessed = len;
5339   }
5340 
5341   //---< restore modified input registers  >---
5342   if ((nprocessed > 0) && restore_inputs) {
5343     z_agfi(Rsrc, -(nprocessed-src_off));
5344     if (nprocessed < 1000000000) { // avoid int overflow
5345       z_agfi(Rdst, -(nprocessed*2-dst_off));
5346     } else {
5347       z_agfi(Rdst, -(nprocessed-dst_off));
5348       z_agfi(Rdst, -nprocessed);
5349     }
5350   }
5351 
5352   BLOCK_COMMENT("} string_inflate_const");
5353   return offset() - block_start;
5354 }
5355 
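     // Semantics sketch (assumed Java/C equivalent of the code generated below):
     //   for (i = 0; i < cnt; i++) { if ((src[i] & 0x80) != 0) return 1; }
     //   return 0;
     // The fast loop tests 16 bytes per iteration by OR-ing two 8-byte words and masking
     // with 0x8080808080808080; a byte-wise slow loop handles the remainder.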
5356 // Kills src.
5357 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
5358                                            Register odd_reg, Register even_reg, Register tmp) {
5359   int block_start = offset();
5360   Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
5361   const Register addr = src, mask = tmp;
5362 
5363   BLOCK_COMMENT("has_negatives {");
5364 
5365   z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
5366   z_llilf(mask, 0x80808080);
5367   z_lhi(result, 1);        // Assume true.
5368   // Last possible addr for fast loop.
5369   z_lay(odd_reg, -16, Z_R1, src);
5370   z_chi(cnt, 16);
5371   z_brl(Lslow);
5372 
5373   // addr: index, even_reg: index increment, odd_reg: index limit
5374   z_iihf(mask, 0x80808080);
5375   z_lghi(even_reg, 16);
5376 
5377   bind(Lloop1); // 16 bytes per iteration.
5378   z_lg(Z_R0, Address(addr));
5379   z_lg(Z_R1, Address(addr, 8));
5380   z_ogr(Z_R0, Z_R1);
5381   z_ngr(Z_R0, mask);
5382   z_brne(Ldone);           // If found return 1.
5383   z_brxlg(addr, even_reg, Lloop1);
5384 
5385   bind(Lslow);
5386   z_aghi(odd_reg, 16-1);   // Last possible addr for slow loop.
5387   z_lghi(even_reg, 1);
5388   z_cgr(addr, odd_reg);
5389   z_brh(Lnotfound);
5390 
5391   bind(Lloop2); // 1 byte per iteration.
5392   z_cli(Address(addr), 0x80);
5393   z_brnl(Ldone);           // If found return 1.
5394   z_brxlg(addr, even_reg, Lloop2);
5395 
5396   bind(Lnotfound);
5397   z_lhi(result, 0);
5398 
5399   bind(Ldone);
5400 
5401   BLOCK_COMMENT("} has_negatives");
5402 
5403   return offset() - block_start;
5404 }
5405 
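     // Semantics sketch (assumed equivalent; encodings and counts as set up by the caller per 'ae'):
     //   n = min(cnt1, cnt2);
     //   for (i = 0; i < n; i++) { if (str1[i] != str2[i]) return str1[i] - str2[i]; }
     //   return cnt1 - cnt2;   // Negated for UL, see the note below.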
5406 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
5407 unsigned int MacroAssembler::string_compare(Register str1, Register str2,
5408                                             Register cnt1, Register cnt2,
5409                                             Register odd_reg, Register even_reg, Register result, int ae) {
5410   int block_start = offset();
5411 
5412   assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
5413   assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
5414 
5415   // If strings are equal up to min length, return the length difference.
5416   const Register diff = result, // Pre-set result with length difference.
5417                  min  = cnt1,   // min number of bytes
5418                  tmp  = cnt2;
5419 
5420   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
5421   // we interchange str1 and str2 in the UL case and negate the result.
5422   // This way, str1 is always Latin1 encoded, except in the UU case.
5423   // In addition, values must be zero-extended when used in 64-bit registers (sign extension is equivalent here, the sign bit being 0).
5424   const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
5425 
5426   BLOCK_COMMENT("string_compare {");
5427 
5428   if (used_as_LU) {
5429     z_srl(cnt2, 1);
5430   }
5431 
5432   // See if the lengths are different, and calculate min in cnt1.
5433   // Save diff in case we need it for a tie-breaker.
5434 
5435   // diff = cnt1 - cnt2
5436   if (VM_Version::has_DistinctOpnds()) {
5437     z_srk(diff, cnt1, cnt2);
5438   } else {
5439     z_lr(diff, cnt1);
5440     z_sr(diff, cnt2);
5441   }
5442   if (str1 != str2) {
5443     if (VM_Version::has_LoadStoreConditional()) {
5444       z_locr(min, cnt2, Assembler::bcondHigh);
5445     } else {
5446       Label Lskip;
5447       z_brl(Lskip);    // min ok if cnt1 < cnt2
5448       z_lr(min, cnt2); // min = cnt2
5449       bind(Lskip);
5450     }
5451   }
5452 
5453   if (ae == StrIntrinsicNode::UU) {
5454     z_sra(diff, 1);
5455   }
5456   if (str1 != str2) {
5457     Label Ldone;
5458     if (used_as_LU) {
5459       // Loop which searches for the first difference, character by character.
5460       Label Lloop;
5461       const Register ind1 = Z_R1,
5462                      ind2 = min;
5463       int stride1 = 1, stride2 = 2; // See comment above.
5464 
5465       // ind1: index, even_reg: index increment, odd_reg: index limit
5466       z_llilf(ind1, (unsigned int)(-stride1));
5467       z_lhi(even_reg, stride1);
5468       add2reg(odd_reg, -stride1, min);
5469       clear_reg(ind2); // kills min
5470 
5471       bind(Lloop);
5472       z_brxh(ind1, even_reg, Ldone);
5473       z_llc(tmp, Address(str1, ind1));
5474       z_llh(Z_R0, Address(str2, ind2));
5475       z_ahi(ind2, stride2);
5476       z_sr(tmp, Z_R0);
5477       z_bre(Lloop);
5478 
5479       z_lr(result, tmp);
5480 
5481     } else {
5482       // Use clcle in fast loop (only for same encoding).
5483       z_lgr(Z_R0, str1);
5484       z_lgr(even_reg, str2);
5485       z_llgfr(Z_R1, min);
5486       z_llgfr(odd_reg, min);
5487 
5488       if (ae == StrIntrinsicNode::LL) {
5489         compare_long_ext(Z_R0, even_reg, 0);
5490       } else {
5491         compare_long_uni(Z_R0, even_reg, 0);
5492       }
5493       z_bre(Ldone);
5494       z_lgr(Z_R1, Z_R0);
5495       if (ae == StrIntrinsicNode::LL) {
5496         z_llc(Z_R0, Address(even_reg));
5497         z_llc(result, Address(Z_R1));
5498       } else {
5499         z_llh(Z_R0, Address(even_reg));
5500         z_llh(result, Address(Z_R1));
5501       }
5502       z_sr(result, Z_R0);
5503     }
5504 
5505     // Otherwise, return the difference between the first mismatched chars.
5506     bind(Ldone);
5507   }
5508 
5509   if (ae == StrIntrinsicNode::UL) {
5510     z_lcr(result, result); // Negate result (see note above).
5511   }
5512 
5513   BLOCK_COMMENT("} string_compare");
5514 
5515   return offset() - block_start;
5516 }
5517 
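     // Semantics sketch (assumed equivalent of the code generated below):
     //   if (ary1 == ary2) return 1;
     //   if (is_array_equ) {   // Arrays.equals(): also check array headers.
     //     if (ary1 == NULL || ary2 == NULL || length(ary1) != length(ary2)) return 0;
     //   }
     //   return all elements equal ? 1 : 0;   // CLC for <= 256 bytes, CLCLE otherwise.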
5518 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
5519                                           Register odd_reg, Register even_reg, Register result, bool is_byte) {
5520   int block_start = offset();
5521 
5522   BLOCK_COMMENT("array_equals {");
5523 
5524   assert_different_registers(ary1, limit, odd_reg, even_reg);
5525   assert_different_registers(ary2, limit, odd_reg, even_reg);
5526 
5527   Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
5528   int base_offset = 0;
5529 
5530   if (ary1 != ary2) {
5531     if (is_array_equ) {
5532       base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
5533 
5534       // Return true if the same array.
5535       compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
5536 
5537       // Return false if one of them is NULL.
5538       compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5539       compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5540 
5541       // Load the lengths of arrays.
5542       z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));
5543 
5544       // Return false if the two arrays are not equal length.
5545       z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
5546       z_brne(Ldone_false);
5547 
5548       // string len in bytes (right operand)
5549       if (!is_byte) {
5550         z_chi(odd_reg, 128);
5551         z_sll(odd_reg, 1); // preserves flags
5552         z_brh(Lclcle);
5553       } else {
5554         compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5555       }
5556     } else {
5557       z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
5558       compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5559     }
5560 
5561 
5562     // Use clc instruction for up to 256 bytes.
5563     {
5564       Register str1_reg = ary1,
5565           str2_reg = ary2;
5566       if (is_array_equ) {
5567         str1_reg = Z_R1;
5568         str2_reg = even_reg;
5569         add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
5570         add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
5571       }
5572       z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
5573       z_brl(Ldone_true);
5574       // Note: We could jump to the template if equal.
5575 
5576       assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5577       z_exrl(odd_reg, CLC_template);
5578       z_bre(Ldone_true);
5579       // fall through
5580 
5581       bind(Ldone_false);
5582       clear_reg(result);
5583       z_bru(Ldone);
5584 
5585       bind(CLC_template);
5586       z_clc(0, 0, str1_reg, 0, str2_reg);
5587     }
5588 
5589     // Use clcle instruction.
5590     {
5591       bind(Lclcle);
5592       add2reg(even_reg, base_offset, ary2); // string addr (right operand)
5593       add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)
5594 
5595       z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
5596       if (is_byte) {
5597         compare_long_ext(Z_R0, even_reg, 0);
5598       } else {
5599         compare_long_uni(Z_R0, even_reg, 0);
5600       }
5601       z_lghi(result, 0); // Preserve flags.
5602       z_brne(Ldone);
5603     }
5604   }
5605   // fall through
5606 
5607   bind(Ldone_true);
5608   z_lghi(result, 1); // All characters are equal.
5609   bind(Ldone);
5610 
5611   BLOCK_COMMENT("} array_equals");
5612 
5613   return offset() - block_start;
5614 }
5615 
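     // Semantics sketch (assumed equivalent; encodings per 'ae'):
     //   for (i = 0; i + needlecnt <= haycnt; i++) {
     //     if (haystack[i..i+needlecnt-1] == needle[0..needlecnt-1]) return i;
     //   }
     //   return -1;
     // SRST/SRSTU locates candidates for the first needle character; CLC/CLCLE (or a
     // character loop in the UL case) then checks the remaining needle characters.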
5616 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
5617 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
5618                                             Register needle, Register needlecnt, int needlecntval,
5619                                             Register odd_reg, Register even_reg, int ae) {
5620   int block_start = offset();
5621 
5622   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
5623   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
5624   const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
5625   const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
5626   Label L_needle1, L_Found, L_NotFound;
5627 
5628   BLOCK_COMMENT("string_indexof {");
5629 
5630   if (needle == haystack) {
5631     z_lhi(result, 0);
5632   } else {
5633 
5634   // Load first character of needle (R0 used by search_string instructions).
5635   if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }
5636 
5637   // Compute last haystack addr to use if no match gets found.
5638   if (needlecnt != noreg) { // variable needlecnt
5639     z_ahi(needlecnt, -1); // Remaining characters after first one.
5640     z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
5641     if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
5642   } else { // constant needlecnt
5643     assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
5644     // Compute index succeeding last element to compare.
5645     if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
5646   }
5647 
5648   z_llgfr(haycnt, haycnt); // Clear high half.
5649   z_lgr(result, haystack); // Final result will be computed from needle start pointer.
5650   if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
5651   z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).
5652 
5653   if (h_csize != n_csize) {
5654     assert(ae == StrIntrinsicNode::UL, "Invalid encoding");
5655 
5656     if (needlecnt != noreg || needlecntval != 1) {
5657       if (needlecnt != noreg) {
5658         compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
5659       }
5660 
5661       // Main Loop: UL version (now we have at least 2 characters).
5662       Label L_OuterLoop, L_InnerLoop, L_Skip;
5663       bind(L_OuterLoop); // Search for 1st 2 characters.
5664       z_lgr(Z_R1, haycnt);
5665       MacroAssembler::search_string_uni(Z_R1, result);
5666       z_brc(Assembler::bcondNotFound, L_NotFound);
5667       z_lgr(result, Z_R1);
5668 
5669       z_lghi(Z_R1, n_csize);
5670       z_lghi(even_reg, h_csize);
5671       bind(L_InnerLoop);
5672       z_llgc(odd_reg, Address(needle, Z_R1));
5673       z_ch(odd_reg, Address(result, even_reg));
5674       z_brne(L_Skip);
5675       if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
5676       z_brnl(L_Found);
5677       z_aghi(Z_R1, n_csize);
5678       z_aghi(even_reg, h_csize);
5679       z_bru(L_InnerLoop);
5680 
5681       bind(L_Skip);
5682       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5683       z_bru(L_OuterLoop);
5684     }
5685 
5686   } else {
5687     const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
5688     Label L_clcle;
5689 
5690     if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
5691       if (needlecnt != noreg) {
5692         compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
5693         z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
5694         z_brl(L_needle1);
5695       }
5696 
5697       // Main Loop: clc version (now we have at least 2 characters).
5698       Label L_OuterLoop, CLC_template;
5699       bind(L_OuterLoop); // Search for 1st 2 characters.
5700       z_lgr(Z_R1, haycnt);
5701       if (h_csize == 1) {
5702         MacroAssembler::search_string(Z_R1, result);
5703       } else {
5704         MacroAssembler::search_string_uni(Z_R1, result);
5705       }
5706       z_brc(Assembler::bcondNotFound, L_NotFound);
5707       z_lgr(result, Z_R1);
5708 
5709       if (needlecnt != noreg) {
5710         assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5711         z_exrl(needlecnt, CLC_template);
5712       } else {
5713         z_clc(h_csize, needle_bytes -1, Z_R1, n_csize, needle);
5714       }
5715       z_bre(L_Found);
5716       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5717       z_bru(L_OuterLoop);
5718 
5719       if (needlecnt != noreg) {
5720         bind(CLC_template);
5721         z_clc(h_csize, 0, Z_R1, n_csize, needle);
5722       }
5723     }
5724 
5725     if (needlecnt != noreg || needle_bytes > 256) {
5726       bind(L_clcle);
5727 
5728       // Main Loop: clcle version (now we have at least 256 bytes).
5729       Label L_OuterLoop, CLC_template;
5730       bind(L_OuterLoop); // Search for 1st 2 characters.
5731       z_lgr(Z_R1, haycnt);
5732       if (h_csize == 1) {
5733         MacroAssembler::search_string(Z_R1, result);
5734       } else {
5735         MacroAssembler::search_string_uni(Z_R1, result);
5736       }
5737       z_brc(Assembler::bcondNotFound, L_NotFound);
5738 
5739       add2reg(Z_R0, n_csize, needle);
5740       add2reg(even_reg, h_csize, Z_R1);
5741       z_lgr(result, Z_R1);
5742       if (needlecnt != noreg) {
5743         z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
5744         z_llgfr(odd_reg, needlecnt);
5745       } else {
5746         load_const_optimized(Z_R1, needle_bytes);
5747         if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
5748       }
5749       if (h_csize == 1) {
5750         compare_long_ext(Z_R0, even_reg, 0);
5751       } else {
5752         compare_long_uni(Z_R0, even_reg, 0);
5753       }
5754       z_bre(L_Found);
5755 
5756       if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
5757       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5758       z_bru(L_OuterLoop);
5759     }
5760   }
5761 
5762   if (needlecnt != noreg || needlecntval == 1) {
5763     bind(L_needle1);
5764 
5765     // Single needle character version.
5766     if (h_csize == 1) {
5767       MacroAssembler::search_string(haycnt, result);
5768     } else {
5769       MacroAssembler::search_string_uni(haycnt, result);
5770     }
5771     z_lgr(result, haycnt);
5772     z_brc(Assembler::bcondFound, L_Found);
5773   }
5774 
5775   bind(L_NotFound);
5776   add2reg(result, -1, haystack); // Return -1.
5777 
5778   bind(L_Found); // Return index (or -1 in fallthrough case).
5779   z_sgr(result, haystack);
5780   if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
5781   }
5782   BLOCK_COMMENT("} string_indexof");
5783 
5784   return offset() - block_start;
5785 }
5786 
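     // Semantics sketch (assumed equivalent of the code generated below):
     //   for (i = 0; i < haycnt; i++) { if (haystack[i] == needleChar) return i; }
     //   return -1;
     // Implemented as a single SRST (byte) or SRSTU (char) scan over the search range.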
5787 // early clobber: result
5788 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
5789                                                  Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
5790   int block_start = offset();
5791 
5792   BLOCK_COMMENT("string_indexof_char {");
5793 
5794   if (needle == haystack) {
5795     z_lhi(result, 0);
5796   } else {
5797 
5798   Label Ldone;
5799 
5800   z_llgfr(odd_reg, haycnt);  // Preset loop ctr/searchrange end.
5801   if (needle == noreg) {
5802     load_const_optimized(Z_R0, (unsigned long)needleChar);
5803   } else {
5804     if (is_byte) {
5805       z_llgcr(Z_R0, needle); // First (and only) needle char.
5806     } else {
5807       z_llghr(Z_R0, needle); // First (and only) needle char.
5808     }
5809   }
5810 
5811   if (!is_byte) {
5812     z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
5813   }
5814 
5815   z_lgr(even_reg, haystack); // haystack addr
5816   z_agr(odd_reg, haystack);  // First char after range end.
5817   z_lghi(result, -1);
5818 
5819   if (is_byte) {
5820     MacroAssembler::search_string(odd_reg, even_reg);
5821   } else {
5822     MacroAssembler::search_string_uni(odd_reg, even_reg);
5823   }
5824   z_brc(Assembler::bcondNotFound, Ldone);
5825   if (is_byte) {
5826     if (VM_Version::has_DistinctOpnds()) {
5827       z_sgrk(result, odd_reg, haystack);
5828     } else {
5829       z_sgr(odd_reg, haystack);
5830       z_lgr(result, odd_reg);
5831     }
5832   } else {
5833     z_slgr(odd_reg, haystack);
5834     z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
5835   }
5836 
5837   bind(Ldone);
5838   }
5839   BLOCK_COMMENT("} string_indexof_char");
5840 
5841   return offset() - block_start;
5842 }
5843 
5844 
5845 //-------------------------------------------------
5846 //   Constants (scalar and oop) in constant pool
5847 //-------------------------------------------------
5848 
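     // Typical use (sketch, mirroring load_const_from_toc() below):
     //   int off = store_const_in_toc(a);   // Returns -1 if no CP entry could be created.
     //   if (off == -1) return false;       // Caller must bail out.
     //   load_long_pcrelative(dst, code()->consts()->start() + off);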
5849 // Add a non-relocated constant to the CP.
5850 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
5851   long    value  = val.value();
5852   address tocPos = long_constant(value);
5853 
5854   if (tocPos != NULL) {
5855     int tocOffset = (int)(tocPos - code()->consts()->start());
5856     return tocOffset;
5857   }
5858   // long_constant() returned NULL, so no constant entry has been created.
5859   // In that case, we return a "fatal" offset, just in case that subsequently
5860   // generated access code is executed.
5861   return -1;
5862 }
5863 
5864 // Returns the TOC offset where the address is stored.
5865 // Add a relocated constant to the CP.
5866 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
5867   // Use RelocationHolder::none for the constant pool entry.
5868   // Otherwise we will end up with a failing NativeCall::verify(x),
5869   // where x is the address of the constant pool entry.
5870   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
5871 
5872   if (tocPos != NULL) {
5873     int              tocOffset = (int)(tocPos - code()->consts()->start());
5874     RelocationHolder rsp = oop.rspec();
5875     Relocation      *rel = rsp.reloc();
5876 
5877     // Store toc_offset in relocation, used by call_far_patchable.
5878     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
5879       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
5880     }
5881     // Relocate at the load's pc.
5882     relocate(rsp);
5883 
5884     return tocOffset;
5885   }
5886   // address_constant() returned NULL, so no constant entry has been created.
5887   // In that case, we return a "fatal" offset, just in case that subsequently
5888   // generated access code is executed.
5889   return -1;
5890 }
5891 
5892 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5893   int     tocOffset = store_const_in_toc(a);
5894   if (tocOffset == -1) return false;
5895   address tocPos    = tocOffset + code()->consts()->start();
5896   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5897 
5898   load_long_pcrelative(dst, tocPos);
5899   return true;
5900 }
5901 
5902 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5903   int     tocOffset = store_oop_in_toc(a);
5904   if (tocOffset == -1) return false;
5905   address tocPos    = tocOffset + code()->consts()->start();
5906   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5907 
5908   load_addr_pcrelative(dst, tocPos);
5909   return true;
5910 }
5911 
5912 // If the instruction sequence at the given pc is a load_const_from_toc
5913 // sequence, return the value currently stored at the referenced position
5914 // in the TOC.
5915 intptr_t MacroAssembler::get_const_from_toc(address pc) {
5916 
5917   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5918 
5919   long    offset  = get_load_const_from_toc_offset(pc);
5920   address dataLoc = NULL;
5921   if (is_load_const_from_toc_pcrelative(pc)) {
5922     dataLoc = pc + offset;
5923   } else {
5924     CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
5925     assert(cb && cb->is_nmethod(), "sanity");
5926     nmethod* nm = (nmethod*)cb;
5927     dataLoc = nm->ctable_begin() + offset;
5928   }
5929   return *(intptr_t *)dataLoc;
5930 }
5931 
5932 // If the instruction sequence at the given pc is a load_const_from_toc
5933 // sequence, copy the passed-in new_data value into the referenced
5934 // position in the TOC.
5935 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
5936   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5937 
5938   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
5939   address dataLoc = NULL;
5940   if (is_load_const_from_toc_pcrelative(pc)) {
5941     dataLoc = pc+offset;
5942   } else {
5943     nmethod* nm = CodeCache::find_nmethod(pc);
5944     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
5945     dataLoc = nm->ctable_begin() + offset;
5946   }
5947   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
5948     *(unsigned long *)dataLoc = new_data;
5949   }
5950 }
5951 
5952 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
5953 // site. Verify by calling is_load_const_from_toc() before!!
5954 // Offset is +/- 2**32 -> use long.
5955 long MacroAssembler::get_load_const_from_toc_offset(address a) {
5956   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
5957   //  expected code sequence:
5958   //    z_lgrl(t, simm32);    len = 6
5959   unsigned long inst;
5960   unsigned int  len = get_instruction(a, &inst);
5961   return get_pcrel_offset(inst);
5962 }
5963 
5964 //**********************************************************************************
5965 //  inspection of generated instruction sequences for a particular pattern
5966 //**********************************************************************************
5967 
5968 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
5969 #ifdef ASSERT
5970   unsigned long inst;
5971   unsigned int  len = get_instruction(a+2, &inst);
5972   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
5973     const int range = 128;
5974     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
5975     VM_Version::z_SIGSEGV();
5976   }
5977 #endif
5978   // expected code sequence:
5979   //   z_lgrl(t, relAddr32);    len = 6
5980   //TODO: verify accessed data is in CP, if possible.
5981   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
5982 }
5983 
5984 bool MacroAssembler::is_load_const_from_toc_call(address a) {
5985   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
5986 }
5987 
5988 bool MacroAssembler::is_load_const_call(address a) {
5989   return is_load_const(a) && is_call_byregister(a + load_const_size());
5990 }
5991 
5992 //-------------------------------------------------
5993 //   Emitters for some really CISC instructions
5994 //-------------------------------------------------
5995 
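     // These wrappers emit interruptible instructions. CC==3 means "not finished, resume",
     // so each emitter below follows the same retry pattern (sketch):
     //   bind(retry);
     //   <emit the instruction>;
     //   z_brc(Assembler::bcondOverflow /* CC==3 */, retry);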
5996 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
5997   assert(dst->encoding()%2==0, "must be an even/odd register pair");
5998   assert(src->encoding()%2==0, "must be an even/odd register pair");
5999   assert(pad<256, "must be a padding BYTE");
6000 
6001   Label retry;
6002   bind(retry);
6003   Assembler::z_mvcle(dst, src, pad);
6004   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6005 }
6006 
6007 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
6008   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6009   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6010   assert(pad<256, "must be a padding BYTE");
6011 
6012   Label retry;
6013   bind(retry);
6014   Assembler::z_clcle(left, right, pad, Z_R0);
6015   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6016 }
6017 
6018 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
6019   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6020   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6021   assert(pad<=0xfff, "must be a padding HALFWORD");
6022   assert(VM_Version::has_ETF2(), "instruction must be available");
6023 
6024   Label retry;
6025   bind(retry);
6026   Assembler::z_clclu(left, right, pad, Z_R0);
6027   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6028 }
6029 
6030 void MacroAssembler::search_string(Register end, Register start) {
6031   assert(end->encoding() != 0, "end address must not be in R0");
6032   assert(start->encoding() != 0, "start address must not be in R0");
6033 
6034   Label retry;
6035   bind(retry);
6036   Assembler::z_srst(end, start);
6037   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6038 }
6039 
6040 void MacroAssembler::search_string_uni(Register end, Register start) {
6041   assert(end->encoding() != 0, "end address must not be in R0");
6042   assert(start->encoding() != 0, "start address must not be in R0");
6043   assert(VM_Version::has_ETF3(), "instruction must be available");
6044 
6045   Label retry;
6046   bind(retry);
6047   Assembler::z_srstu(end, start);
6048   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6049 }
6050 
6051 void MacroAssembler::kmac(Register srcBuff) {
6052   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6053   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6054 
6055   Label retry;
6056   bind(retry);
6057   Assembler::z_kmac(Z_R0, srcBuff);
6058   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6059 }
6060 
6061 void MacroAssembler::kimd(Register srcBuff) {
6062   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6063   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6064 
6065   Label retry;
6066   bind(retry);
6067   Assembler::z_kimd(Z_R0, srcBuff);
6068   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6069 }
6070 
6071 void MacroAssembler::klmd(Register srcBuff) {
6072   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6073   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6074 
6075   Label retry;
6076   bind(retry);
6077   Assembler::z_klmd(Z_R0, srcBuff);
6078   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6079 }
6080 
6081 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
6082   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6083   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6084   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6085   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6086   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6087 
6088   Label retry;
6089   bind(retry);
6090   Assembler::z_km(dstBuff, srcBuff);
6091   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6092 }
6093 
6094 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
6095   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6096   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6097   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6098   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6099   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6100 
6101   Label retry;
6102   bind(retry);
6103   Assembler::z_kmc(dstBuff, srcBuff);
6104   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6105 }
6106 
6107 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
6108   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6109 
6110   Label retry;
6111   bind(retry);
6112   Assembler::z_cksm(crcBuff, srcBuff);
6113   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6114 }
6115 
6116 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
6117   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6118   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6119 
6120   Label retry;
6121   bind(retry);
6122   Assembler::z_troo(r1, r2, m3);
6123   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6124 }
6125 
6126 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
6127   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6128   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6129 
6130   Label retry;
6131   bind(retry);
6132   Assembler::z_trot(r1, r2, m3);
6133   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6134 }
6135 
6136 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
6137   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6138   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6139 
6140   Label retry;
6141   bind(retry);
6142   Assembler::z_trto(r1, r2, m3);
6143   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6144 }
6145 
6146 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
6147   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6148   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6149 
6150   Label retry;
6151   bind(retry);
6152   Assembler::z_trtt(r1, r2, m3);
6153   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6154 }
6155 
6156 
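     // Pseudocode sketch of the profile update performed below (ReceiverTypeData rows):
     //   for (row : rows) { if (row.receiver == receiver_klass) { row.count++; return; } }
     //   for (row : rows) { if (row.receiver == 0) { row.receiver = receiver_klass; row.count++; return; } }
     //   if (is_virtual_call) { data.count++; }   // All rows occupied: polymorphic case.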
6157 void MacroAssembler::generate_type_profiling(const Register Rdata,
6158                                              const Register Rreceiver_klass,
6159                                              const Register Rwanted_receiver_klass,
6160                                              const Register Rmatching_row,
6161                                              bool is_virtual_call) {
6162   const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
6163                        in_bytes(ReceiverTypeData::receiver_offset(0));
6164   const int num_rows = ReceiverTypeData::row_limit();
6165   NearLabel found_free_row;
6166   NearLabel do_increment;
6167   NearLabel found_no_slot;
6168 
6169   BLOCK_COMMENT("type profiling {");
6170 
6171   // search for:
6172   //    a) The type given in Rwanted_receiver_klass.
6173   //    b) The *first* empty row.
6174 
6175   // First search for a) only, just running over b) with no regard.
6176   // This is possible because
6177   //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
6178   // is never true (receiver_class can't be zero).
6179   for (int row_num = 0; row_num < num_rows; row_num++) {
6180     // Row_offset should be a well-behaved positive number. The generated code relies
6181     // on that to keep the code size constant. add2reg() could handle all row_offset values,
6182     // but would have to vary the generated code size to do so.
6183     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
6184     assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
6185 
6186     // Is Rwanted_receiver_klass in this row?
6187     if (VM_Version::has_CompareBranch()) {
6188       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
6189       // Rmatching_row = Rdata + row_offset;
6190       add2reg(Rmatching_row, row_offset, Rdata);
6191       // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
6192       compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
6193     } else {
6194       add2reg(Rmatching_row, row_offset, Rdata);
6195       z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
6196       z_bre(do_increment);
6197     }
6198   }
6199 
6200   // We did not find a match, so let's now search for b).
6201 
6202   // We could save the first calculation of Rmatching_row if we would search for a) in reverse order.
6203   // We would then end up here with Rmatching_row containing the value for row_num == 0.
6204   // We would not see much benefit, if any at all, because the CPU can schedule
6205   // two instructions together with a branch anyway.
6206   for (int row_num = 0; row_num < num_rows; row_num++) {
6207     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
6208 
6209     // Does this row have a zero receiver_klass, i.e. is it empty?
6210     if (VM_Version::has_CompareBranch()) {
6211       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
6212       // Rmatching_row = Rdata + row_offset
6213       add2reg(Rmatching_row, row_offset, Rdata);
6214       // if (*row_recv == (intptr_t) 0) goto found_free_row
6215       compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
6216     } else {
6217       add2reg(Rmatching_row, row_offset, Rdata);
6218       load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
6219       z_bre(found_free_row);  // zero -> Found a free row.
6220     }
6221   }
6222 
6223   // No match, no empty row found.
6224   // Increment total counter to indicate polymorphic case.
6225   if (is_virtual_call) {
6226     add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
6227   }
6228   z_bru(found_no_slot);
6229 
6230   // Here we found an empty row, but we have not found Rwanted_receiver_klass.
6231   // Rmatching_row holds the address to the first empty row.
6232   bind(found_free_row);
6233   // Store receiver_klass into empty slot.
6234   z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
6235 
6236   // Increment the counter of Rmatching_row.
6237   bind(do_increment);
6238   ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
6239   add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
6240 
6241   bind(found_no_slot);
6242 
6243   BLOCK_COMMENT("} type profiling");
6244 }
6245 
6246 //---------------------------------------
6247 // Helpers for Intrinsic Emitters
6248 //---------------------------------------
6249 
6250 /**
6251  * uint32_t crc;
6252  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6253  */
6254 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
6255   assert_different_registers(crc, table, tmp);
6256   assert_different_registers(val, table);
6257   if (crc == val) {      // Must rotate first to use the unmodified value.
6258     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6259     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6260   } else {
6261     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6262     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6263   }
6264   z_x(crc, Address(table, tmp, 0));
6265 }
6266 
6267 /**
6268  * uint32_t crc;
6269  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6270  */
6271 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
6272   fold_byte_crc32(crc, crc, table, tmp);
6273 }
6274 
6275 /**
6276  * Emits code to update CRC-32 with a byte value according to constants in table.
6277  *
6278  * @param [in,out]crc Register containing the crc.
6279  * @param [in]val     Register containing the byte to fold into the CRC.
6280  * @param [in]table   Register containing the table of crc constants.
6281  *
6282  * uint32_t crc;
6283  * val = crc_table[(val ^ crc) & 0xFF];
6284  * crc = val ^ (crc >> 8);
6285  */
6286 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
6287   z_xr(val, crc);
6288   fold_byte_crc32(crc, val, table, val);
6289 }
6290 
6291 
6292 /**
6293  * @param crc   register containing existing CRC (32-bit)
6294  * @param buf   register pointing to input byte buffer (byte*)
6295  * @param len   register containing number of bytes
6296  * @param table register pointing to CRC table
6297  */
6298 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
6299   assert_different_registers(crc, buf, len, table, data);
6300 
6301   Label L_mainLoop, L_done;
6302   const int mainLoop_stepping = 1;
6303 
6304   // Process all bytes in a single-byte loop.
6305   z_ltr(len, len);
6306   z_brnh(L_done);
6307 
6308   bind(L_mainLoop);
6309     z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6310     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
6311     update_byte_crc32(crc, data, table);
6312     z_brct(len, L_mainLoop);                // Iterate.
6313 
6314   bind(L_done);
6315 }
6316 
6317 /**
6318  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
6319  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
6320  *
6321  */
6322 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
6323                                         Register t0,  Register t1,  Register t2,    Register t3) {
6324   // This is what we implement (the DOBIG4 part):
6325   //
6326   // #define DOBIG4 c ^= *++buf4; \
6327   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
6328   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
6329   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
6330   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
6331   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
6332   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
6333   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
6334   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
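       // Each column holds CRC32_COLUMN_SIZE 4-byte entries, so ix0..ix3 are the byte
       // offsets of columns 4..7 within the table. The indices extracted below are
       // pre-shifted by 2 to scale for the 4-byte entry size.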
6335 
6336   // XOR crc with next four bytes of buffer.
6337   lgr_if_needed(t0, crc);
6338   z_x(t0, Address(buf, bufDisp));
6339   if (bufInc != 0) {
6340     add2reg(buf, bufInc);
6341   }
6342 
6343   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
6344   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
6345   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
6346   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
6347   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
6348 
6349   // XOR indexed table values to calculate updated crc.
6350   z_ly(t2, Address(table, t2, (intptr_t)ix1));
6351   z_ly(t0, Address(table, t0, (intptr_t)ix3));
6352   z_xy(t2, Address(table, t3, (intptr_t)ix0));
6353   z_xy(t0, Address(table, t1, (intptr_t)ix2));
6354   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
6355   lgr_if_needed(crc, t0);
6356 }
6357 
6358 /**
6359  * @param crc   register containing existing CRC (32-bit)
6360  * @param buf   register pointing to input byte buffer (byte*)
6361  * @param len   register containing number of bytes
6362  * @param table register pointing to CRC table
6363  *
6364  * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
6365  */
6366 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
6367                                         Register t0,  Register t1,  Register t2,  Register t3,
6368                                         bool invertCRC) {
6369   assert_different_registers(crc, buf, len, table);
6370 
6371   Label L_mainLoop, L_tail;
6372   Register  data = t0;
6373   Register  ctr  = Z_R0;
6374   const int mainLoop_stepping = 8;
6375   const int tailLoop_stepping = 1;
6376   const int log_stepping      = exact_log2(mainLoop_stepping);
6377 
6378   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6379   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6380   // The situation itself is detected and handled correctly by the conditional branches
6381   // following aghi(len, -stepping) and aghi(len, +stepping).
6382 
6383   if (invertCRC) {
6384     not_(crc, noreg, false);           // 1s complement of crc
6385   }
6386 
6387 #if 0
6388   {
6389     // Pre-mainLoop alignment did not show any positive effect on performance.
6390     // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.
6391 
6392     z_cghi(len, mainLoop_stepping);    // Alignment is useless for short data streams.
6393     z_brnh(L_tail);
6394 
6395     // Align buf to word (4-byte) boundary.
6396     z_lcr(ctr, buf);
6397     rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
6398     z_sgfr(len, ctr);                  // Remaining len after alignment.
6399 
6400     update_byteLoop_crc32(crc, buf, ctr, table, data);
6401   }
6402 #endif
6403 
6404   // Check for short (<mainLoop_stepping bytes) buffer.
6405   z_srag(ctr, len, log_stepping);
6406   z_brnh(L_tail);
6407 
6408   z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6409   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6410 
6411   BIND(L_mainLoop);
6412     update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
6413     update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
6414     z_brct(ctr, L_mainLoop); // Iterate.
6415 
6416   z_lrvr(crc, crc);          // Reverse byte order back to original.
6417 
6418   // Process last few (<8) bytes of buffer.
6419   BIND(L_tail);
6420   update_byteLoop_crc32(crc, buf, len, table, data);
6421 
6422   if (invertCRC) {
6423     not_(crc, noreg, false);           // 1s complement of crc
6424   }
6425 }
6426 
6427 /**
6428  * @param crc   register containing existing CRC (32-bit)
6429  * @param buf   register pointing to input byte buffer (byte*)
6430  * @param len   register containing number of bytes
6431  * @param table register pointing to CRC table
6432  *
6433  * Uses Z_R10..Z_R13 as work registers. Must be saved/restored by caller!
6434  */
6435 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6436                                         Register t0,  Register t1,  Register t2,  Register t3,
6437                                         bool invertCRC) {
6438   assert_different_registers(crc, buf, len, table);
6439 
6440   Label L_mainLoop, L_tail;
6441   Register  data = t0;
6442   Register  ctr  = Z_R0;
6443   const int mainLoop_stepping = 4;
6444   const int log_stepping      = exact_log2(mainLoop_stepping);
6445 
6446   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6447   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6448   // The situation itself is detected and handled correctly by the conditional branches
6449   // following aghi(len, -stepping) and aghi(len, +stepping).
6450 
6451   if (invertCRC) {
6452     not_(crc, noreg, false);           // 1s complement of crc
6453   }
6454 
6455   // Check for short (<4 bytes) buffer.
6456   z_srag(ctr, len, log_stepping);
6457   z_brnh(L_tail);
6458 
6459   z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
6460   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6461 
6462   BIND(L_mainLoop);
6463     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6464     z_brct(ctr, L_mainLoop); // Iterate.
6465 
6466   z_lrvr(crc, crc);          // Reverse byte order back to original.
6467 
6468   // Process last few (<4) bytes of buffer.
6469   BIND(L_tail);
6470   update_byteLoop_crc32(crc, buf, len, table, data);
6471 
6472   if (invertCRC) {
6473     not_(crc, noreg, false);           // 1s complement of crc
6474   }
6475 }
6476 
6477 /**
6478  * @param crc   register containing existing CRC (32-bit)
6479  * @param buf   register pointing to input byte buffer (byte*)
6480  * @param len   register containing number of bytes
6481  * @param table register pointing to CRC table
6482  */
6483 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
6484                                         Register t0,  Register t1,  Register t2,  Register t3,
6485                                         bool invertCRC) {
6486   assert_different_registers(crc, buf, len, table);
6487   Register data = t0;
6488 
6489   if (invertCRC) {
6490     not_(crc, noreg, false);           // 1s complement of crc
6491   }
6492 
6493   update_byteLoop_crc32(crc, buf, len, table, data);
6494 
6495   if (invertCRC) {
6496     not_(crc, noreg, false);           // 1s complement of crc
6497   }
6498 }
6499 
6500 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
6501                                              bool invertCRC) {
6502   assert_different_registers(crc, buf, len, table, tmp);
6503 
6504   if (invertCRC) {
6505     not_(crc, noreg, false);           // 1s complement of crc
6506   }
6507 
6508   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6509   update_byte_crc32(crc, tmp, table);
6510 
6511   if (invertCRC) {
6512     not_(crc, noreg, false);           // 1s complement of crc
6513   }
6514 }
6515 
6516 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
6517                                                 bool invertCRC) {
6518   assert_different_registers(crc, val, table);
6519 
6520   if (invertCRC) {
6521     not_(crc, noreg, false);           // 1s complement of crc
6522   }
6523 
6524   update_byte_crc32(crc, val, table);
6525 
6526   if (invertCRC) {
6527     not_(crc, noreg, false);           // 1s complement of crc
6528   }
6529 }
6530 
6531 //
6532 // Code for BigInteger::multiplyToLen() intrinsic.
6533 //
6534 
6535 // dest_lo += src1 + src2
6536 // dest_hi += carry1 + carry2
6537 // Z_R7 is destroyed!
6538 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
6539                                      Register src1, Register src2) {
6540   clear_reg(Z_R7);
6541   z_algr(dest_lo, src1);
6542   z_alcgr(dest_hi, Z_R7);
6543   z_algr(dest_lo, src2);
6544   z_alcgr(dest_hi, Z_R7);
6545 }
6546 
6547 // Multiply 64 bit by 64 bit first loop.
6548 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
6549                                            Register x_xstart,
6550                                            Register y, Register y_idx,
6551                                            Register z,
6552                                            Register carry,
6553                                            Register product,
6554                                            Register idx, Register kdx) {
6555   // jlong carry, x[], y[], z[];
6556   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6557   //   huge_128 product = y[idx] * x[xstart] + carry;
6558   //   z[kdx] = (jlong)product;
6559   //   carry  = (jlong)(product >>> 64);
6560   // }
6561   // z[xstart] = carry;
6562 
6563   Label L_first_loop, L_first_loop_exit;
6564   Label L_one_x, L_one_y, L_multiply;
6565 
6566   z_aghi(xstart, -1);
6567   z_brl(L_one_x);   // Special case: length of x is 1.
6568 
6569   // Load next two integers of x.
6570   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6571   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6572 
6573 
6574   bind(L_first_loop);
6575 
6576   z_aghi(idx, -1);
6577   z_brl(L_first_loop_exit);
6578   z_aghi(idx, -1);
6579   z_brl(L_one_y);
6580 
6581   // Load next two integers of y.
6582   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
6583   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
6584 
6585 
6586   bind(L_multiply);
6587 
6588   Register multiplicand = product->successor();
6589   Register product_low = multiplicand;
6590 
6591   lgr_if_needed(multiplicand, x_xstart);
6592   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
6593   clear_reg(Z_R7);
6594   z_algr(product_low, carry); // Add carry to result.
6595   z_alcgr(product, Z_R7);     // Add carry of the last addition.
6596   add2reg(kdx, -2);
6597 
6598   // Store result.
6599   z_sllg(Z_R7, kdx, LogBytesPerInt);
6600   reg2mem_opt(product_low, Address(z, Z_R7, 0));
6601   lgr_if_needed(carry, product);
6602   z_bru(L_first_loop);
6603 
6604 
6605   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
6606 
6607   clear_reg(y_idx);
6608   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
6609   z_bru(L_multiply);
6610 
6611 
6612   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
6613 
6614   clear_reg(x_xstart);
6615   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6616   z_bru(L_first_loop);
6617 
6618   bind(L_first_loop_exit);
6619 }
6620 
6621 // Multiply 64 bit by 64 bit and add 128 bit.
6622 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
6623                                             Register z,
6624                                             Register yz_idx, Register idx,
6625                                             Register carry, Register product,
6626                                             int offset) {
6627   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
6628   // z[kdx] = (jlong)product;
6629 
6630   Register multiplicand = product->successor();
6631   Register product_low = multiplicand;
6632 
6633   z_sllg(Z_R7, idx, LogBytesPerInt);
6634   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
6635 
6636   lgr_if_needed(multiplicand, x_xstart);
6637   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6638   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
6639 
6640   add2_with_carry(product, product_low, carry, yz_idx);
6641 
6642   z_sllg(Z_R7, idx, LogBytesPerInt);
6643   reg2mem_opt(product_low, Address(z, Z_R7, offset));
6644 
6645 }
6646 
6647 // Multiply 128 bit by 128 bit. Unrolled inner loop.
6648 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
6649                                              Register y, Register z,
6650                                              Register yz_idx, Register idx,
6651                                              Register jdx,
6652                                              Register carry, Register product,
6653                                              Register carry2) {
6654   // jlong carry, x[], y[], z[];
6655   // int kdx = ystart+1;
6656   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
6657   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
6658   //   z[kdx+idx+1] = (jlong)product;
6659   //   jlong carry2 = (jlong)(product >>> 64);
6660   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
6661   //   z[kdx+idx] = (jlong)product;
6662   //   carry = (jlong)(product >>> 64);
6663   // }
6664   // idx += 2;
6665   // if (idx > 0) {
6666   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
6667   //   z[kdx+idx] = (jlong)product;
6668   //   carry = (jlong)(product >>> 64);
6669   // }
6670 
6671   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
6672 
6673   // scale the index
6674   lgr_if_needed(jdx, idx);
6675   and_imm(jdx, 0xfffffffffffffffcL);
6676   rshift(jdx, 2);
6677 
6678 
6679   bind(L_third_loop);
6680 
6681   z_aghi(jdx, -1);
6682   z_brl(L_third_loop_exit);
6683   add2reg(idx, -4);
6684 
6685   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
6686   lgr_if_needed(carry2, product);
6687 
6688   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
6689   lgr_if_needed(carry, product);
6690   z_bru(L_third_loop);
6691 
6692 
6693   bind(L_third_loop_exit);  // Handle any left-over operand parts.
6694 
6695   and_imm(idx, 0x3);
6696   z_brz(L_post_third_loop_done);
6697 
6698   Label L_check_1;
6699 
6700   z_aghi(idx, -2);
6701   z_brl(L_check_1);
6702 
6703   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
6704   lgr_if_needed(carry, product);
6705 
6706 
6707   bind(L_check_1);
6708 
6709   add2reg(idx, 0x2);
6710   and_imm(idx, 0x1);
6711   z_aghi(idx, -1);
6712   z_brl(L_post_third_loop_done);
6713 
6714   Register   multiplicand = product->successor();
6715   Register   product_low = multiplicand;
6716 
6717   z_sllg(Z_R7, idx, LogBytesPerInt);
6718   clear_reg(yz_idx);
6719   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
6720   lgr_if_needed(multiplicand, x_xstart);
6721   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6722   clear_reg(yz_idx);
6723   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
6724 
6725   add2_with_carry(product, product_low, yz_idx, carry);
6726 
6727   z_sllg(Z_R7, idx, LogBytesPerInt);
6728   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
6729   rshift(product_low, 32);
6730 
6731   lshift(product, 32);
6732   z_ogr(product_low, product);
6733   lgr_if_needed(carry, product_low);
6734 
6735   bind(L_post_third_loop_done);
6736 }
6737 
6738 void MacroAssembler::multiply_to_len(Register x, Register xlen,
6739                                      Register y, Register ylen,
6740                                      Register z,
6741                                      Register tmp1, Register tmp2,
6742                                      Register tmp3, Register tmp4,
6743                                      Register tmp5) {
6744   ShortBranchVerifier sbv(this);
6745 
6746   assert_different_registers(x, xlen, y, ylen, z,
6747                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
6748   assert_different_registers(x, xlen, y, ylen, z,
6749                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
6750 
6751   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6752 
6753   // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
6754   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
6755 
6756   const Register idx = tmp1;
6757   const Register kdx = tmp2;
6758   const Register xstart = tmp3;
6759 
6760   const Register y_idx = tmp4;
6761   const Register carry = tmp5;
6762   const Register product  = Z_R0_scratch;
6763   const Register x_xstart = Z_R8;
6764 
6765   // First Loop.
6766   //
6767   //   final static long LONG_MASK = 0xffffffffL;
6768   //   int xstart = xlen - 1;
6769   //   int ystart = ylen - 1;
6770   //   long carry = 0;
6771   //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6772   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
6773   //     z[kdx] = (int)product;
6774   //     carry = product >>> 32;
6775   //   }
6776   //   z[xstart] = (int)carry;
6777   //
6778 
6779   lgr_if_needed(idx, ylen);  // idx = ylen
6780   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
6781   clear_reg(carry);          // carry = 0
6782 
6783   Label L_done;
6784 
6785   lgr_if_needed(xstart, xlen);
6786   z_aghi(xstart, -1);
6787   z_brl(L_done);
6788 
6789   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
6790 
6791   NearLabel L_second_loop;
6792   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
6793 
6794   NearLabel L_carry;
6795   z_aghi(kdx, -1);
6796   z_brz(L_carry);
6797 
6798   // Store lower 32 bits of carry.
6799   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6800   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6801   rshift(carry, 32);
6802   z_aghi(kdx, -1);
6803 
6804 
6805   bind(L_carry);
6806 
6807   // Store upper 32 bits of carry.
6808   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6809   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6810 
6811   // Second and third (nested) loops.
6812   //
6813   // for (int i = xstart-1; i >= 0; i--) { // Second loop
6814   //   carry = 0;
6815   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
6816   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
6817   //                    (z[k] & LONG_MASK) + carry;
6818   //     z[k] = (int)product;
6819   //     carry = product >>> 32;
6820   //   }
6821   //   z[i] = (int)carry;
6822   // }
6823   //
6824   // i = xstart (tmp3), jdx = tmp1, kdx = tmp2, carry = tmp5, x[i] = x_xstart
6825 
6826   const Register jdx = tmp1;
6827 
6828   bind(L_second_loop);
6829 
6830   clear_reg(carry);           // carry = 0;
6831   lgr_if_needed(jdx, ylen);   // j = ystart+1
6832 
6833   z_aghi(xstart, -1);         // i = xstart-1;
6834   z_brl(L_done);
6835 
6836   // Use free slots in the current stackframe instead of push/pop.
6837   Address zsave(Z_SP, _z_abi(carg_1));
6838   reg2mem_opt(z, zsave);
6839 
6840 
6841   Label L_last_x;
6842 
6843   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6844   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
6845   z_aghi(xstart, -1);                           // i = xstart-1;
6846   z_brl(L_last_x);
6847 
6848   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6849   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6850 
6851 
6852   Label L_third_loop_prologue;
6853 
6854   bind(L_third_loop_prologue);
6855 
6856   Address xsave(Z_SP, _z_abi(carg_2));
6857   Address xlensave(Z_SP, _z_abi(carg_3));
6858   Address ylensave(Z_SP, _z_abi(carg_4));
6859 
6860   reg2mem_opt(x, xsave);
6861   reg2mem_opt(xstart, xlensave);
6862   reg2mem_opt(ylen, ylensave);
6863 
6864 
6865   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
6866 
6867   mem2reg_opt(z, zsave);
6868   mem2reg_opt(x, xsave);
6869   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
6870   mem2reg_opt(ylen, ylensave);
6871 
6872   add2reg(tmp3, 1, xlen);
6873   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6874   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6875   z_aghi(tmp3, -1);
6876   z_brl(L_done);
6877 
6878   rshift(carry, 32);
6879   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6880   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6881   z_bru(L_second_loop);
6882 
6883   // Next infrequent code is moved outside loops.
6884   bind(L_last_x);
6885 
6886   clear_reg(x_xstart);
6887   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6888   z_bru(L_third_loop_prologue);
6889 
6890   bind(L_done);
6891 
6892   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6893 }
6894 
6895 #ifndef PRODUCT
6896 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
6897 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
6898   Label ok;
6899   if (check_equal) {
6900     z_bre(ok);
6901   } else {
6902     z_brne(ok);
6903   }
6904   stop(msg, id);
6905   bind(ok);
6906 }
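
// Hypothetical usage sketch (not taken from this file): establish the CC first,
// then assert on it, e.g.
//   z_cghi(reg, 0);                                      // compare, sets CC
//   asm_assert(true, "reg expected to be zero", 0x1234); // stops if CC is "not equal"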
6907 
6908 // Assert if CC indicates "low".
6909 void MacroAssembler::asm_assert_low(const char *msg, int id) {
6910   Label ok;
6911   z_brnl(ok);
6912   stop(msg, id);
6913   bind(ok);
6914 }
6915 
6916 // Assert if CC indicates "high".
6917 void MacroAssembler::asm_assert_high(const char *msg, int id) {
6918   Label ok;
6919   z_brnh(ok);
6920   stop(msg, id);
6921   bind(ok);
6922 }
6923 
6924 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
6925 // generate non-relocatable code.
6926 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
6927   Label ok;
6928   if (check_equal) { z_bre(ok); }
6929   else             { z_brne(ok); }
6930   stop_static(msg, id);
6931   bind(ok);
6932 }
6933 
6934 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
6935                                           Register mem_base, const char* msg, int id) {
6936   switch (size) {
6937     case 4:
6938       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
6939       break;
6940     case 8:
6941       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
6942       break;
6943     default:
6944       ShouldNotReachHere();
6945   }
6946   if (allow_relocation) { asm_assert(check_equal, msg, id); }
6947   else                  { asm_assert_static(check_equal, msg, id); }
6948 }
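
// Hypothetical usage sketch: check that an 8-byte field at byte offset 'field_offset'
// (an assumed constant) of the object held in Z_ARG1 is still zero, with relocatable code:
//   asm_assert_mems_zero(true, true, 8, field_offset, Z_ARG1, "field not zero", 0x2345);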
6949 
6950 // Check the condition
6951 //   expected_size == FP - SP
6952 // after transformation:
6953 //   expected_size - FP + SP == 0
6954 // Destroys Register expected_size if no tmp register is passed.
6955 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
6956   if (tmp == noreg) {
6957     tmp = expected_size;
6958   } else {
6959     if (tmp != expected_size) {
6960       z_lgr(tmp, expected_size);
6961     }
6962   }
6963   z_algr(tmp, Z_SP);
6964   z_slg(tmp, 0, Z_R0, Z_SP);
6965   asm_assert_eq(msg, id);
6966 }
6967 #endif // !PRODUCT
6968 
6969 void MacroAssembler::verify_thread() {
6970   if (VerifyThread) {
6971     unimplemented("", 117);
6972   }
6973 }
6974 
6975 // Plausibility check for oops.
6976 void MacroAssembler::verify_oop(Register oop, const char* msg) {
6977   if (!VerifyOops) return;
6978 
6979   BLOCK_COMMENT("verify_oop {");
6980   Register tmp = Z_R0;
6981   unsigned int nbytes_save = 5*BytesPerWord;
6982   address entry = StubRoutines::verify_oop_subroutine_entry_address();
6983 
6984   save_return_pc();
6985   push_frame_abi160(nbytes_save);
6986   z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
6987 
6988   z_lgr(Z_ARG2, oop);
6989   load_const(Z_ARG1, (address) msg);
6990   load_const(Z_R1, entry);
6991   z_lg(Z_R1, 0, Z_R1);
6992   call_c(Z_R1);
6993 
6994   z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
6995   pop_frame();
6996   restore_return_pc();
6997 
6998   BLOCK_COMMENT("} verify_oop ");
6999 }
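
// Hypothetical usage sketch: with -XX:+VerifyOops, plausibility-check a register
// before it is used as an oop, e.g.
//   verify_oop(Z_ARG2, "broken oop in receiver");
// With VerifyOops disabled, no code is emitted.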
7000 
7001 const char* MacroAssembler::stop_types[] = {
7002   "stop",
7003   "untested",
7004   "unimplemented",
7005   "shouldnotreachhere"
7006 };
7007 
7008 static void stop_on_request(const char* tp, const char* msg) {
7009   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
7010   guarantee(false, "Z assembly code requires stop: %s", msg);
7011 }
7012 
7013 void MacroAssembler::stop(int type, const char* msg, int id) {
7014   BLOCK_COMMENT(err_msg("stop: %s {", msg));
7015 
7016   // Setup arguments.
7017   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
7018   load_const(Z_ARG2, (void*) msg);
7019   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
7020   save_return_pc();  // Saves return pc Z_R14.
7021   push_frame_abi160(0);
7022   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7023   // The plain disassembler does not recognize illtrap. It instead displays
7024 // a 32-bit value. Issuing two illtraps ensures the disassembler finds
7025   // the proper beginning of the next instruction.
7026   z_illtrap(); // Illegal instruction.
7027   z_illtrap(); // Illegal instruction.
7028 
7029   BLOCK_COMMENT(" } stop");
7030 }
7031 
7032 // Special version of stop() for code size reduction.
7033 // Reuses the previously generated call sequence, if any.
7034 // Generates the call sequence on its own, if necessary.
7035 // Note: This code will work only in non-relocatable code!
7036 //       The relative address of the data elements (arg1, arg2) must not change.
7037 //       The reentry point must not move relative to its users. This prerequisite
7038 //       should be given for "hand-written" code, if all chain calls are in the same code blob.
7039 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
7040 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
7041   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
7042 
7043   // Setup arguments.
7044   if (allow_relocation) {
7045     // Relocatable version (for comparison purposes). Remove after some time.
7046     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
7047     load_const(Z_ARG2, (void*) msg);
7048   } else {
7049     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
7050     load_absolute_address(Z_ARG2, (address)msg);
7051   }
7052   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
7053     BLOCK_COMMENT("branch to reentry point:");
7054     z_brc(bcondAlways, reentry);
7055   } else {
7056     BLOCK_COMMENT("reentry point:");
7057     reentry = pc();      // Re-entry point for subsequent stop calls.
7058     save_return_pc();    // Saves return pc Z_R14.
7059     push_frame_abi160(0);
7060     if (allow_relocation) {
7061       reentry = NULL;    // Prevent reentry if code relocation is allowed.
7062       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7063     } else {
7064       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
7065     }
7066     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
7067   }
7068   BLOCK_COMMENT(" } stop_chain");
7069 
7070   return reentry;
7071 }
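
// Hypothetical chaining sketch (non-relocatable code only): the first call emits the
// full stop sequence and returns its reentry point; subsequent calls in the same blob
// reuse it via a short branch:
//   address reentry = NULL;
//   reentry = stop_chain(reentry, 0 /* "stop" */, "first check failed",  0, false);
//   reentry = stop_chain(reentry, 0 /* "stop" */, "second check failed", 0, false);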
7072 
7073 // Special version of stop() for code size reduction.
7074 // Assumes constant relative addresses for data and runtime call.
7075 void MacroAssembler::stop_static(int type, const char* msg, int id) {
7076   stop_chain(NULL, type, msg, id, false);
7077 }
7078 
7079 void MacroAssembler::stop_subroutine() {
7080   unimplemented("stop_subroutine", 710);
7081 }
7082 
7083 // Prints msg to stdout from within generated code.
7084 void MacroAssembler::warn(const char* msg) {
7085   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
7086   load_absolute_address(Z_R1, (address) warning);
7087   load_absolute_address(Z_ARG1, (address) msg);
7088   (void) call(Z_R1);
7089   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
7090 }
7091 
7092 #ifndef PRODUCT
7093 
7094 // Write pattern 0x0101010101010101 in region [low-before, high+after].
7095 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
7096   if (!ZapEmptyStackFields) return;
7097   BLOCK_COMMENT("zap memory region {");
7098   load_const_optimized(val, 0x0101010101010101);
7099   int size = before + after;
7100   if (low == high && size < 5 && size > 0) {
7101     int offset = -before*BytesPerWord;
7102     for (int i = 0; i < size; ++i) {
7103       z_stg(val, Address(low, offset));
7104       offset += BytesPerWord;
7105     }
7106   } else {
7107     add2reg(addr, -before*BytesPerWord, low);
7108     if (after) {
7109 #ifdef ASSERT
7110       jlong check = after * BytesPerWord;
7111       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
7112 #endif
7113       add2reg(high, after * BytesPerWord);
7114     }
7115     NearLabel loop;
7116     bind(loop);
7117     z_stg(val, Address(addr));
7118     add2reg(addr, 8);
7119     compare64_and_branch(addr, high, bcondNotHigh, loop);
7120     if (after) {
7121       add2reg(high, -after * BytesPerWord);
7122     }
7123   }
7124   BLOCK_COMMENT("} zap memory region");
7125 }
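
// Hypothetical usage sketch (debug builds, only active with ZapEmptyStackFields):
// overwrite the words between two frame pointers with the 0x01.. pattern so stale
// slots stand out in a debugger; 'old_sp' is an assumed register holding the previous SP:
//   zap_from_to(Z_SP, old_sp, Z_R0_scratch, Z_R1_scratch, 0, 0);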
7126 #endif // !PRODUCT
7127 
7128 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
7129   _masm = masm;
7130   _masm->load_absolute_address(_rscratch, (address)flag_addr);
7131   _masm->load_and_test_int(_rscratch, Address(_rscratch));
7132   if (value) {
7133     _masm->z_brne(_label); // Skip if true, i.e. != 0.
7134   } else {
7135     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
7136   }
7137 }
7138 
7139 SkipIfEqual::~SkipIfEqual() {
7140   _masm->bind(_label);
7141 }
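
// Hypothetical usage sketch: emit a block of code that is executed only when a bool
// VM flag is true at run time; the guarded instructions are skipped when the flag
// equals the 'value' argument (here: skipped when the flag is false):
//   {
//     SkipIfEqual skip(this, &DTraceMethodProbes, false, Z_R1_scratch);
//     // ... code emitted unconditionally, but executed only if the flag is true ...
//   }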