1 /*
   2  * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/barrierSet.hpp"
  31 #include "gc/shared/barrierSetAssembler.hpp"
  32 #include "gc/shared/cardTableBarrierSet.hpp"
  33 #include "gc/shared/collectedHeap.inline.hpp"
  34 #include "interpreter/interpreter.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.hpp"
  37 #include "oops/accessDecorators.hpp"
  38 #include "oops/compressedOops.inline.hpp"
  39 #include "oops/klass.inline.hpp"
  40 #ifdef COMPILER2
  41 #include "opto/compile.hpp"
  42 #include "opto/intrinsicnode.hpp"
  43 #include "opto/matcher.hpp"
  44 #endif
  45 #include "prims/methodHandles.hpp"
  46 #include "registerSaver_s390.hpp"
  47 #include "runtime/biasedLocking.hpp"
  48 #include "runtime/icache.hpp"
  49 #include "runtime/interfaceSupport.inline.hpp"
  50 #include "runtime/objectMonitor.hpp"
  51 #include "runtime/os.hpp"
  52 #include "runtime/safepoint.hpp"
  53 #include "runtime/safepointMechanism.hpp"
  54 #include "runtime/sharedRuntime.hpp"
  55 #include "runtime/stubRoutines.hpp"
  56 #include "utilities/events.hpp"
  57 #include "utilities/macros.hpp"
  58 
  59 #include <ucontext.h>
  60 
  61 #define BLOCK_COMMENT(str) block_comment(str)
  62 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  63 
  64 // Move 32-bit register if destination and source are different.
  65 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  66   if (rs != rd) { z_lr(rd, rs); }
  67 }
  68 
  69 // Move register if destination and source are different.
  70 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  71   if (rs != rd) { z_lgr(rd, rs); }
  72 }
  73 
  74 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  75 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  76   if (rs != rd) { z_llgfr(rd, rs); }
  77 }
  78 
  79 // Move float register if destination and source are different.
  80 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  81   if (rs != rd) { z_ldr(rd, rs); }
  82 }
  83 
  84 // Move integer register if destination and source are different.
  85 // It is assumed that shorter-than-int types are already
  86 // appropriately sign-extended.
  87 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  88                                         BasicType src_type) {
  89   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  90   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  91 
  92   if (dst_type == src_type) {
  93     lgr_if_needed(dst, src); // Just move all 64 bits.
  94     return;
  95   }
  96 
  97   switch (dst_type) {
  98     // Do not support these types for now.
  99     //  case T_BOOLEAN:
 100     case T_BYTE:  // signed byte
 101       switch (src_type) {
 102         case T_INT:
 103           z_lgbr(dst, src);
 104           break;
 105         default:
 106           ShouldNotReachHere();
 107       }
 108       return;
 109 
 110     case T_CHAR:
 111     case T_SHORT:
 112       switch (src_type) {
 113         case T_INT:
 114           if (dst_type == T_CHAR) {
 115             z_llghr(dst, src);
 116           } else {
 117             z_lghr(dst, src);
 118           }
 119           break;
 120         default:
 121           ShouldNotReachHere();
 122       }
 123       return;
 124 
 125     case T_INT:
 126       switch (src_type) {
 127         case T_BOOLEAN:
 128         case T_BYTE:
 129         case T_CHAR:
 130         case T_SHORT:
 131         case T_INT:
 132         case T_LONG:
 133         case T_OBJECT:
 134         case T_ARRAY:
 135         case T_VOID:
 136         case T_ADDRESS:
 137           lr_if_needed(dst, src);
 138           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 139           return;
 140 
 141         default:
 142           assert(false, "non-integer src type");
 143           return;
 144       }
 145     case T_LONG:
 146       switch (src_type) {
 147         case T_BOOLEAN:
 148         case T_BYTE:
 149         case T_CHAR:
 150         case T_SHORT:
 151         case T_INT:
 152           z_lgfr(dst, src); // sign extension
 153           return;
 154 
 155         case T_LONG:
 156         case T_OBJECT:
 157         case T_ARRAY:
 158         case T_VOID:
 159         case T_ADDRESS:
 160           lgr_if_needed(dst, src);
 161           return;
 162 
 163         default:
 164           assert(false, "non-integer src type");
 165           return;
 166       }
 167       return;
 168     case T_OBJECT:
 169     case T_ARRAY:
 170     case T_VOID:
 171     case T_ADDRESS:
 172       switch (src_type) {
 173         // These types don't make sense to be converted to pointers:
 174         //      case T_BOOLEAN:
 175         //      case T_BYTE:
 176         //      case T_CHAR:
 177         //      case T_SHORT:
 178 
 179         case T_INT:
 180           z_llgfr(dst, src); // zero extension
 181           return;
 182 
 183         case T_LONG:
 184         case T_OBJECT:
 185         case T_ARRAY:
 186         case T_VOID:
 187         case T_ADDRESS:
 188           lgr_if_needed(dst, src);
 189           return;
 190 
 191         default:
 192           assert(false, "non-integer src type");
 193           return;
 194       }
 195       return;
 196     default:
 197       assert(false, "non-integer dst type");
 198       return;
 199   }
 200 }
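     // Illustration: move_reg_if_needed(r, T_LONG, s, T_INT) sign-extends via LGFR,
     // an int destination fed from a long source only copies the low 32 bits (LR),
     // and pointer-like destinations (T_OBJECT, T_ADDRESS, ...) zero-extend an int source (LLGFR).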
 201 
 202 // Move float register if destination and source are different.
 203 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 204                                          FloatRegister src, BasicType src_type) {
 205   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 206   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 207   if (dst_type == src_type) {
 208     ldr_if_needed(dst, src); // Just move all 64 bits.
 209   } else {
 210     switch (dst_type) {
 211       case T_FLOAT:
 212         assert(src_type == T_DOUBLE, "invalid float type combination");
 213         z_ledbr(dst, src);
 214         return;
 215       case T_DOUBLE:
 216         assert(src_type == T_FLOAT, "invalid float type combination");
 217         z_ldebr(dst, src);
 218         return;
 219       default:
 220         assert(false, "non-float dst type");
 221         return;
 222     }
 223   }
 224 }
 225 
 226 // Optimized emitter for reg to mem operations.
 227 // Uses modern instructions if running on modern hardware, classic instructions
 228 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 229 // Data register (reg) cannot be used as work register.
 230 //
 231 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 232 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 233 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 234                                   int64_t       disp,
 235                                   Register      index,
 236                                   Register      base,
 237                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 238                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 239                                   Register      scratch) {
 240   index = (index == noreg) ? Z_R0 : index;
 241   if (Displacement::is_shortDisp(disp)) {
 242     (this->*classic)(reg, disp, index, base);
 243   } else {
 244     if (Displacement::is_validDisp(disp)) {
 245       (this->*modern)(reg, disp, index, base);
 246     } else {
 247       if (scratch != Z_R0 && scratch != Z_R1) {
 248         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 249       } else {
 250         if (scratch != Z_R0) {   // scratch == Z_R1
 251           if ((scratch == index) || (index == base)) {
 252             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 253           } else {
 254             add2reg(scratch, disp, base);
 255             (this->*classic)(reg, 0, index, scratch);
 256             if (base == scratch) {
 257               add2reg(base, -disp);  // Restore base.
 258             }
 259           }
 260         } else {   // scratch == Z_R0
 261           z_lgr(scratch, base);
 262           add2reg(base, disp);
 263           (this->*classic)(reg, 0, index, base);
 264           z_lgr(base, scratch);      // Restore base.
 265         }
 266       }
 267     }
 268   }
 269 }
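     // Example of the fallback above: a displacement such as 0x80000 exceeds the signed
     // 20-bit range; with scratch == Z_R1 (distinct from index and base) the emitter
     // computes base+disp into the scratch register and issues the classic instruction
     // with displacement 0.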
 270 
 271 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 272   if (is_double) {
 273     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 274   } else {
 275     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 276   }
 277 }
 278 
 279 // Optimized emitter for mem to reg operations.
 280 // Uses modern instructions if running on modern hardware, classic instructions
 281 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 282 // Data register (reg) cannot be used as work register.
 283 //
 284 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
 285 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 286 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 287                                   int64_t       disp,
 288                                   Register      index,
 289                                   Register      base,
 290                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 291                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 292                                   Register      scratch) {
 293   index = (index == noreg) ? Z_R0 : index;
 294   if (Displacement::is_shortDisp(disp)) {
 295     (this->*classic)(reg, disp, index, base);
 296   } else {
 297     if (Displacement::is_validDisp(disp)) {
 298       (this->*modern)(reg, disp, index, base);
 299     } else {
 300       if (scratch != Z_R0 && scratch != Z_R1) {
 301         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 302       } else {
 303         if (scratch != Z_R0) {   // scratch == Z_R1
 304           if ((scratch == index) || (index == base)) {
 305             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 306           } else {
 307             add2reg(scratch, disp, base);
 308             (this->*classic)(reg, 0, index, scratch);
 309             if (base == scratch) {
 310               add2reg(base, -disp);  // Restore base.
 311             }
 312           }
 313         } else {   // scratch == Z_R0
 314           z_lgr(scratch, base);
 315           add2reg(base, disp);
 316           (this->*classic)(reg, 0, index, base);
 317           z_lgr(base, scratch);      // Restore base.
 318         }
 319       }
 320     }
 321   }
 322 }
 323 
 324 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 325   if (is_double) {
 326     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 327   } else {
 328     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 329   }
 330 }
 331 
 332 // Optimized emitter for reg to mem operations.
 333 // Uses modern instructions if running on modern hardware, classic instructions
 334 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 335 // Data register (reg) cannot be used as work register.
 336 //
 337 // Don't rely on register locking, instead pass a scratch register
 338 // (Z_R0 by default).
 339 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 340 void MacroAssembler::reg2mem_opt(Register reg,
 341                                  int64_t  disp,
 342                                  Register index,
 343                                  Register base,
 344                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 345                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 346                                  Register scratch) {
 347   index = (index == noreg) ? Z_R0 : index;
 348   if (Displacement::is_shortDisp(disp)) {
 349     (this->*classic)(reg, disp, index, base);
 350   } else {
 351     if (Displacement::is_validDisp(disp)) {
 352       (this->*modern)(reg, disp, index, base);
 353     } else {
 354       if (scratch != Z_R0 && scratch != Z_R1) {
 355         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 356       } else {
 357         if (scratch != Z_R0) {   // scratch == Z_R1
 358           if ((scratch == index) || (index == base)) {
 359             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 360           } else {
 361             add2reg(scratch, disp, base);
 362             (this->*classic)(reg, 0, index, scratch);
 363             if (base == scratch) {
 364               add2reg(base, -disp);  // Restore base.
 365             }
 366           }
 367         } else {   // scratch == Z_R0
 368           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 369             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 370           } else {
 371             z_lgr(scratch, base);
 372             add2reg(base, disp);
 373             (this->*classic)(reg, 0, index, base);
 374             z_lgr(base, scratch);    // Restore base.
 375           }
 376         }
 377       }
 378     }
 379   }
 380 }
 381 
 382 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 383   int store_offset = offset();
 384   if (is_double) {
 385     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 386   } else {
 387     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 388   }
 389   return store_offset;
 390 }
 391 
 392 // Optimized emitter for mem to reg operations.
 393 // Uses modern instructions if running on modern hardware, classic instructions
 394 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 395 // Data register (reg) will be used as work register where possible.
 396 void MacroAssembler::mem2reg_opt(Register reg,
 397                                  int64_t  disp,
 398                                  Register index,
 399                                  Register base,
 400                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 401                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 402   index = (index == noreg) ? Z_R0 : index;
 403   if (Displacement::is_shortDisp(disp)) {
 404     (this->*classic)(reg, disp, index, base);
 405   } else {
 406     if (Displacement::is_validDisp(disp)) {
 407       (this->*modern)(reg, disp, index, base);
 408     } else {
 409       if ((reg == index) && (reg == base)) {
 410         z_sllg(reg, reg, 1);
 411         add2reg(reg, disp);
 412         (this->*classic)(reg, 0, noreg, reg);
 413       } else if ((reg == index) && (reg != Z_R0)) {
 414         add2reg(reg, disp);
 415         (this->*classic)(reg, 0, reg, base);
 416       } else if (reg == base) {
 417         add2reg(reg, disp);
 418         (this->*classic)(reg, 0, index, reg);
 419       } else if (reg != Z_R0) {
 420         add2reg(reg, disp, base);
 421         (this->*classic)(reg, 0, index, reg);
 422       } else { // reg == Z_R0 && reg != base here
 423         add2reg(base, disp);
 424         (this->*classic)(reg, 0, index, base);
 425         add2reg(base, -disp);
 426       }
 427     }
 428   }
 429 }
 430 
 431 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 432   if (is_double) {
 433     z_lg(reg, a);
 434   } else {
 435     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 436   }
 437 }
 438 
 439 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 440   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 441 }
 442 
 443 void MacroAssembler::and_imm(Register r, long mask,
 444                              Register tmp /* = Z_R0 */,
 445                              bool wide    /* = false */) {
 446   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 447 
 448   if (!wide) {
 449     z_nilf(r, mask);
 450     return;
 451   }
 452 
 453   assert(r != tmp, " need a different temporary register !");
 454   load_const_optimized(tmp, mask);
 455   z_ngr(r, tmp);
 456 }
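     // Example: and_imm(r, 0x00ff) emits NILF on the low word of r. A genuine 64-bit
     // mask must be requested with wide == true, which materializes the mask in tmp
     // and combines it with NGR.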
 457 
 458 // Calculate the 1's complement.
 459 // Note: The condition code is neither preserved nor correctly set by this code!!!
 460 // Note: (wide == false) does not protect the high order half of the target register
 461 //       from alteration. It only serves as optimization hint for 32-bit results.
 462 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 463 
 464   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 465     z_xilf(r1, -1);
 466     if (wide) {
 467       z_xihf(r1, -1);
 468     }
 469   } else { // Distinct src and dst registers.
 470     load_const_optimized(r1, -1);
 471     z_xgr(r1, r2);
 472   }
 473 }
 474 
 475 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 476   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 477   assert(rBitPos <= 63,      "63   is rightmost bit position");
 478   assert(lBitPos <= rBitPos, "inverted selection interval");
 479   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 480 }
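     // For example, create_mask(48, 63) == 0x000000000000ffff and
     // create_mask(0, 7) == 0xff00000000000000: bit 0 is the leftmost (most
     // significant) bit, matching the z/Architecture bit numbering used by the
     // rotate_then_* emitters below.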
 481 
 482 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 483 // Rotate src, then mask register contents such that only bits in range survive.
 484 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 485 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 486 // The caller must ensure that the selected range only contains bits with defined value.
 487 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 488                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 489   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 490   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 491   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 492   //  Pre-determine which parts of dst will be zero after shift/rotate.
 493   bool llZero  =  sll4rll && (nRotate >= 16);
 494   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 495   bool lfZero  = llZero && lhZero;
 496   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 497   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 498   bool hfZero  = hlZero && hhZero;
 499 
 500   // rotate then mask src operand.
 501   // if oneBits == true,  all bits outside selected range are 1s.
 502   // if oneBits == false, all bits outside selected range are 0s.
 503   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 504     if (dst32bit) {
 505       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 506     } else {
 507       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 508       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 509       else              { z_rllg(dst, src,  nRotate); }
 510     }
 511   } else {
 512     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 513     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 514     else              { z_rllg(dst, src,  nRotate); }
 515   }
 516 
 517   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 518   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 519   unsigned int   range_mask_l  = (unsigned int)range_mask;
 520   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 521   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 522   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 523   unsigned short range_mask_ll = (unsigned short)range_mask;
 524   // Works for z9 and newer H/W.
 525   if (oneBits) {
 526     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 527     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 528   } else {
 529     // All bits outside range become 0s
 530     if (((~range_mask_l) != 0) &&              !lfZero) {
 531       z_nilf(dst, range_mask_l);
 532     }
 533     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 534       z_nihf(dst, range_mask_h);
 535     }
 536   }
 537 }
 538 
 539 // Rotate src, then insert selected range from rotated src into dst.
 540 // Clear dst before, if requested.
 541 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 542                                         int nRotate, bool clear_dst) {
 543   // This version does not depend on src being zero-extended int2long.
 544   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 545   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 546 }
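     // Example: rotate_then_insert(dst, src, 48, 63, 0, true) reduces to dst = src & 0xffff
     // (RISBG with zero rotate amount and all bits outside the selected range cleared).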
 547 
 548 // Rotate src, then and selected range from rotated src into dst.
 549 // Set condition code only if so requested. Otherwise it is unpredictable.
 550 // See performance note in macroAssembler_s390.hpp for important information.
 551 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 552                                      int nRotate, bool test_only) {
 553   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 554   // This version does not depend on src being zero-extended int2long.
 555   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 556   z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 557 }
 558 
 559 // Rotate src, then or selected range from rotated src into dst.
 560 // Set condition code only if so requested. Otherwise it is unpredictable.
 561 // See performance note in macroAssembler_s390.hpp for important information.
 562 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 563                                     int nRotate, bool test_only) {
 564   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 565   // This version does not depend on src being zero-extended int2long.
 566   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 567   z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 568 }
 569 
 570 // Rotate src, then xor selected range from rotated src into dst.
 571 // Set condition code only if so requested. Otherwise it is unpredictable.
 572 // See performance note in macroAssembler_s390.hpp for important information.
 573 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 574                                      int nRotate, bool test_only) {
 575   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 576   // This version does not depend on src being zero-extended int2long.
 577   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 578   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 579 }
 580 
 581 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 582   if (inc.is_register()) {
 583     z_agr(r1, inc.as_register());
 584   } else { // constant
 585     intptr_t imm = inc.as_constant();
 586     add2reg(r1, imm);
 587   }
 588 }
 589 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
 590 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 591 // calculation and is thus rather slow.
 592 //
 593 // There is no handling for special cases, e.g. cval==0 or cval==1.
 594 //
 595 // Returns len of generated code block.
 596 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 597   int block_start = offset();
 598 
 599   bool sign_flip = cval < 0;
 600   cval = sign_flip ? -cval : cval;
 601 
 602   BLOCK_COMMENT("Reg64*Con16 {");
 603 
 604   int bit1 = cval & -cval;
 605   if (bit1 == cval) {
 606     z_sllg(rval, rval, exact_log2(bit1));
 607     if (sign_flip) { z_lcgr(rval, rval); }
 608   } else {
 609     int bit2 = (cval-bit1) & -(cval-bit1);
 610     if ((bit1+bit2) == cval) {
 611       z_sllg(work, rval, exact_log2(bit1));
 612       z_sllg(rval, rval, exact_log2(bit2));
 613       z_agr(rval, work);
 614       if (sign_flip) { z_lcgr(rval, rval); }
 615     } else {
 616       if (sign_flip) { z_mghi(rval, -cval); }
 617       else           { z_mghi(rval,  cval); }
 618     }
 619   }
 620   BLOCK_COMMENT("} Reg64*Con16");
 621 
 622   int block_end = offset();
 623   return block_end - block_start;
 624 }
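     // Example: cval == 10 decomposes into bit1 == 2 and bit2 == 8, so the emitter
     // produces work = rval<<1; rval = rval<<3; rval += work (i.e. rval*10) without MGHI.
     // A value like 7 has more than two bits set and falls back to MGHI.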
 625 
 626 // Generic operation r1 := r2 + imm.
 627 //
 628 // Should produce the best code for each supported CPU version.
 629 // r2 == noreg yields r1 := r1 + imm
 630 // imm == 0 emits either no instruction or r1 := r2 !
 631 // NOTES: 1) Don't use this function where fixed sized
 632 //           instruction sequences are required!!!
 633 //        2) Don't use this function if condition code
 634 //           setting is required!
 635 //        3) Despite being declared as int64_t, the parameter imm
 636 //           must be a simm_32 value (= signed 32-bit integer).
 637 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 638   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 639 
 640   if (r2 == noreg) { r2 = r1; }
 641 
 642   // Handle special case imm == 0.
 643   if (imm == 0) {
 644     lgr_if_needed(r1, r2);
 645     // Nothing else to do.
 646     return;
 647   }
 648 
 649   if (!PreferLAoverADD || (r2 == Z_R0)) {
 650     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 651 
 652     // Can we encode imm in 16 bits signed?
 653     if (Immediate::is_simm16(imm)) {
 654       if (r1 == r2) {
 655         z_aghi(r1, imm);
 656         return;
 657       }
 658       if (distinctOpnds) {
 659         z_aghik(r1, r2, imm);
 660         return;
 661       }
 662       z_lgr(r1, r2);
 663       z_aghi(r1, imm);
 664       return;
 665     }
 666   } else {
 667     // Can we encode imm in 12 bits unsigned?
 668     if (Displacement::is_shortDisp(imm)) {
 669       z_la(r1, imm, r2);
 670       return;
 671     }
 672     // Can we encode imm in 20 bits signed?
 673     if (Displacement::is_validDisp(imm)) {
 674       // Always use LAY instruction, so we don't need the tmp register.
 675       z_lay(r1, imm, r2);
 676       return;
 677     }
 678 
 679   }
 680 
 681   // Can handle it (all possible values) with long immediates.
 682   lgr_if_needed(r1, r2);
 683   z_agfi(r1, imm);
 684 }
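     // Examples: add2reg(r1, 8) emits AGHI r1,8 (or LA r1,8(r1) if PreferLAoverADD is
     // set and r1 != Z_R0); with distinct-operands support and r1 != r2 it can use AGHIK.
     // Constants outside the simm16/disp20 ranges fall back to LGR (if needed) plus AGFI.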
 685 
 686 // Generic operation r := b + x + d
 687 //
 688 // Addition of several operands with address generation semantics - sort of:
 689 //  - no restriction on the registers. Any register will do for any operand.
 690 //  - x == noreg: operand will be disregarded.
 691 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 692 //  - x == Z_R0:  just disregard
 693 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 694 //
 695 // The same restrictions as on add2reg() are valid!!!
 696 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 697   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 698 
 699   if (x == noreg) { x = Z_R0; }
 700   if (b == noreg) { b = r; }
 701 
 702   // Handle special case x == R0.
 703   if (x == Z_R0) {
 704     // Can simply add the immediate value to the base register.
 705     add2reg(r, d, b);
 706     return;
 707   }
 708 
 709   if (!PreferLAoverADD || (b == Z_R0)) {
 710     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 711     // Handle special case d == 0.
 712     if (d == 0) {
 713       if (b == x)        { z_sllg(r, b, 1); return; }
 714       if (r == x)        { z_agr(r, b);     return; }
 715       if (r == b)        { z_agr(r, x);     return; }
 716       if (distinctOpnds) { z_agrk(r, x, b); return; }
 717       z_lgr(r, b);
 718       z_agr(r, x);
 719     } else {
 720       if (x == b)             { z_sllg(r, x, 1); }
 721       else if (r == x)        { z_agr(r, b); }
 722       else if (r == b)        { z_agr(r, x); }
 723       else if (distinctOpnds) { z_agrk(r, x, b); }
 724       else {
 725         z_lgr(r, b);
 726         z_agr(r, x);
 727       }
 728       add2reg(r, d);
 729     }
 730   } else {
 731     // Can we encode imm in 12 bits unsigned?
 732     if (Displacement::is_shortDisp(d)) {
 733       z_la(r, d, x, b);
 734       return;
 735     }
 736     // Can we encode imm in 20 bits signed?
 737     if (Displacement::is_validDisp(d)) {
 738       z_lay(r, d, x, b);
 739       return;
 740     }
 741     z_la(r, 0, x, b);
 742     add2reg(r, d);
 743   }
 744 }
 745 
 746 // Generic emitter (32bit) for direct memory increment.
 747 // For optimal code, do not specify Z_R0 as temp register.
 748 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 749   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 750     z_asi(a, imm);
 751   } else {
 752     z_lgf(tmp, a);
 753     add2reg(tmp, imm);
 754     z_st(tmp, a);
 755   }
 756 }
 757 
 758 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 759   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 760     z_agsi(a, imm);
 761   } else {
 762     z_lg(tmp, a);
 763     add2reg(tmp, imm);
 764     z_stg(tmp, a);
 765   }
 766 }
 767 
 768 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 769   switch (size_in_bytes) {
 770     case  8: z_lg(dst, src); break;
 771     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 772     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 773     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 774     default: ShouldNotReachHere();
 775   }
 776 }
 777 
 778 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 779   switch (size_in_bytes) {
 780     case  8: z_stg(src, dst); break;
 781     case  4: z_st(src, dst); break;
 782     case  2: z_sth(src, dst); break;
 783     case  1: z_stc(src, dst); break;
 784     default: ShouldNotReachHere();
 785   }
 786 }
 787 
 788 // Split a si20 offset (20bit, signed) into a ui12 offset (12bit, unsigned) and
 789 // a high-order summand in register tmp.
 790 //
 791 // return value: <  0: No split required, si20 actually has property uimm12.
 792 //               >= 0: Split performed. Use return value as uimm12 displacement and
 793 //                     tmp as index register.
 794 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 795   assert(Immediate::is_simm20(si20_offset), "sanity");
 796   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 797   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 798   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 799          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 800   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 801 
 802   Register work = accumulate ? Z_R0 : tmp;
 803 
 804   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 805     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 806     z_slag(work, work, 12);
 807   } else {                      // Len of code = 0..10.
 808     if (ll_off == 0) { return -1; }
 809     // ll_off has 8 significant bits (at most) plus sign.
 810     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 811       z_llilh(work, ll_off >> 16);
 812       if (ll_off < 0) {                  // Sign-extension required.
 813         z_lgfr(work, work);
 814       }
 815     } else {
 816       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 817         z_llill(work, ll_off);
 818       } else {                           // Non-zero bits in both halfbytes.
 819         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 820         z_slag(work, work, 12);
 821       }
 822     }
 823   }
 824   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 825   return lg_off;
 826 }
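     // Example: si20 == 0x12345 is split into the high part 0x12000 (materialized in the
     // work register, via LGHI/SLAG in the fixed_codelen case) and the returned uimm12
     // part 0x345; with accumulate == true the high part is added into tmp via ALGR.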
 827 
 828 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 829   if (Displacement::is_validDisp(si20)) {
 830     z_ley(t, si20, a);
 831   } else {
 832     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 833     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 834     // pool loads).
 835     bool accumulate    = true;
 836     bool fixed_codelen = true;
 837     Register work;
 838 
 839     if (fixed_codelen) {
 840       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 841     } else {
 842       accumulate = (a == tmp);
 843     }
 844     work = tmp;
 845 
 846     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 847     if (disp12 < 0) {
 848       z_le(t, si20, work);
 849     } else {
 850       if (accumulate) {
 851         z_le(t, disp12, work);
 852       } else {
 853         z_le(t, disp12, work, a);
 854       }
 855     }
 856   }
 857 }
 858 
 859 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 860   if (Displacement::is_validDisp(si20)) {
 861     z_ldy(t, si20, a);
 862   } else {
 863     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 864     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 865     // pool loads).
 866     bool accumulate    = true;
 867     bool fixed_codelen = true;
 868     Register work;
 869 
 870     if (fixed_codelen) {
 871       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 872     } else {
 873       accumulate = (a == tmp);
 874     }
 875     work = tmp;
 876 
 877     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 878     if (disp12 < 0) {
 879       z_ld(t, si20, work);
 880     } else {
 881       if (accumulate) {
 882         z_ld(t, disp12, work);
 883       } else {
 884         z_ld(t, disp12, work, a);
 885       }
 886     }
 887   }
 888 }
 889 
 890 // PCrelative TOC access.
 891 // Returns distance (in bytes) from current position to start of consts section.
 892 // Returns 0 (zero) if no consts section exists or if it has size zero.
 893 long MacroAssembler::toc_distance() {
 894   CodeSection* cs = code()->consts();
 895   return (long)((cs != NULL) ? cs->start()-pc() : 0);
 896 }
 897 
 898 // The implementations on x86/sparc assume that constant and instruction sections are
 899 // adjacent. That does not hold here. Two special situations may occur which we must
 900 // be able to handle:
 901 //   1. The const section may be located apart from the inst section.
 902 //   2. The const section may be empty.
 903 // In both cases, we use the const section's start address to compute the "TOC".
 904 // This seems to occur only temporarily; in the final step we always seem to end up
 905 // with the pc-relative variant.
 906 //
 907 // The PC-relative offset can be +/-2**32 -> use long for disp.
 908 // Furthermore, it makes no sense to have special code for
 909 // adjacent const and inst sections.
 910 void MacroAssembler::load_toc(Register Rtoc) {
 911   // Simply use distance from start of const section (should be patched in the end).
 912   long disp = toc_distance();
 913 
 914   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 915   relocate(rspec);
 916   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 917 }
 918 
 919 // PCrelative TOC access.
 920 // Load from anywhere pcrelative (with relocation of load instr)
 921 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 922   address          pc             = this->pc();
 923   ptrdiff_t        total_distance = dataLocation - pc;
 924   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 925 
 926   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 927   assert(total_distance != 0, "sanity");
 928 
 929   // Some extra safety net.
 930   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 931     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 932   }
 933 
 934   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 935   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 936 }
 937 
 938 
 939 // PCrelative TOC access.
 940 // Load from anywhere pcrelative (with relocation of load instr)
 941 // loaded addr has to be relocated when added to constant pool.
 942 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 943   address          pc             = this->pc();
 944   ptrdiff_t        total_distance = addrLocation - pc;
 945   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 946 
 947   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 948 
 949   // Some extra safety net.
 950   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 951     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
 952   }
 953 
 954   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 955   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 956 }
 957 
 958 // Generic operation: load a value from memory and test.
 959 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 960 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 961   z_lb(dst, a);
 962   z_ltr(dst, dst);
 963 }
 964 
 965 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 966   int64_t disp = a.disp20();
 967   if (Displacement::is_shortDisp(disp)) {
 968     z_lh(dst, a);
 969   } else if (Displacement::is_longDisp(disp)) {
 970     z_lhy(dst, a);
 971   } else {
 972     guarantee(false, "displacement out of range");
 973   }
 974   z_ltr(dst, dst);
 975 }
 976 
 977 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 978   z_lt(dst, a);
 979 }
 980 
 981 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 982   z_ltgf(dst, a);
 983 }
 984 
 985 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 986   z_ltg(dst, a);
 987 }
 988 
 989 // Test a bit in memory.
 990 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
 991   assert(a.index() == noreg, "no index reg allowed in testbit");
 992   if (bit <= 7) {
 993     z_tm(a.disp() + 3, a.base(), 1 << bit);
 994   } else if (bit <= 15) {
 995     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
 996   } else if (bit <= 23) {
 997     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
 998   } else if (bit <= 31) {
 999     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
1000   } else {
1001     ShouldNotReachHere();
1002   }
1003 }
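     // Example: testbit(Address(base, disp), 9) emits TM disp+2(base),0x02. Bit 0 is the
     // least significant bit of the 4-byte field at disp (i.e. of the byte at disp+3).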
1004 
1005 // Test a bit in a register. Result is reflected in CC.
1006 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1007   if (bitPos < 16) {
1008     z_tmll(r, 1U<<bitPos);
1009   } else if (bitPos < 32) {
1010     z_tmlh(r, 1U<<(bitPos-16));
1011   } else if (bitPos < 48) {
1012     z_tmhl(r, 1U<<(bitPos-32));
1013   } else if (bitPos < 64) {
1014     z_tmhh(r, 1U<<(bitPos-48));
1015   } else {
1016     ShouldNotReachHere();
1017   }
1018 }
1019 
1020 void MacroAssembler::prefetch_read(Address a) {
1021   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1022 }
1023 void MacroAssembler::prefetch_update(Address a) {
1024   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1025 }
1026 
1027 // Clear a register, i.e. load const zero into reg.
1028 // Return len (in bytes) of generated instruction(s).
1029 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1030 // set_cc:    Use instruction that sets the condition code, if true.
1031 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1032   unsigned int start_off = offset();
1033   if (whole_reg) {
1034     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1035   } else {  // Only 32bit register.
1036     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1037   }
1038   return offset() - start_off;
1039 }
1040 
1041 #ifdef ASSERT
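     // Debug-only helper: fill a register with a repeating pattern of 1, 2, 4, or 8 bytes.
     // The switch cases below fall through intentionally, widening the pattern step by
     // step before it is loaded as a full 64-bit constant.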
1042 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1043   switch (pattern_len) {
1044     case 1:
1045       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
1046     case 2:
1047       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
1048     case 4:
1049       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
1050     case 8:
1051       return load_const_optimized_rtn_len(r, pattern, true);
1052       break;
1053     default:
1054       guarantee(false, "preset_reg: bad len");
1055   }
1056   return 0;
1057 }
1058 #endif
1059 
1060 // addr: Address descriptor of memory to clear. The index register will not be used!
1061 // size: Number of bytes to clear.
1062 //    !!! DO NOT USE THIS FOR ATOMIC MEMORY CLEARING !!!
1063 //    !!! Use store_const() instead                  !!!
1064 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1065   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1066 
1067   if (size == 1) {
1068     z_mvi(addr, 0);
1069     return;
1070   }
1071 
1072   switch (size) {
1073     case 2: z_mvhhi(addr, 0);
1074       return;
1075     case 4: z_mvhi(addr, 0);
1076       return;
1077     case 8: z_mvghi(addr, 0);
1078       return;
1079     default: ; // Fallthru to xc.
1080   }
1081 
1082   z_xc(addr, size, addr);
1083 }
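     // Example: clear_mem(Address(base, disp), 8) emits a single MVGHI. Sizes other than
     // 1, 2, 4, or 8 (e.g. 24) take the XC path, which clears the field by XOR'ing it
     // with itself and is therefore not atomic (see the warning above).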
1084 
1085 void MacroAssembler::align(int modulus) {
1086   while (offset() % modulus != 0) z_nop();
1087 }
1088 
1089 // Special version for non-relocatable code if the required alignment
1090 // is larger than CodeEntryAlignment.
1091 void MacroAssembler::align_address(int modulus) {
1092   while ((uintptr_t)pc() % modulus != 0) z_nop();
1093 }
1094 
1095 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1096                                          Register temp_reg,
1097                                          int64_t extra_slot_offset) {
1098   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1099   // which issues an unnecessary add instruction.
1100   int stackElementSize = Interpreter::stackElementSize;
1101   int64_t offset = extra_slot_offset * stackElementSize;
1102   const Register argbase = Z_esp;
1103   if (arg_slot.is_constant()) {
1104     offset += arg_slot.as_constant() * stackElementSize;
1105     return Address(argbase, offset);
1106   }
1107   // else
1108   assert(temp_reg != noreg, "must specify");
1109   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1110   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1111   return Address(argbase, temp_reg, offset);
1112 }
1113 
1114 
1115 //===================================================================
1116 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1117 //===================================================================
1118 //===            P A T C H A B L E   C O N S T A N T S            ===
1119 //===================================================================
1120 
1121 
1122 //---------------------------------------------------
1123 //  Load (patchable) constant into register
1124 //---------------------------------------------------
1125 
1126 
1127 // Load absolute address (and try to optimize).
1128 //   Note: This method is usable only for position-fixed code,
1129 //         referring to a position-fixed target location.
1130 //         Otherwise, relocations and patching must be used.
1131 void MacroAssembler::load_absolute_address(Register d, address addr) {
1132   assert(addr != NULL, "should not happen");
1133   BLOCK_COMMENT("load_absolute_address:");
1134   if (addr == NULL) {
1135     z_larl(d, pc()); // Dummy emit for size calc.
1136     return;
1137   }
1138 
1139   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1140     z_larl(d, addr);
1141     return;
1142   }
1143 
1144   load_const_optimized(d, (long)addr);
1145 }
1146 
1147 // Load a 64bit constant.
1148 // Patchable code sequence, but not atomically patchable.
1149 // Make sure to keep code size constant -> no value-dependent optimizations.
1150 // Do not kill condition code.
1151 void MacroAssembler::load_const(Register t, long x) {
1152   // Note: Right shift is only cleanly defined for unsigned types
1153   //       or for signed types with nonnegative values.
1154   Assembler::z_iihf(t, (long)((unsigned long)x >> 32));
1155   Assembler::z_iilf(t, (long)((unsigned long)x & 0xffffffffUL));
1156 }
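     // Example: load_const(t, 0x1122334455667788) emits IIHF t,0x11223344 followed by
     // IILF t,0x55667788; the sequence is always 12 bytes, so it can be patched in place.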
1157 
1158 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1159 // Patchable code sequence, but not atomically patchable.
1160 // Make sure to keep code size constant -> no value-dependent optimizations.
1161 // Do not kill condition code.
1162 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1163   if (sign_extend) { Assembler::z_lgfi(t, x); }
1164   else             { Assembler::z_llilf(t, x); }
1165 }
1166 
1167 // Load narrow oop constant, no decompression.
1168 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1169   assert(UseCompressedOops, "must be on to call this method");
1170   load_const_32to64(t, a, false /*sign_extend*/);
1171 }
1172 
1173 // Load narrow klass constant, compression required.
1174 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1175   assert(UseCompressedClassPointers, "must be on to call this method");
1176   narrowKlass encoded_k = CompressedKlassPointers::encode(k);
1177   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1178 }
1179 
1180 //------------------------------------------------------
1181 //  Compare (patchable) constant with register.
1182 //------------------------------------------------------
1183 
1184 // Compare narrow oop in reg with narrow oop constant, no decompression.
1185 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1186   assert(UseCompressedOops, "must be on to call this method");
1187 
1188   Assembler::z_clfi(oop1, oop2);
1189 }
1190 
1191 // Compare narrow oop in reg with narrow oop constant, no decompression.
1192 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1193   assert(UseCompressedClassPointers, "must be on to call this method");
1194   narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);
1195 
1196   Assembler::z_clfi(klass1, encoded_k);
1197 }
1198 
1199 //----------------------------------------------------------
1200 //  Check which kind of load_constant we have here.
1201 //----------------------------------------------------------
1202 
1203 // Detection of CPU version dependent load_const sequence.
1204 // The detection is valid only for code sequences generated by load_const,
1205 // not load_const_optimized.
1206 bool MacroAssembler::is_load_const(address a) {
1207   unsigned long inst1, inst2;
1208   unsigned int  len1,  len2;
1209 
1210   len1 = get_instruction(a, &inst1);
1211   len2 = get_instruction(a + len1, &inst2);
1212 
1213   return is_z_iihf(inst1) && is_z_iilf(inst2);
1214 }
1215 
1216 // Detection of CPU version dependent load_const_32to64 sequence.
1217 // Mostly used for narrow oops and narrow Klass pointers.
1218 // The detection is valid only for code sequences generated by load_const_32to64.
1219 bool MacroAssembler::is_load_const_32to64(address pos) {
1220   unsigned long inst1, inst2;
1221   unsigned int len1;
1222 
1223   len1 = get_instruction(pos, &inst1);
1224   return is_z_llilf(inst1);
1225 }
1226 
1227 // Detection of compare_immediate_narrow sequence.
1228 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1229 bool MacroAssembler::is_compare_immediate32(address pos) {
1230   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1231 }
1232 
1233 // Detection of compare_immediate_narrow sequence.
1234 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1235 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1236   return is_compare_immediate32(pos);
1237 }
1238 
1239 // Detection of compare_immediate_narrow sequence.
1240 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1241 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1242   return is_compare_immediate32(pos);
1243 }
1244 
1245 //-----------------------------------
1246 //  patch the load_constant
1247 //-----------------------------------
1248 
1249 // CPU-version dependent patching of load_const.
1250 void MacroAssembler::patch_const(address a, long x) {
1251   assert(is_load_const(a), "not a load of a constant");
1252   // Note: Right shift is only cleanly defined for unsigned types
1253   //       or for signed types with nonnegative values.
1254   set_imm32((address)a, (long)((unsigned long)x >> 32));
1255   set_imm32((address)(a + 6), (long)((unsigned long)x & 0xffffffffUL));
1256 }
1257 
1258 // Patching the value of CPU version dependent load_const_32to64 sequence.
1259 // The passed ptr MUST be in compressed format!
1260 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1261   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1262 
1263   set_imm32(pos, np);
1264   return 6;
1265 }
1266 
1267 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1268 // The passed ptr MUST be in compressed format!
1269 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1270   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1271 
1272   set_imm32(pos, np);
1273   return 6;
1274 }
1275 
1276 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1277 // The passed ptr must NOT be in compressed format!
1278 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1279   assert(UseCompressedOops, "Can only patch compressed oops");
1280 
1281   narrowOop no = CompressedOops::encode(o);
1282   return patch_load_const_32to64(pos, no);
1283 }
1284 
1285 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1286 // The passed ptr must NOT be in compressed format!
1287 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1288   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1289 
1290   narrowKlass nk = CompressedKlassPointers::encode(k);
1291   return patch_load_const_32to64(pos, nk);
1292 }
1293 
1294 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1295 // The passed ptr must NOT be in compressed format!
1296 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1297   assert(UseCompressedOops, "Can only patch compressed oops");
1298 
1299   narrowOop no = CompressedOops::encode(o);
1300   return patch_compare_immediate_32(pos, no);
1301 }
1302 
1303 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1304 // The passed ptr must NOT be in compressed format!
1305 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1306   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1307 
1308   narrowKlass nk = CompressedKlassPointers::encode(k);
1309   return patch_compare_immediate_32(pos, nk);
1310 }
1311 
1312 //------------------------------------------------------------------------
1313 //  Extract the constant from a load_constant instruction stream.
1314 //------------------------------------------------------------------------
1315 
1316 // Get constant from a load_const sequence.
1317 long MacroAssembler::get_const(address a) {
1318   assert(is_load_const(a), "not a load of a constant");
1319   unsigned long x;
1320   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1321   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1322   return (long) x;
1323 }
1324 
1325 //--------------------------------------
1326 //  Store a constant in memory.
1327 //--------------------------------------
1328 
1329 // General emitter to move a constant to memory.
1330 // The store is atomic.
1331 //  o Address must be given in RS format (no index register)
1332 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1333 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1334 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1335 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1336 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
1337 int MacroAssembler::store_const(const Address &dest, long imm,
1338                                 unsigned int lm, unsigned int lc,
1339                                 Register scratch) {
1340   int64_t  disp = dest.disp();
1341   Register base = dest.base();
1342   assert(!dest.has_index(), "not supported");
1343   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1344   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1345   assert(lm>=lc, "memory slot too small");
1346   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1347   assert(Displacement::is_validDisp(disp), "displacement out of range");
1348 
1349   bool is_shortDisp = Displacement::is_shortDisp(disp);
1350   int store_offset = -1;
1351 
1352   // For target len == 1 it's easy.
1353   if (lm == 1) {
1354     store_offset = offset();
1355     if (is_shortDisp) {
1356       z_mvi(disp, base, imm);
1357       return store_offset;
1358     } else {
1359       z_mviy(disp, base, imm);
1360       return store_offset;
1361     }
1362   }
1363 
1364   // All the "good stuff" takes an unsigned displacement.
1365   if (is_shortDisp) {
1366     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1367 
1368     store_offset = offset();
1369     switch (lm) {
1370       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1371         z_mvhhi(disp, base, imm);
1372         return store_offset;
1373       case 4:
1374         if (Immediate::is_simm16(imm)) {
1375           z_mvhi(disp, base, imm);
1376           return store_offset;
1377         }
1378         break;
1379       case 8:
1380         if (Immediate::is_simm16(imm)) {
1381           z_mvghi(disp, base, imm);
1382           return store_offset;
1383         }
1384         break;
1385       default:
1386         ShouldNotReachHere();
1387         break;
1388     }
1389   }
1390 
1391   //  Can't optimize, so load value and store it.
  guarantee(scratch != noreg, "need a scratch register here!");
1393   if (imm != 0) {
1394     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1395   } else {
1396     // Leave CC alone!!
1397     (void) clear_reg(scratch, true, false); // Indicate unused result.
1398   }
1399 
1400   store_offset = offset();
1401   if (is_shortDisp) {
1402     switch (lm) {
1403       case 2:
1404         z_sth(scratch, disp, Z_R0, base);
1405         return store_offset;
1406       case 4:
1407         z_st(scratch, disp, Z_R0, base);
1408         return store_offset;
1409       case 8:
1410         z_stg(scratch, disp, Z_R0, base);
1411         return store_offset;
1412       default:
1413         ShouldNotReachHere();
1414         break;
1415     }
1416   } else {
1417     switch (lm) {
1418       case 2:
1419         z_sthy(scratch, disp, Z_R0, base);
1420         return store_offset;
1421       case 4:
1422         z_sty(scratch, disp, Z_R0, base);
1423         return store_offset;
1424       case 8:
1425         z_stg(scratch, disp, Z_R0, base);
1426         return store_offset;
1427       default:
1428         ShouldNotReachHere();
1429         break;
1430     }
1431   }
1432   return -1; // should not reach here
1433 }
1434 
1435 //===================================================================
1436 //===       N O T   P A T CH A B L E   C O N S T A N T S          ===
1437 //===================================================================
1438 
// Load constant x into register t with a fast instruction sequence
1440 // depending on the bits in x. Preserves CC under all circumstances.
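// Illustrative example (values chosen only for demonstration): for
// x = 0x0000000100008000, part12 == 1 and part34 == 0x8000 with part1 and
// part3 both zero, so the emitter produces LLIHL 1 followed by IILL 0x8000
// (4 + 4 = 8 bytes) instead of a longer full-width load sequence.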
1441 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1442   if (x == 0) {
1443     int len;
1444     if (emit) {
1445       len = clear_reg(t, true, false);
1446     } else {
1447       len = 4;
1448     }
1449     return len;
1450   }
1451 
1452   if (Immediate::is_simm16(x)) {
1453     if (emit) { z_lghi(t, x); }
1454     return 4;
1455   }
1456 
1457   // 64 bit value: | part1 | part2 | part3 | part4 |
1458   // At least one part is not zero!
1459   // Note: Right shift is only cleanly defined for unsigned types
1460   //       or for signed types with nonnegative values.
1461   int part1 = (int)((unsigned long)x >> 48) & 0x0000ffff;
1462   int part2 = (int)((unsigned long)x >> 32) & 0x0000ffff;
1463   int part3 = (int)((unsigned long)x >> 16) & 0x0000ffff;
1464   int part4 = (int)x & 0x0000ffff;
1465   int part12 = (int)((unsigned long)x >> 32);
1466   int part34 = (int)x;
1467 
1468   // Lower word only (unsigned).
1469   if (part12 == 0) {
1470     if (part3 == 0) {
1471       if (emit) z_llill(t, part4);
1472       return 4;
1473     }
1474     if (part4 == 0) {
1475       if (emit) z_llilh(t, part3);
1476       return 4;
1477     }
1478     if (emit) z_llilf(t, part34);
1479     return 6;
1480   }
1481 
1482   // Upper word only.
1483   if (part34 == 0) {
1484     if (part1 == 0) {
1485       if (emit) z_llihl(t, part2);
1486       return 4;
1487     }
1488     if (part2 == 0) {
1489       if (emit) z_llihh(t, part1);
1490       return 4;
1491     }
1492     if (emit) z_llihf(t, part12);
1493     return 6;
1494   }
1495 
1496   // Lower word only (signed).
1497   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1498     if (emit) z_lgfi(t, part34);
1499     return 6;
1500   }
1501 
1502   int len = 0;
1503 
1504   if ((part1 == 0) || (part2 == 0)) {
1505     if (part1 == 0) {
1506       if (emit) z_llihl(t, part2);
1507       len += 4;
1508     } else {
1509       if (emit) z_llihh(t, part1);
1510       len += 4;
1511     }
1512   } else {
1513     if (emit) z_llihf(t, part12);
1514     len += 6;
1515   }
1516 
1517   if ((part3 == 0) || (part4 == 0)) {
1518     if (part3 == 0) {
1519       if (emit) z_iill(t, part4);
1520       len += 4;
1521     } else {
1522       if (emit) z_iilh(t, part3);
1523       len += 4;
1524     }
1525   } else {
1526     if (emit) z_iilf(t, part34);
1527     len += 6;
1528   }
1529   return len;
1530 }
1531 
1532 //=====================================================================
1533 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1534 //=====================================================================
1535 
1536 // Note: In the worst case, one of the scratch registers is destroyed!!!
1537 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1538   // Right operand is constant.
1539   if (x2.is_constant()) {
1540     jlong value = x2.as_constant();
1541     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1542     return;
1543   }
1544 
1545   // Right operand is in register.
1546   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1547 }
1548 
1549 // Note: In the worst case, one of the scratch registers is destroyed!!!
1550 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1551   // Right operand is constant.
1552   if (x2.is_constant()) {
1553     jlong value = x2.as_constant();
1554     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1555     return;
1556   }
1557 
1558   // Right operand is in register.
1559   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1560 }
1561 
1562 // Note: In the worst case, one of the scratch registers is destroyed!!!
1563 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1564   // Right operand is constant.
1565   if (x2.is_constant()) {
1566     jlong value = x2.as_constant();
1567     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1568     return;
1569   }
1570 
1571   // Right operand is in register.
1572   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1573 }
1574 
1575 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1576   // Right operand is constant.
1577   if (x2.is_constant()) {
1578     jlong value = x2.as_constant();
1579     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1580     return;
1581   }
1582 
1583   // Right operand is in register.
1584   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1585 }
1586 
1587 // Generate an optimal branch to the branch target.
1588 // Optimal means that a relative branch (brc or brcl) is used if the
1589 // branch distance is short enough. Loading the target address into a
1590 // register and branching via reg is used as fallback only.
1591 //
1592 // Used registers:
1593 //   Z_R1 - work reg. Holds branch target address.
1594 //          Used in fallback case only.
1595 //
1596 // This version of branch_optimized is good for cases where the target address is known
1597 // and constant, i.e. is never changed (no relocation, no patching).
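// Illustrative behavior: a target within +/- 2**16 bytes yields a 4-byte BRC,
// a target within +/- 2**32 bytes yields a 6-byte BRCL, and only targets even
// farther away take the load_const_optimized(Z_R1, ...) + BCR fallback.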
1598 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1599   address branch_origin = pc();
1600 
1601   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1602     z_brc(cond, branch_addr);
1603   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1604     z_brcl(cond, branch_addr);
1605   } else {
1606     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1607     z_bcr(cond, Z_R1);
1608   }
1609 }
1610 
1611 // This version of branch_optimized is good for cases where the target address
1612 // is potentially not yet known at the time the code is emitted.
1613 //
1614 // One very common case is a branch to an unbound label which is handled here.
1615 // The caller might know (or hope) that the branch distance is short enough
// to be encoded in a 16bit relative address. In that case, the caller passes a
// NearLabel as branch_target.
1618 // Care must be taken with unbound labels. Each call to target(label) creates
1619 // an entry in the patch queue for that label to patch all references of the label
1620 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1621 // an assertion fires at patch time.
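// Illustrative sketch of a forward branch over a short code stretch
// (hypothetical call site):
//   NearLabel done;
//   branch_optimized(bcondEqual, done);  // unbound near label -> compact BRC
//   ...                                  // a few instructions, well within 16bit range
//   bind(done);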
1622 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1623   if (branch_target.is_bound()) {
1624     address branch_addr = target(branch_target);
1625     branch_optimized(cond, branch_addr);
1626   } else if (branch_target.is_near()) {
1627     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1628   } else {
1629     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1630   }
1631 }
1632 
1633 // Generate an optimal compare and branch to the branch target.
1634 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1635 // branch distance is short enough. Loading the target address into a
1636 // register and branching via reg is used as fallback only.
1637 //
1638 // Input:
1639 //   r1 - left compare operand
1640 //   r2 - right compare operand
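// Illustrative behavior: when VM_Version::has_CompareBranch() is true and the
// target is within 16bit relative range, a signed 64-bit compare-and-branch
// collapses into a single CGRJ; otherwise CGR followed by branch_optimized()
// is emitted.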
1641 void MacroAssembler::compare_and_branch_optimized(Register r1,
1642                                                   Register r2,
1643                                                   Assembler::branch_condition cond,
1644                                                   address  branch_addr,
1645                                                   bool     len64,
1646                                                   bool     has_sign) {
1647   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
1648 
1649   address branch_origin = pc();
1650   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1651     switch (casenum) {
1652       case 0: z_crj( r1, r2, cond, branch_addr); break;
      case 1: z_clrj(r1, r2, cond, branch_addr); break;
1654       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1655       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1656       default: ShouldNotReachHere(); break;
1657     }
1658   } else {
1659     switch (casenum) {
1660       case 0: z_cr( r1, r2); break;
1661       case 1: z_clr(r1, r2); break;
1662       case 2: z_cgr(r1, r2); break;
1663       case 3: z_clgr(r1, r2); break;
1664       default: ShouldNotReachHere(); break;
1665     }
1666     branch_optimized(cond, branch_addr);
1667   }
1668 }
1669 
1670 // Generate an optimal compare and branch to the branch target.
1671 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1672 // branch distance is short enough. Loading the target address into a
1673 // register and branching via reg is used as fallback only.
1674 //
1675 // Input:
1676 //   r1 - left compare operand (in register)
1677 //   x2 - right compare operand (immediate)
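// Illustrative behavior: comparing against 0 degrades into a load-and-test
// (LTR/LTGR), a small immediate with a nearby target uses the fused CIJ/CGIJ
// form, a 16/32-bit immediate uses CHI/CFI and friends, and anything larger
// is first loaded into Z_R0 or Z_R1 and compared register-register.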
1678 void MacroAssembler::compare_and_branch_optimized(Register r1,
1679                                                   jlong    x2,
1680                                                   Assembler::branch_condition cond,
1681                                                   Label&   branch_target,
1682                                                   bool     len64,
1683                                                   bool     has_sign) {
1684   address      branch_origin = pc();
1685   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1686   bool         is_RelAddr16  = branch_target.is_near() ||
1687                                (branch_target.is_bound() &&
1688                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1689   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
1690 
1691   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1692     switch (casenum) {
1693       case 0: z_cij( r1, x2, cond, branch_target); break;
1694       case 1: z_clij(r1, x2, cond, branch_target); break;
1695       case 2: z_cgij(r1, x2, cond, branch_target); break;
1696       case 3: z_clgij(r1, x2, cond, branch_target); break;
1697       default: ShouldNotReachHere(); break;
1698     }
1699     return;
1700   }
1701 
1702   if (x2 == 0) {
1703     switch (casenum) {
1704       case 0: z_ltr(r1, r1); break;
1705       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1706       case 2: z_ltgr(r1, r1); break;
1707       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1708       default: ShouldNotReachHere(); break;
1709     }
1710   } else {
1711     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1712       switch (casenum) {
1713         case 0: z_chi(r1, x2); break;
1714         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1715         case 2: z_cghi(r1, x2); break;
1716         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
        default: ShouldNotReachHere(); break;
1718       }
1719     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1720       switch (casenum) {
1721         case 0: z_cfi( r1, x2); break;
1722         case 1: z_clfi(r1, x2); break;
1723         case 2: z_cgfi(r1, x2); break;
1724         case 3: z_clgfi(r1, x2); break;
1725         default: ShouldNotReachHere(); break;
1726       }
1727     } else {
1728       // No instruction with immediate operand possible, so load into register.
1729       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1730       load_const_optimized(scratch, x2);
1731       switch (casenum) {
1732         case 0: z_cr( r1, scratch); break;
1733         case 1: z_clr(r1, scratch); break;
1734         case 2: z_cgr(r1, scratch); break;
1735         case 3: z_clgr(r1, scratch); break;
1736         default: ShouldNotReachHere(); break;
1737       }
1738     }
1739   }
1740   branch_optimized(cond, branch_target);
1741 }
1742 
1743 // Generate an optimal compare and branch to the branch target.
1744 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1745 // branch distance is short enough. Loading the target address into a
1746 // register and branching via reg is used as fallback only.
1747 //
1748 // Input:
1749 //   r1 - left compare operand
1750 //   r2 - right compare operand
1751 void MacroAssembler::compare_and_branch_optimized(Register r1,
1752                                                   Register r2,
1753                                                   Assembler::branch_condition cond,
1754                                                   Label&   branch_target,
1755                                                   bool     len64,
1756                                                   bool     has_sign) {
1757   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1758 
1759   if (branch_target.is_bound()) {
1760     address branch_addr = target(branch_target);
1761     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1762   } else {
1763     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1764       switch (casenum) {
1765         case 0: z_crj(  r1, r2, cond, branch_target); break;
1766         case 1: z_clrj( r1, r2, cond, branch_target); break;
1767         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1768         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1769         default: ShouldNotReachHere(); break;
1770       }
1771     } else {
1772       switch (casenum) {
1773         case 0: z_cr( r1, r2); break;
1774         case 1: z_clr(r1, r2); break;
1775         case 2: z_cgr(r1, r2); break;
1776         case 3: z_clgr(r1, r2); break;
1777         default: ShouldNotReachHere(); break;
1778       }
1779       branch_optimized(cond, branch_target);
1780     }
1781   }
1782 }
1783 
1784 //===========================================================================
1785 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1786 //===========================================================================
1787 
1788 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1789   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1790   int index = oop_recorder()->allocate_metadata_index(obj);
1791   RelocationHolder rspec = metadata_Relocation::spec(index);
1792   return AddressLiteral((address)obj, rspec);
1793 }
1794 
1795 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1796   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1797   int index = oop_recorder()->find_index(obj);
1798   RelocationHolder rspec = metadata_Relocation::spec(index);
1799   return AddressLiteral((address)obj, rspec);
1800 }
1801 
1802 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1803   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1804   int oop_index = oop_recorder()->allocate_oop_index(obj);
1805   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1806 }
1807 
1808 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1809   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1810   int oop_index = oop_recorder()->find_index(obj);
1811   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1812 }
1813 
1814 // NOTE: destroys r
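// The trick: (-r | r) has its sign bit set exactly when r != 0, so the
// logical shift right by 31 leaves 0 for r == 0 and 1 otherwise.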
1815 void MacroAssembler::c2bool(Register r, Register t) {
1816   z_lcr(t, r);   // t = -r
1817   z_or(r, t);    // r = -r OR r
1818   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1819 }
1820 
1821 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1822                                                       Register tmp,
1823                                                       int offset) {
1824   intptr_t value = *delayed_value_addr;
1825   if (value != 0) {
1826     return RegisterOrConstant(value + offset);
1827   }
1828 
1829   BLOCK_COMMENT("delayed_value {");
1830   // Load indirectly to solve generation ordering problem.
1831   load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1832   z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1833 
1834 #ifdef ASSERT
1835   NearLabel L;
1836   compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1837   z_illtrap();
1838   bind(L);
1839 #endif
1840 
1841   if (offset != 0) {
1842     z_agfi(tmp, offset);               // tmp = tmp + offset;
1843   }
1844 
1845   BLOCK_COMMENT("} delayed_value");
1846   return RegisterOrConstant(tmp);
1847 }
1848 
1849 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1850 // and return the resulting instruction.
1851 // Dest_pos and inst_pos are 32 bit only. These parms can only designate
1852 // relative positions.
1853 // Use correct argument types. Do not pre-calculate distance.
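// Illustrative behavior: short forms get their 16bit offset field (bits
// 16..31) replaced, long forms (BRASL, BRCL, LARL, ...) get a 32bit offset
// field (bits 16..47) replaced; the encoded offset is derived from dest_pos
// and inst_pos via RelAddr.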
1854 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1855   int c = 0;
1856   unsigned long patched_inst = 0;
1857   if (is_call_pcrelative_short(inst) ||
1858       is_branch_pcrelative_short(inst) ||
1859       is_branchoncount_pcrelative_short(inst) ||
1860       is_branchonindex32_pcrelative_short(inst)) {
1861     c = 1;
1862     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1863     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1864     patched_inst = (inst & ~m) | v;
1865   } else if (is_compareandbranch_pcrelative_short(inst)) {
1866     c = 2;
1867     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1868     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1869     patched_inst = (inst & ~m) | v;
1870   } else if (is_branchonindex64_pcrelative_short(inst)) {
1871     c = 3;
1872     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1873     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1874     patched_inst = (inst & ~m) | v;
1875   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1876     c = 4;
1877     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1878     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1879     patched_inst = (inst & ~m) | v;
1880   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1881     c = 5;
1882     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1883     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1884     patched_inst = (inst & ~m) | v;
1885   } else {
1886     print_dbg_msg(tty, inst, "not a relative branch", 0);
1887     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1888     ShouldNotReachHere();
1889   }
1890 
1891   long new_off = get_pcrel_offset(patched_inst);
1892   if (new_off != (dest_pos-inst_pos)) {
1893     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1894     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1895     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1896 #ifdef LUCY_DBG
1897     VM_Version::z_SIGSEGV();
1898 #endif
1899     ShouldNotReachHere();
1900   }
1901   return patched_inst;
1902 }
1903 
1904 // Only called when binding labels (share/vm/asm/assembler.cpp)
1905 // Pass arguments as intended. Do not pre-calculate distance.
1906 void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
1907   unsigned long stub_inst;
1908   int           inst_len = get_instruction(branch, &stub_inst);
1909 
1910   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1911 }
1912 
1913 
1914 // Extract relative address (aka offset).
1915 // inv_simm16 works for 4-byte instructions only.
// Compare-and-branch instructions are 6 bytes long and have their 16bit offset "in the middle".
1917 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1918 
1919   if (MacroAssembler::is_pcrelative_short(inst)) {
1920     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1921       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1922     } else {
1923       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1924     }
1925   }
1926 
1927   if (MacroAssembler::is_pcrelative_long(inst)) {
1928     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1929   }
1930 
1931   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1932 #ifdef LUCY_DBG
1933   VM_Version::z_SIGSEGV();
1934 #else
1935   ShouldNotReachHere();
1936 #endif
1937   return -1;
1938 }
1939 
1940 long MacroAssembler::get_pcrel_offset(address pc) {
1941   unsigned long inst;
1942   unsigned int  len = get_instruction(pc, &inst);
1943 
1944 #ifdef ASSERT
1945   long offset;
1946   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1947     offset = get_pcrel_offset(inst);
1948   } else {
1949     offset = -1;
1950   }
1951 
1952   if (offset == -1) {
1953     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1954 #ifdef LUCY_DBG
1955     VM_Version::z_SIGSEGV();
1956 #else
1957     ShouldNotReachHere();
1958 #endif
1959   }
1960   return offset;
1961 #else
1962   return get_pcrel_offset(inst);
1963 #endif // ASSERT
1964 }
1965 
1966 // Get target address from pc-relative instructions.
1967 address MacroAssembler::get_target_addr_pcrel(address pc) {
1968   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1969   return pc + get_pcrel_offset(pc);
1970 }
1971 
1972 // Patch pc relative load address.
1973 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1974   unsigned long inst;
1975   // Offset is +/- 2**32 -> use long.
1976   ptrdiff_t distance = con - pc;
1977 
1978   get_instruction(pc, &inst);
1979 
1980   if (is_pcrelative_short(inst)) {
1981     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1982 
1983     // Some extra safety net.
1984     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1985       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1986       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1988     }
1989     return;
1990   }
1991 
1992   if (is_pcrelative_long(inst)) {
1993     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1994 
    // Some extra safety net.
1996     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1997       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1998       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
      guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
2000     }
2001     return;
2002   }
2003 
2004   guarantee(false, "not a pcrelative instruction to patch!");
2005 }
2006 
2007 // "Current PC" here means the address just behind the basr instruction.
2008 address MacroAssembler::get_PC(Register result) {
2009   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2010   return pc();
2011 }
2012 
2013 // Get current PC + offset.
2014 // Offset given in bytes, must be even!
2015 // "Current PC" here means the address of the larl instruction plus the given offset.
2016 address MacroAssembler::get_PC(Register result, int64_t offset) {
2017   address here = pc();
2018   z_larl(result, offset/2); // Save target instruction address in result.
2019   return here + offset;
2020 }
2021 
2022 void MacroAssembler::instr_size(Register size, Register pc) {
2023   // Extract 2 most significant bits of current instruction.
2024   z_llgc(size, Address(pc));
2025   z_srl(size, 6);
2026   // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
2027   z_ahi(size, 3);
2028   z_nill(size, 6);
2029 }
2030 
2031 // Resize_frame with SP(new) = SP(old) - [offset].
2032 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2033 {
2034   assert_different_registers(offset, fp, Z_SP);
2035   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2036 
2037   z_sgr(Z_SP, offset);
2038   z_stg(fp, _z_abi(callers_sp), Z_SP);
2039 }
2040 
2041 // Resize_frame with SP(new) = [newSP] + offset.
2042 //   This emitter is useful if we already have calculated a pointer
2043 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2044 //   but need some additional space, e.g. for spilling.
2045 //   newSP    is the pre-calculated pointer. It must not be modified.
2046 //   fp       holds, or is filled with, the frame pointer.
2047 //   offset   is the additional increment which is added to addr to form the new SP.
2048 //            Note: specify a negative value to reserve more space!
2049 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2050 //                    It does not guarantee that fp contains the frame pointer at the end.
2051 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2052   assert_different_registers(newSP, fp, Z_SP);
2053 
2054   if (load_fp) {
2055     z_lg(fp, _z_abi(callers_sp), Z_SP);
2056   }
2057 
2058   add2reg(Z_SP, offset, newSP);
2059   z_stg(fp, _z_abi(callers_sp), Z_SP);
2060 }
2061 
2062 // Resize_frame with SP(new) = [newSP].
2063 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2064 //                    It does not guarantee that fp contains the frame pointer at the end.
2065 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2066   assert_different_registers(newSP, fp, Z_SP);
2067 
2068   if (load_fp) {
2069     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2070   }
2071 
2072   z_lgr(Z_SP, newSP);
2073   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2074     z_stg(fp, _z_abi(callers_sp), newSP);
2075   } else {
2076     z_stg(fp, _z_abi(callers_sp), Z_SP);
2077   }
2078 }
2079 
2080 // Resize_frame with SP(new) = SP(old) + offset.
2081 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2082   assert_different_registers(fp, Z_SP);
2083 
2084   if (load_fp) {
2085     z_lg(fp, _z_abi(callers_sp), Z_SP);
2086   }
2087   add64(Z_SP, offset);
2088   z_stg(fp, _z_abi(callers_sp), Z_SP);
2089 }
2090 
2091 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2092 #ifdef ASSERT
2093   assert_different_registers(bytes, old_sp, Z_SP);
2094   if (!copy_sp) {
2095     z_cgr(old_sp, Z_SP);
2096     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2097   }
2098 #endif
2099   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2100   if (bytes_with_inverted_sign) {
2101     z_agr(Z_SP, bytes);
2102   } else {
    z_sgr(Z_SP, bytes); // z_sgfr would be sufficient, but probably not faster.
2104   }
2105   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2106 }
2107 
2108 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2109   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2110   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2111   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2112 
2113   // We must not write outside the current stack bounds (given by Z_SP).
2114   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2115   // We rely on Z_R0 by default to be available as scratch.
2116   z_lgr(scratch, Z_SP);
2117   add2reg(Z_SP, -offset);
2118   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2119 #ifdef ASSERT
2120   // Just make sure nobody uses the value in the default scratch register.
2121   // When another register is used, the caller might rely on it containing the frame pointer.
2122   if (scratch == Z_R0) {
2123     z_iihf(scratch, 0xbaadbabe);
2124     z_iilf(scratch, 0xdeadbeef);
2125   }
2126 #endif
2127   return offset;
2128 }
2129 
2130 // Push a frame of size `bytes' plus abi160 on top.
2131 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2132   BLOCK_COMMENT("push_frame_abi160 {");
2133   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2134   BLOCK_COMMENT("} push_frame_abi160");
2135   return res;
2136 }
2137 
2138 // Pop current C frame.
2139 void MacroAssembler::pop_frame() {
2140   BLOCK_COMMENT("pop_frame:");
2141   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2142 }
2143 
2144 // Pop current C frame and restore return PC register (Z_R14).
2145 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2146   BLOCK_COMMENT("pop_frame_restore_retPC:");
2147   int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2148   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2149   if (Displacement::is_validDisp(retPC_offset)) {
2150     z_lg(Z_R14, retPC_offset, Z_SP);
2151     add2reg(Z_SP, frame_size_in_bytes);
2152   } else {
2153     add2reg(Z_SP, frame_size_in_bytes);
2154     restore_return_pc();
2155   }
2156 }
2157 
2158 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2159   if (allow_relocation) {
2160     call_c(entry_point);
2161   } else {
2162     call_c_static(entry_point);
2163   }
2164 }
2165 
2166 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2167   bool allow_relocation = true;
2168   call_VM_leaf_base(entry_point, allow_relocation);
2169 }
2170 
2171 void MacroAssembler::call_VM_base(Register oop_result,
2172                                   Register last_java_sp,
2173                                   address  entry_point,
2174                                   bool     allow_relocation,
2175                                   bool     check_exceptions) { // Defaults to true.
2176   // Allow_relocation indicates, if true, that the generated code shall
2177   // be fit for code relocation or referenced data relocation. In other
2178   // words: all addresses must be considered variable. PC-relative addressing
2179   // is not possible then.
2180   // On the other hand, if (allow_relocation == false), addresses and offsets
2181   // may be considered stable, enabling us to take advantage of some PC-relative
2182   // addressing tweaks. These might improve performance and reduce code size.
2183 
2184   // Determine last_java_sp register.
2185   if (!last_java_sp->is_valid()) {
2186     last_java_sp = Z_SP;  // Load Z_SP as SP.
2187   }
2188 
2189   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2190 
2191   // ARG1 must hold thread address.
2192   z_lgr(Z_ARG1, Z_thread);
2193 
2194   address return_pc = NULL;
2195   if (allow_relocation) {
2196     return_pc = call_c(entry_point);
2197   } else {
2198     return_pc = call_c_static(entry_point);
2199   }
2200 
2201   reset_last_Java_frame(allow_relocation);
2202 
2203   // C++ interp handles this in the interpreter.
2204   check_and_handle_popframe(Z_thread);
2205   check_and_handle_earlyret(Z_thread);
2206 
2207   // Check for pending exceptions.
2208   if (check_exceptions) {
2209     // Check for pending exceptions (java_thread is set upon return).
2210     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2211 
    // This used to conditionally jump to forward_exception. However, if the code
    // gets relocated, that branch might no longer reach its target. So we branch
    // around an unconditional call to the stub, which always reaches.
2215 
2216     Label ok;
2217     z_bre(ok); // Bcondequal is the same as bcondZero.
2218     call_stub(StubRoutines::forward_exception_entry());
2219     bind(ok);
2220   }
2221 
2222   // Get oop result if there is one and reset the value in the thread.
2223   if (oop_result->is_valid()) {
2224     get_vm_result(oop_result);
2225   }
2226 
2227   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2228 }
2229 
2230 void MacroAssembler::call_VM_base(Register oop_result,
2231                                   Register last_java_sp,
2232                                   address  entry_point,
2233                                   bool     check_exceptions) { // Defaults to true.
2234   bool allow_relocation = true;
2235   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2236 }
2237 
2238 // VM calls without explicit last_java_sp.
2239 
2240 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2241   // Call takes possible detour via InterpreterMacroAssembler.
2242   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2243 }
2244 
2245 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2246   // Z_ARG1 is reserved for the thread.
2247   lgr_if_needed(Z_ARG2, arg_1);
2248   call_VM(oop_result, entry_point, check_exceptions);
2249 }
2250 
2251 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2252   // Z_ARG1 is reserved for the thread.
2253   lgr_if_needed(Z_ARG2, arg_1);
2254   assert(arg_2 != Z_ARG2, "smashed argument");
2255   lgr_if_needed(Z_ARG3, arg_2);
2256   call_VM(oop_result, entry_point, check_exceptions);
2257 }
2258 
2259 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2260                              Register arg_3, bool check_exceptions) {
2261   // Z_ARG1 is reserved for the thread.
2262   lgr_if_needed(Z_ARG2, arg_1);
2263   assert(arg_2 != Z_ARG2, "smashed argument");
2264   lgr_if_needed(Z_ARG3, arg_2);
2265   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2266   lgr_if_needed(Z_ARG4, arg_3);
2267   call_VM(oop_result, entry_point, check_exceptions);
2268 }
2269 
2270 // VM static calls without explicit last_java_sp.
2271 
2272 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2273   // Call takes possible detour via InterpreterMacroAssembler.
2274   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2275 }
2276 
2277 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2278                                     Register arg_3, bool check_exceptions) {
2279   // Z_ARG1 is reserved for the thread.
2280   lgr_if_needed(Z_ARG2, arg_1);
2281   assert(arg_2 != Z_ARG2, "smashed argument");
2282   lgr_if_needed(Z_ARG3, arg_2);
2283   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2284   lgr_if_needed(Z_ARG4, arg_3);
2285   call_VM_static(oop_result, entry_point, check_exceptions);
2286 }
2287 
2288 // VM calls with explicit last_java_sp.
2289 
2290 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2291   // Call takes possible detour via InterpreterMacroAssembler.
2292   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2293 }
2294 
2295 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2296    // Z_ARG1 is reserved for the thread.
2297    lgr_if_needed(Z_ARG2, arg_1);
2298    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2299 }
2300 
2301 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2302                              Register arg_2, bool check_exceptions) {
2303    // Z_ARG1 is reserved for the thread.
2304    lgr_if_needed(Z_ARG2, arg_1);
2305    assert(arg_2 != Z_ARG2, "smashed argument");
2306    lgr_if_needed(Z_ARG3, arg_2);
2307    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2308 }
2309 
2310 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2311                              Register arg_2, Register arg_3, bool check_exceptions) {
2312   // Z_ARG1 is reserved for the thread.
2313   lgr_if_needed(Z_ARG2, arg_1);
2314   assert(arg_2 != Z_ARG2, "smashed argument");
2315   lgr_if_needed(Z_ARG3, arg_2);
2316   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2317   lgr_if_needed(Z_ARG4, arg_3);
2318   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2319 }
2320 
2321 // VM leaf calls.
2322 
2323 void MacroAssembler::call_VM_leaf(address entry_point) {
2324   // Call takes possible detour via InterpreterMacroAssembler.
2325   call_VM_leaf_base(entry_point, true);
2326 }
2327 
2328 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2329   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2330   call_VM_leaf(entry_point);
2331 }
2332 
2333 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2334   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2335   assert(arg_2 != Z_ARG1, "smashed argument");
2336   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2337   call_VM_leaf(entry_point);
2338 }
2339 
2340 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2341   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2342   assert(arg_2 != Z_ARG1, "smashed argument");
2343   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2344   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2345   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2346   call_VM_leaf(entry_point);
2347 }
2348 
2349 // Static VM leaf calls.
2350 // Really static VM leaf calls are never patched.
2351 
2352 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2353   // Call takes possible detour via InterpreterMacroAssembler.
2354   call_VM_leaf_base(entry_point, false);
2355 }
2356 
2357 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2358   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2359   call_VM_leaf_static(entry_point);
2360 }
2361 
2362 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2363   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2364   assert(arg_2 != Z_ARG1, "smashed argument");
2365   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2366   call_VM_leaf_static(entry_point);
2367 }
2368 
2369 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2370   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2371   assert(arg_2 != Z_ARG1, "smashed argument");
2372   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2373   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2374   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2375   call_VM_leaf_static(entry_point);
2376 }
2377 
2378 // Don't use detour via call_c(reg).
2379 address MacroAssembler::call_c(address function_entry) {
2380   load_const(Z_R1, function_entry);
2381   return call(Z_R1);
2382 }
2383 
2384 // Variant for really static (non-relocatable) calls which are never patched.
2385 address MacroAssembler::call_c_static(address function_entry) {
2386   load_absolute_address(Z_R1, function_entry);
2387 #if 0 // def ASSERT
2388   // Verify that call site did not move.
2389   load_const_optimized(Z_R0, function_entry);
2390   z_cgr(Z_R1, Z_R0);
2391   z_brc(bcondEqual, 3);
2392   z_illtrap(0xba);
2393 #endif
2394   return call(Z_R1);
2395 }
2396 
2397 address MacroAssembler::call_c_opt(address function_entry) {
2398   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2399   _last_calls_return_pc = success ? pc() : NULL;
2400   return _last_calls_return_pc;
2401 }
2402 
2403 // Identify a call_far_patchable instruction: LARL + LG + BASR
2404 //
2405 //    nop                   ; optionally, if required for alignment
2406 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2407 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2408 //
2409 // Code pattern will eventually get patched into variant2 (see below for detection code).
2410 //
2411 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2412   address iaddr = instruction_addr;
2413 
2414   // Check for the actual load instruction.
2415   if (!is_load_const_from_toc(iaddr)) { return false; }
2416   iaddr += load_const_from_toc_size();
2417 
2418   // Check for the call (BASR) instruction, finally.
2419   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2420   return is_call_byregister(iaddr);
2421 }
2422 
2423 // Identify a call_far_patchable instruction: BRASL
2424 //
// Code pattern suited to atomic patching:
2426 //    nop                       ; Optionally, if required for alignment.
2427 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2428 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2429 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2430 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2431   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2432 
2433   // Check for correct number of leading nops.
2434   address iaddr;
2435   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2436     if (!is_z_nop(iaddr)) { return false; }
2437   }
2438   assert(iaddr == call_addr, "sanity");
2439 
2440   // --> Check for call instruction.
2441   if (is_call_far_pcrelative(call_addr)) {
2442     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2443     return true;
2444   }
2445 
2446   return false;
2447 }
2448 
2449 // Emit a NOT mt-safely patchable 64 bit absolute call.
2450 // If toc_offset == -2, then the destination of the call (= target) is emitted
2451 //                      to the constant pool and a runtime_call relocation is added
2452 //                      to the code buffer.
2453 // If toc_offset != -2, target must already be in the constant pool at
2454 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2455 //                      from the runtime_call relocation).
2456 // Special handling of emitting to scratch buffer when there is no constant pool.
2457 // Slightly changed code pattern. We emit an additional nop if we would
2458 // not end emitting at a word aligned address. This is to ensure
2459 // an atomically patchable displacement in brasl instructions.
2460 //
2461 // A call_far_patchable comes in different flavors:
2462 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
//  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2464 //  - BRASL                  (relative address of call target coded in instruction)
2465 // All flavors occupy the same amount of space. Length differences are compensated
2466 // by leading nops, such that the instruction sequence always ends at the same
2467 // byte offset. This is required to keep the return offset constant.
2468 // Furthermore, the return address (the end of the instruction sequence) is forced
2469 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2470 // need to patch the call target of the BRASL flavor.
2471 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
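// Illustrative behavior: with ReoptimizeCallSequences enabled and a target in
// 32-bit relative range, the emitted sequence is
//   nop ... nop ; nop ; brasl Z_R14,<target>
// padded to call_far_patchable_size(); otherwise the target address is placed
// in the constant pool and the sequence becomes
//   (larl+lg | lgrl) ; basr Z_R14,<reg>
// of the same total length, so the return offset never changes.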
2472 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2473   // Get current pc and ensure word alignment for end of instr sequence.
2474   const address start_pc = pc();
2475   const intptr_t       start_off = offset();
2476   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2477   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2478   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2479   const bool emit_relative_call  = !emit_target_to_pool &&
2480                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2481                                    ReoptimizeCallSequences &&
2482                                    !code_section()->scratch_emit();
2483 
2484   if (emit_relative_call) {
2485     // Add padding to get the same size as below.
2486     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2487     unsigned int current_padding;
2488     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2489     assert(current_padding == padding, "sanity");
2490 
2491     // relative call: len = 2(nop) + 6 (brasl)
2492     // CodeBlob resize cannot occur in this case because
2493     // this call is emitted into pre-existing space.
2494     z_nop(); // Prepend each BRASL with a nop.
2495     z_brasl(Z_R14, target);
2496   } else {
2497     // absolute call: Get address from TOC.
2498     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2499     if (emit_target_to_pool) {
2500       // When emitting the call for the first time, we do not need to use
2501       // the pc-relative version. It will be patched anyway, when the code
2502       // buffer is copied.
2503       // Relocation is not needed when !ReoptimizeCallSequences.
2504       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2505       AddressLiteral dest(target, rt);
2506       // Store_oop_in_toc() adds dest to the constant table. As side effect, this kills
2507       // inst_mark(). Reset if possible.
2508       bool reset_mark = (inst_mark() == pc());
2509       tocOffset = store_oop_in_toc(dest);
2510       if (reset_mark) { set_inst_mark(); }
2511       if (tocOffset == -1) {
2512         return false; // Couldn't create constant pool entry.
2513       }
2514     }
2515     assert(offset() == start_off, "emit no code before this point!");
2516 
2517     address tocPos = pc() + tocOffset;
2518     if (emit_target_to_pool) {
2519       tocPos = code()->consts()->start() + tocOffset;
2520     }
2521     load_long_pcrelative(Z_R14, tocPos);
2522     z_basr(Z_R14, Z_R14);
2523   }
2524 
2525 #ifdef ASSERT
2526   // Assert that we can identify the emitted call.
2527   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2528   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2529 
2530   if (emit_target_to_pool) {
2531     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2532            "wrong encoding of dest address");
2533   }
2534 #endif
2535   return true; // success
2536 }
2537 
2538 // Identify a call_far_patchable instruction.
2539 // For more detailed information see header comment of call_far_patchable.
2540 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2541   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2542          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2543 }
2544 
2545 // Does the call_far_patchable instruction use a pc-relative encoding
2546 // of the call destination?
2547 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2548   // Variant 2 is pc-relative.
2549   return is_call_far_patchable_variant2_at(instruction_addr);
2550 }
2551 
2552 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2553   // Prepend each BRASL with a nop.
2554   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2555 }
2556 
2557 // Set destination address of a call_far_patchable instruction.
2558 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2559   ResourceMark rm;
2560 
2561   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2562   int code_size = MacroAssembler::call_far_patchable_size();
2563   CodeBuffer buf(instruction_addr, code_size);
2564   MacroAssembler masm(&buf);
2565   masm.call_far_patchable(dest, tocOffset);
2566   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2567 }
2568 
2569 // Get dest address of a call_far_patchable instruction.
2570 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2571   // Dynamic TOC: absolute address in constant pool.
2572   // Check variant2 first, it is more frequent.
2573 
2574   // Relative address encoded in call instruction.
2575   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2576     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2577 
2578   // Absolute address in constant pool.
2579   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2580     address iaddr = instruction_addr;
2581 
2582     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2583     address tocLoc    = iaddr + tocOffset;
2584     return *(address *)(tocLoc);
2585   } else {
2586     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2587     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2588             *(unsigned long*)instruction_addr,
2589             *(unsigned long*)(instruction_addr+8),
2590             call_far_patchable_size());
2591     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2592     ShouldNotReachHere();
2593     return NULL;
2594   }
2595 }
2596 
2597 void MacroAssembler::align_call_far_patchable(address pc) {
2598   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2599 }
2600 
2601 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2602 }
2603 
2604 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2605 }
2606 
2607 // Read from the polling page.
2608 // Use TM or TMY instruction, depending on read offset.
2609 //   offset = 0: Use TM, safepoint polling.
2610 //   offset < 0: Use TMY, profiling safepoint polling.
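// Illustrative behavior: offset 0 (safepoint poll) fits the unsigned 12-bit
// displacement and yields a 4-byte TM; a negative profiling offset does not
// and yields the 6-byte long-displacement TMY form.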
2611 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2612   if (Immediate::is_uimm12(offset)) {
2613     z_tm(offset, polling_page_address, mask_safepoint);
2614   } else {
2615     z_tmy(offset, polling_page_address, mask_profiling);
2616   }
2617 }
2618 
2619 // Check whether z_instruction is a read access to the polling page
2620 // which was emitted by load_from_polling_page(..).
2621 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2622   unsigned long z_instruction;
2623   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2624 
2625   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2626 
2627   if (ilen == 4) {
2628     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2629 
2630     int ms = inv_mask(z_instruction,8,32);  // mask
2631     int ra = inv_reg(z_instruction,16,32);  // base register
2632     int ds = inv_uimm12(z_instruction);     // displacement
2633 
2634     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2635       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2636     }
2637 
2638   } else { /* if (ilen == 6) */
2639 
2640     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2641 
2642     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2643 
2644     int ms = inv_mask(z_instruction,8,48);  // mask
2645     int ra = inv_reg(z_instruction,16,48);  // base register
2646     int ds = inv_simm20(z_instruction);     // displacement
2647   }
2648 
2649   return true;
2650 }
2651 
2652 // Extract poll address from instruction and ucontext.
2653 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2654   assert(ucontext != NULL, "must have ucontext");
2655   ucontext_t* uc = (ucontext_t*) ucontext;
2656   unsigned long z_instruction;
2657   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2658 
2659   if (ilen == 4 && is_z_tm(z_instruction)) {
2660     int ra = inv_reg(z_instruction, 16, 32);  // base register
2661     int ds = inv_uimm12(z_instruction);       // displacement
2662     address addr = (address)uc->uc_mcontext.gregs[ra];
2663     return addr + ds;
2664   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2665     int ra = inv_reg(z_instruction, 16, 48);  // base register
2666     int ds = inv_simm20(z_instruction);       // displacement
2667     address addr = (address)uc->uc_mcontext.gregs[ra];
2668     return addr + ds;
2669   }
2670 
2671   ShouldNotReachHere();
2672   return NULL;
2673 }
2674 
2675 // Extract poll register from instruction.
2676 uint MacroAssembler::get_poll_register(address instr_loc) {
2677   unsigned long z_instruction;
2678   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2679 
2680   if (ilen == 4 && is_z_tm(z_instruction)) {
2681     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2682   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2683     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2684   }
2685 
2686   ShouldNotReachHere();
2687   return 0;
2688 }
2689 
2690 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
2691   if (SafepointMechanism::uses_thread_local_poll()) {
2692     const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
2693     // Armed page has poll_bit set.
2694     z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
2695     z_brnaz(slow_path);
2696   } else {
2697     load_const_optimized(temp_reg, SafepointSynchronize::address_of_state());
2698     z_cli(/*SafepointSynchronize::sz_state()*/4-1, temp_reg, SafepointSynchronize::_not_synchronized);
2699     z_brne(slow_path);
2700   }
2701 }
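
     // Illustrative use of safepoint_poll (a sketch; label and register names are examples only):
     //   Label slow_path;
     //   safepoint_poll(slow_path, Z_R1_scratch);
     //   // ... fast path continues here ...
     //   bind(slow_path);   // enter the VM / poll stub from here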
2702 
2703 // Don't rely on register locking, always use Z_R1 as scratch register instead.
2704 void MacroAssembler::bang_stack_with_offset(int offset) {
2705   // Stack grows down, caller passes positive offset.
2706   assert(offset > 0, "must bang with positive offset");
2707   if (Displacement::is_validDisp(-offset)) {
2708     z_tmy(-offset, Z_SP, mask_stackbang);
2709   } else {
2710     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2711     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2712   }
2713 }
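
     // Illustrative use (a sketch of how shadow-zone banging typically drives this emitter;
     // the loop bound and page-size expression are examples, not code from this file):
     //   for (int pages = 1; pages <= StackShadowPages; pages++) {
     //     bang_stack_with_offset(pages * os::vm_page_size());
     //   }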
2714 
2715 void MacroAssembler::reserved_stack_check(Register return_pc) {
2716   // Test if reserved zone needs to be enabled.
2717   Label no_reserved_zone_enabling;
2718   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2719   BLOCK_COMMENT("reserved_stack_check {");
2720 
2721   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2722   z_brl(no_reserved_zone_enabling);
2723 
2724   // Enable reserved zone again, throw stack overflow exception.
2725   save_return_pc();
2726   push_frame_abi160(0);
2727   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2728   pop_frame();
2729   restore_return_pc();
2730 
2731   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2732   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2733   z_br(Z_R1);
2734 
2735   should_not_reach_here();
2736 
2737   bind(no_reserved_zone_enabling);
2738   BLOCK_COMMENT("} reserved_stack_check");
2739 }
2740 
2741 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
2742 void MacroAssembler::tlab_allocate(Register obj,
2743                                    Register var_size_in_bytes,
2744                                    int con_size_in_bytes,
2745                                    Register t1,
2746                                    Label& slow_case) {
2747   assert_different_registers(obj, var_size_in_bytes, t1);
2748   Register end = t1;
2749   Register thread = Z_thread;
2750 
2751   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2752   if (var_size_in_bytes == noreg) {
2753     z_lay(end, Address(obj, con_size_in_bytes));
2754   } else {
2755     z_lay(end, Address(obj, var_size_in_bytes));
2756   }
2757   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2758   branch_optimized(bcondHigh, slow_case);
2759 
2760   // Update the tlab top pointer.
2761   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2762 
2763   // Recover var_size_in_bytes if necessary.
2764   if (var_size_in_bytes == end) {
2765     z_sgr(var_size_in_bytes, obj);
2766   }
2767 }
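
     // Illustrative call (a sketch; register names and the size constant are examples only):
     //   Label slow_case;
     //   tlab_allocate(Z_RET, noreg, /*con_size_in_bytes=*/24, Z_R1_scratch, slow_case);
     //   // Fall-through: Z_RET holds the new, uninitialized object. slow_case handles TLAB overflow.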
2768 
2769 // Emitter for interface method lookup.
2770 //   input: recv_klass, intf_klass, itable_index
2771 //   output: method_result
2772 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2773 // TODO: temp2_reg is unused. We may also use this emitter in the itable stubs.
2774 // If the register is still not needed there, remove it.
2775 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2776                                              Register           intf_klass,
2777                                              RegisterOrConstant itable_index,
2778                                              Register           method_result,
2779                                              Register           temp1_reg,
2780                                              Label&             no_such_interface,
2781                                              bool               return_method) {
2782 
2783   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2784   const Register itable_entry_addr = Z_R1_scratch;
2785   const Register itable_interface = Z_R0_scratch;
2786 
2787   BLOCK_COMMENT("lookup_interface_method {");
2788 
2789   // Load start of itable entries into itable_entry_addr.
2790   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2791   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2792 
2793   // Loop over all itable entries until the desired interface (in intf_klass) is found.
2794   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2795 
2796   add2reg_with_index(itable_entry_addr,
2797                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2798                      recv_klass, vtable_len);
2799 
2800   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2801   Label     search;
2802 
2803   bind(search);
2804 
2805   // Handle IncompatibleClassChangeError.
2806   // If the entry is NULL then we've reached the end of the table
2807   // without finding the expected interface, so throw an exception.
2808   load_and_test_long(itable_interface, Address(itable_entry_addr));
2809   z_bre(no_such_interface);
2810 
2811   add2reg(itable_entry_addr, itable_offset_search_inc);
2812   z_cgr(itable_interface, intf_klass);
2813   z_brne(search);
2814 
2815   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2816   if (return_method) {
2817     const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2818                                       itableOffsetEntry::interface_offset_in_bytes()) -
2819                                      itable_offset_search_inc;
2820 
2821     // Compute itableMethodEntry and get method and entry point.
2822     // We use addressing with index and displacement, since the formula
2823     // for computing the entry's offset has a fixed and a dynamic part,
2824     // the latter depending on the matched interface entry and on whether
2825     // the itable index has been passed as a register or as a constant value.
2826     int method_offset = itableMethodEntry::method_offset_in_bytes();
2827                              // Fixed part (displacement), common operand.
2828     Register itable_offset = method_result;  // Dynamic part (index register).
2829 
2830     if (itable_index.is_register()) {
2831        // Compute the method's offset in that register, for the formula, see the
2832        // else-clause below.
2833        z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
2834        z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2835     } else {
2836       // Displacement increases.
2837       method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2838 
2839       // Load the vtable offset from the itable entry.
2840       z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2841     }
2842 
2843     // Finally load the Method*.
2844     z_lg(method_result, method_offset, itable_offset, recv_klass);
2845   }
2846   BLOCK_COMMENT("} lookup_interface_method");
2847 }
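
     // Memory layout assumed by lookup_interface_method above (a sketch, using the names from the code):
     //   recv_klass + Klass::vtable_start_offset():
     //     [ vtable: vtable_length entries of vtableEntry::size_in_bytes() each ]  // skipped via vtable_len
     //     [ itableOffsetEntry { interface, offset } ]*    // scanned linearly until intf_klass matches
     //     [ itableMethodEntry { method } ]* per interface // reached via the matched entry's offset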
2848 
2849 // Lookup for virtual method invocation.
2850 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2851                                            RegisterOrConstant vtable_index,
2852                                            Register           method_result) {
2853   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2854   assert(vtableEntry::size() * wordSize == wordSize,
2855          "else adjust the scaling in the code below");
2856 
2857   BLOCK_COMMENT("lookup_virtual_method {");
2858 
2859   const int base = in_bytes(Klass::vtable_start_offset());
2860 
2861   if (vtable_index.is_constant()) {
2862     // Load with base + disp.
2863     Address vtable_entry_addr(recv_klass,
2864                               vtable_index.as_constant() * wordSize +
2865                               base +
2866                               vtableEntry::method_offset_in_bytes());
2867 
2868     z_lg(method_result, vtable_entry_addr);
2869   } else {
2870     // Shift index properly and load with base + index + disp.
2871     Register vindex = vtable_index.as_register();
2872     Address  vtable_entry_addr(recv_klass, vindex,
2873                                base + vtableEntry::method_offset_in_bytes());
2874 
2875     z_sllg(vindex, vindex, exact_log2(wordSize));
2876     z_lg(method_result, vtable_entry_addr);
2877   }
2878   BLOCK_COMMENT("} lookup_virtual_method");
2879 }
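
     // Effective address computed above for the constant-index case (sketch):
     //   method_result = [ recv_klass + Klass::vtable_start_offset()
     //                     + vtable_index * wordSize + vtableEntry::method_offset_in_bytes() ]
     // In the register case, the index is pre-scaled by wordSize and used as an index register
     // in the same load.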
2880 
2881 // Factor out code to call ic_miss_handler.
2882 // Generate code to call the inline cache miss handler.
2883 //
2884 // In most cases, this code will be generated out-of-line.
2885 // The method parameters are intended to provide some variability.
2886 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2887 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2888 //                  Any value except 0x00 is supported.
2889 //                  = 0x00 - do not generate illtrap instructions.
2890 //                         use nops to fill unused space.
2891 //   requiredSize - required size of the generated code. If the actually
2892 //                  generated code is smaller, use padding instructions to fill up.
2893 //                  = 0 - no size requirement, no padding.
2894 //   scratch      - scratch register to hold branch target address.
2895 //
2896 //  The method returns the code offset of the bound label.
2897 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2898   intptr_t startOffset = offset();
2899 
2900   // Prevent entry at content_begin().
2901   if (trapMarker != 0) {
2902     z_illtrap(trapMarker);
2903   }
2904 
2905   // Load address of inline cache miss code into scratch register
2906   // and branch to cache miss handler.
2907   BLOCK_COMMENT("IC miss handler {");
2908   BIND(ICM);
2909   unsigned int   labelOffset = offset();
2910   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2911 
2912   load_const_optimized(scratch, icmiss);
2913   z_br(scratch);
2914 
2915   // Fill unused space.
2916   if (requiredSize > 0) {
2917     while ((offset() - startOffset) < requiredSize) {
2918       if (trapMarker == 0) {
2919         z_nop();
2920       } else {
2921         z_illtrap(trapMarker);
2922       }
2923     }
2924   }
2925   BLOCK_COMMENT("} IC miss handler");
2926   return labelOffset;
2927 }
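
     // Illustrative use (a sketch; the trap marker and scratch register are example values only):
     //   Label ic_miss;
     //   nmethod_UEP(ic_miss);                                   // branches to ic_miss on inline cache mismatch
     //   ...                                                     // verified entry point, method body
     //   call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);   // binds ic_miss, jumps to the IC miss stub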
2928 
2929 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2930 #ifdef COMPILER2
2931   Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
2932 #else
2933   Register ic_reg       = as_Register(0);
2934 #endif
2935   int      klass_offset = oopDesc::klass_offset_in_bytes();
2936   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2937     if (VM_Version::has_CompareBranch()) {
2938       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2939     } else {
2940       z_ltgr(Z_ARG1, Z_ARG1);
2941       z_bre(ic_miss);
2942     }
2943   }
2944   // Compare cached class against klass from receiver.
2945   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2946   z_brne(ic_miss);
2947 }
2948 
2949 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2950                                                    Register   super_klass,
2951                                                    Register   temp1_reg,
2952                                                    Label*     L_success,
2953                                                    Label*     L_failure,
2954                                                    Label*     L_slow_path,
2955                                                    RegisterOrConstant super_check_offset) {
2956 
2957   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2958   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2959 
2960   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2961   bool need_slow_path = (must_load_sco ||
2962                          super_check_offset.constant_or_zero() == sc_offset);
2963 
2964   // Input registers must not overlap.
2965   assert_different_registers(sub_klass, super_klass, temp1_reg);
2966   if (super_check_offset.is_register()) {
2967     assert_different_registers(sub_klass, super_klass,
2968                                super_check_offset.as_register());
2969   } else if (must_load_sco) {
2970     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2971   }
2972 
2973   const Register Rsuper_check_offset = temp1_reg;
2974 
2975   NearLabel L_fallthrough;
2976   int label_nulls = 0;
2977   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
2978   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
2979   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2980   assert(label_nulls <= 1 ||
2981          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2982          "at most one NULL in the batch, usually");
2983 
2984   BLOCK_COMMENT("check_klass_subtype_fast_path {");
2985   // If the pointers are equal, we are done (e.g., String[] elements).
2986   // This self-check enables sharing of secondary supertype arrays among
2987   // non-primary types such as array-of-interface. Otherwise, each such
2988   // type would need its own customized SSA.
2989   // We move this check to the front of the fast path because many
2990   // type checks are in fact trivially successful in this manner,
2991   // so we get a nicely predicted branch right at the start of the check.
2992   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2993 
2994   // Check the supertype display, which is uint.
2995   if (must_load_sco) {
2996     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
2997     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
2998   }
2999   Address super_check_addr(sub_klass, super_check_offset, 0);
3000   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
3001 
3002   // This check has worked decisively for primary supers.
3003   // Secondary supers are sought in the super_cache ('super_cache_addr').
3004   // (Secondary supers are interfaces and very deeply nested subtypes.)
3005   // This works in the same check above because of a tricky aliasing
3006   // between the super_cache and the primary super display elements.
3007   // (The 'super_check_addr' can address either, as the case requires.)
3008   // Note that the cache is updated below if it does not help us find
3009   // what we need immediately.
3010   // So if it was a primary super, we can just fail immediately.
3011   // Otherwise, it's the slow path for us (no success at this point).
3012 
3013   // Hacked jmp, which may only be used just before L_fallthrough.
3014 #define final_jmp(label)                                                \
3015   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3016   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
3017 
3018   if (super_check_offset.is_register()) {
3019     branch_optimized(Assembler::bcondEqual, *L_success);
3020     z_cfi(super_check_offset.as_register(), sc_offset);
3021     if (L_failure == &L_fallthrough) {
3022       branch_optimized(Assembler::bcondEqual, *L_slow_path);
3023     } else {
3024       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3025       final_jmp(*L_slow_path);
3026     }
3027   } else if (super_check_offset.as_constant() == sc_offset) {
3028     // Need a slow path; fast failure is impossible.
3029     if (L_slow_path == &L_fallthrough) {
3030       branch_optimized(Assembler::bcondEqual, *L_success);
3031     } else {
3032       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3033       final_jmp(*L_success);
3034     }
3035   } else {
3036     // No slow path; it's a fast decision.
3037     if (L_failure == &L_fallthrough) {
3038       branch_optimized(Assembler::bcondEqual, *L_success);
3039     } else {
3040       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3041       final_jmp(*L_success);
3042     }
3043   }
3044 
3045   bind(L_fallthrough);
3046 #undef local_brc
3047 #undef final_jmp
3048   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3049   // fallthru (to slow path)
3050 }
3051 
3052 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3053                                                    Register Rsuperklass,
3054                                                    Register Rarray_ptr,  // tmp
3055                                                    Register Rlength,     // tmp
3056                                                    Label* L_success,
3057                                                    Label* L_failure) {
3058   // Input registers must not overlap.
3059   // Also check for Z_R1, which is explicitly used here.
3060   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3061   NearLabel L_fallthrough;
3062   int label_nulls = 0;
3063   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3064   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3065   assert(label_nulls <= 1, "at most one NULL in the batch");
3066 
3067   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3068   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3069 
3070   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3071   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3072 
3073   // Hacked jmp, which may only be used just before L_fallthrough.
3074 #define final_jmp(label)                                                \
3075   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3076   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3077 
3078   NearLabel loop_iterate, loop_count, match;
3079 
3080   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3081   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3082 
3083   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3084   branch_optimized(Assembler::bcondZero, *L_failure);
3085 
3086   // Oops in the table are no longer compressed.
3087   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3088   z_bre(match);                               // Shortcut for array length = 1.
3089 
3090   // No match yet, so we must walk the array's elements.
3091   z_lngfr(Rlength, Rlength);
3092   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3093   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3094   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3095   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3096   z_bru(loop_count);
3097 
3098   BIND(loop_iterate);
3099   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3100   z_bre(match);
3101   BIND(loop_count);
3102   z_brxlg(Rlength, Z_R1, loop_iterate);
3103 
3104   // Rsuperklass not found among secondary super classes -> failure.
3105   branch_optimized(Assembler::bcondAlways, *L_failure);
3106 
3107   // Got a hit. Return success (zero result). Set cache.
3108   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3109 
3110   BIND(match);
3111 
3112   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3113 
3114   final_jmp(*L_success);
3115 
3116   // Exit to the surrounding code.
3117   BIND(L_fallthrough);
3118 #undef local_brc
3119 #undef final_jmp
3120   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3121 }
3122 
3123 // Emitter for combining fast and slow path.
3124 void MacroAssembler::check_klass_subtype(Register sub_klass,
3125                                          Register super_klass,
3126                                          Register temp1_reg,
3127                                          Register temp2_reg,
3128                                          Label&   L_success) {
3129   NearLabel failure;
3130   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3131   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3132                                 &L_success, &failure, NULL);
3133   check_klass_subtype_slow_path(sub_klass, super_klass,
3134                                 temp1_reg, temp2_reg, &L_success, NULL);
3135   BIND(failure);
3136   BLOCK_COMMENT("} check_klass_subtype");
3137 }
3138 
3139 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
3140   assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
3141 
3142   Label L_fallthrough;
3143   if (L_fast_path == NULL) {
3144     L_fast_path = &L_fallthrough;
3145   } else if (L_slow_path == NULL) {
3146     L_slow_path = &L_fallthrough;
3147   }
3148 
3149   // Fast path check: class is fully initialized
3150   z_cli(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3151   z_bre(*L_fast_path);
3152 
3153   // Fast path check: current thread is initializer thread
3154   z_cg(thread, Address(klass, InstanceKlass::init_thread_offset()));
3155   if (L_slow_path == &L_fallthrough) {
3156     z_bre(*L_fast_path);
3157   } else if (L_fast_path == &L_fallthrough) {
3158     z_brne(*L_slow_path);
3159   } else {
3160     Unimplemented();
3161   }
3162 
3163   bind(L_fallthrough);
3164 }
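
     // Illustrative use (a sketch; klass_reg and the label are examples only):
     //   Label L_clinit_slow;
     //   clinit_barrier(klass_reg, Z_thread, /*L_fast_path=*/NULL, &L_clinit_slow);
     //   // Fall-through: klass is initialized, or is being initialized by the current thread.
     //   ...
     //   bind(L_clinit_slow);   // call into the runtime to complete/await class initialization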
3165 
3166 // Increment a counter at counter_address when the eq condition code is
3167 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3168 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3169   Label l;
3170   z_brne(l);
3171   load_const(tmp1_reg, counter_address);
3172   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3173   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3174   bind(l);
3175 }
3176 
3177 // Semantics are dependent on the slow_case label:
3178 //   If the slow_case label is not NULL, failure to biased-lock the object
3179 //   transfers control to the location of the slow_case label. If the
3180 //   object could be biased-locked, control is transferred to the done label.
3181 //   The condition code is unpredictable.
3182 //
3183 //   If the slow_case label is NULL, failure to biased-lock the object results
3184 //   in a transfer of control to the done label with a condition code of not_equal.
3185 //   If the biased-lock could be successfully obtained, control is transferred to
3186 //   the done label with a condition code of equal.
3187 //   It is mandatory to react to the condition code at the done label.
3188 //
3189 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3190                                           Register  mark_reg,
3191                                           Register  temp_reg,
3192                                           Register  temp2_reg,    // May be Z_R0!
3193                                           Label    &done,
3194                                           Label    *slow_case) {
3195   assert(UseBiasedLocking, "why call this otherwise?");
3196   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3197 
3198   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3199 
3200   BLOCK_COMMENT("biased_locking_enter {");
3201 
3202   // Biased locking
3203   // See whether the lock is currently biased toward our thread and
3204   // whether the epoch is still valid.
3205   // Note that the runtime guarantees sufficient alignment of JavaThread
3206   // pointers to allow age to be placed into low bits.
3207   assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
3208          "biased locking makes assumptions about bit layout");
3209   z_lr(temp_reg, mark_reg);
3210   z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
3211   z_chi(temp_reg, markWord::biased_lock_pattern);
3212   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3213 
3214   load_prototype_header(temp_reg, obj_reg);
3215   load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
3216 
3217   z_ogr(temp_reg, Z_thread);
3218   z_xgr(temp_reg, mark_reg);
3219   z_ngr(temp_reg, temp2_reg);
3220   if (PrintBiasedLockingStatistics) {
3221     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3222     // Restore mark_reg.
3223     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3224   }
3225   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3226 
3227   Label try_revoke_bias;
3228   Label try_rebias;
3229   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3230 
3231   //----------------------------------------------------------------------------
3232   // At this point we know that the header has the bias pattern and
3233   // that we are not the bias owner in the current epoch. We need to
3234   // figure out more details about the state of the header in order to
3235   // know what operations can be legally performed on the object's
3236   // header.
3237 
3238   // If the low three bits in the xor result aren't clear, that means
3239   // the prototype header is no longer biased and we have to revoke
3240   // the bias on this object.
3241   z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
3242   z_brnaz(try_revoke_bias);
3243 
3244   // Biasing is still enabled for this data type. See whether the
3245   // epoch of the current bias is still valid, meaning that the epoch
3246   // bits of the mark word are equal to the epoch bits of the
3247   // prototype header. (Note that the prototype header's epoch bits
3248   // only change at a safepoint.) If not, attempt to rebias the object
3249   // toward the current thread. Note that we must be absolutely sure
3250   // that the current epoch is invalid in order to do this because
3251   // otherwise the manipulations it performs on the mark word are
3252   // illegal.
3253   z_tmll(temp_reg, markWord::epoch_mask_in_place);
3254   z_brnaz(try_rebias);
3255 
3256   //----------------------------------------------------------------------------
3257   // The epoch of the current bias is still valid but we know nothing
3258   // about the owner; it might be set or it might be clear. Try to
3259   // acquire the bias of the object using an atomic operation. If this
3260   // fails, we will go into the runtime to revoke the object's bias.
3261   // Note that we first construct the presumed unbiased header so we
3262   // don't accidentally blow away another thread's valid bias.
3263   z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
3264          markWord::epoch_mask_in_place);
3265   z_lgr(temp_reg, Z_thread);
3266   z_llgfr(mark_reg, mark_reg);
3267   z_ogr(temp_reg, mark_reg);
3268 
3269   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3270 
3271   z_csg(mark_reg, temp_reg, 0, obj_reg);
3272 
3273   // If the biasing toward our thread failed, this means that
3274   // another thread succeeded in biasing it toward itself and we
3275   // need to revoke that bias. The revocation will occur in the
3276   // interpreter runtime in the slow case.
3277 
3278   if (PrintBiasedLockingStatistics) {
3279     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3280                          temp_reg, temp2_reg);
3281   }
3282   if (slow_case != NULL) {
3283     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3284   }
3285   branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3286 
3287   //----------------------------------------------------------------------------
3288   bind(try_rebias);
3289   // At this point we know the epoch has expired, meaning that the
3290   // current "bias owner", if any, is actually invalid. Under these
3291   // circumstances _only_, we are allowed to use the current header's
3292   // value as the comparison value when doing the cas to acquire the
3293   // bias in the current epoch. In other words, we allow transfer of
3294   // the bias from one thread to another directly in this situation.
3295 
3296   z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
3297   load_prototype_header(temp_reg, obj_reg);
3298   z_llgfr(mark_reg, mark_reg);
3299 
3300   z_ogr(temp_reg, Z_thread);
3301 
3302   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3303 
3304   z_csg(mark_reg, temp_reg, 0, obj_reg);
3305 
3306   // If the biasing toward our thread failed, this means that
3307   // another thread succeeded in biasing it toward itself and we
3308   // need to revoke that bias. The revocation will occur in the
3309   // interpreter runtime in the slow case.
3310 
3311   if (PrintBiasedLockingStatistics) {
3312     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3313   }
3314   if (slow_case != NULL) {
3315     branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3316   }
3317   z_bru(done);           // Biased lock status given in condition code.
3318 
3319   //----------------------------------------------------------------------------
3320   bind(try_revoke_bias);
3321   // The prototype mark in the klass doesn't have the bias bit set any
3322   // more, indicating that objects of this data type are not supposed
3323   // to be biased any more. We are going to try to reset the mark of
3324   // this object to the prototype value and fall through to the
3325   // CAS-based locking scheme. Note that if our CAS fails, it means
3326   // that another thread raced us for the privilege of revoking the
3327   // bias of this particular object, so it's okay to continue in the
3328   // normal locking code.
3329   load_prototype_header(temp_reg, obj_reg);
3330 
3331   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3332 
3333   z_csg(mark_reg, temp_reg, 0, obj_reg);
3334 
3335   // Fall through to the normal CAS-based lock, because no matter what
3336   // the result of the above CAS, some thread must have succeeded in
3337   // removing the bias bit from the object's header.
3338   if (PrintBiasedLockingStatistics) {
3339     // z_cgr(mark_reg, temp2_reg);
3340     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3341   }
3342 
3343   bind(cas_label);
3344   BLOCK_COMMENT("} biased_locking_enter");
3345 }
3346 
3347 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3348   // Check for biased locking unlock case, which is a no-op
3349   // Note: we do not have to check the thread ID for two reasons.
3350   // First, the interpreter checks for IllegalMonitorStateException at
3351   // a higher level. Second, if the bias was revoked while we held the
3352   // lock, the object could not be rebiased toward another thread, so
3353   // the bias bit would be clear.
3354   BLOCK_COMMENT("biased_locking_exit {");
3355 
3356   z_lg(temp_reg, 0, mark_addr);
3357   z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
3358 
3359   z_chi(temp_reg, markWord::biased_lock_pattern);
3360   z_bre(done);
3361   BLOCK_COMMENT("} biased_locking_exit");
3362 }
3363 
3364 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3365   Register displacedHeader = temp1;
3366   Register currentHeader = temp1;
3367   Register temp = temp2;
3368   NearLabel done, object_has_monitor;
3369 
3370   BLOCK_COMMENT("compiler_fast_lock_object {");
3371 
3372   // Load markWord from oop into mark.
3373   z_lg(displacedHeader, 0, oop);
3374 
3375   if (try_bias) {
3376     biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3377   }
3378 
3379   // Handle existing monitor.
3380   // The object has an existing monitor iff (mark & monitor_value) != 0.
3381   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3382   z_lr(temp, displacedHeader);
3383   z_nill(temp, markWord::monitor_value);
3384   z_brne(object_has_monitor);
3385 
3386   // Set mark to markWord | markWord::unlocked_value.
3387   z_oill(displacedHeader, markWord::unlocked_value);
3388 
3389   // Load Compare Value application register.
3390 
3391   // Initialize the box (must happen before we update the object mark).
3392   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3393 
3394   // Memory Fence (in cmpxchgd)
3395   // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
3396 
3397   // If the compare-and-swap succeeded, then we found an unlocked object and we
3398   // have now locked it.
3399   z_csg(displacedHeader, box, 0, oop);
3400   assert(currentHeader==displacedHeader, "must be same register"); // currentHeader is an alias of displacedHeader.
3401   z_bre(done);
3402 
3403   // We did not see an unlocked object so try the fast recursive case.
3404 
3405   z_sgr(currentHeader, Z_SP);
3406   load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
3407 
3408   z_ngr(currentHeader, temp);
3409   //   z_brne(done);
3410   //   z_release();
3411   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3412 
3413   z_bru(done);
3414 
3415   Register zero = temp;
3416   Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
3417   bind(object_has_monitor);
3418   // The object's monitor m is unlocked iff m->owner == NULL,
3419   // otherwise m->owner may contain a thread or a stack address.
3420   //
3421   // Try to CAS m->owner from NULL to current thread.
3422   z_lghi(zero, 0);
3423   // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3424   z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3425   // Store a non-null value into the box.
3426   z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3427 #ifdef ASSERT
3428   z_brne(done);
3429   // We've acquired the monitor, check some invariants.
3430   // Invariant 1: _recursions should be 0.
3431   asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3432                           "monitor->_recursions should be 0", -1);
3433   z_ltgr(zero, zero); // Set CR=EQ.
3434 #endif
3435   bind(done);
3436 
3437   BLOCK_COMMENT("} compiler_fast_lock_object");
3438   // If locking was successful, CR should indicate 'EQ'.
3439   // The compiler or the native wrapper generates a branch to the runtime call
3440   // _complete_monitor_locking_Java.
3441 }
3442 
3443 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3444   Register displacedHeader = temp1;
3445   Register currentHeader = temp2;
3446   Register temp = temp1;
3447   Register monitor = temp2;
3448 
3449   Label done, object_has_monitor;
3450 
3451   BLOCK_COMMENT("compiler_fast_unlock_object {");
3452 
3453   if (try_bias) {
3454     biased_locking_exit(oop, currentHeader, done);
3455   }
3456 
3457   // Find the lock address and load the displaced header from the stack.
3458   // If the displaced header is zero, we have a recursive unlock.
3459   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3460   z_bre(done);
3461 
3462   // Handle existing monitor.
3463   // The object has an existing monitor iff (mark & monitor_value) != 0.
3464   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3465   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3466   z_nill(currentHeader, markWord::monitor_value);
3467   z_brne(object_has_monitor);
3468 
3469   // Check if it is still a lightweight lock. This is true if we see
3470   // the stack address of the basicLock in the markWord of the object.
3471   // Copy box to currentHeader such that csg does not kill it.
3472   z_lgr(currentHeader, box);
3473   z_csg(currentHeader, displacedHeader, 0, oop);
3474   z_bru(done); // Csg sets CR as desired.
3475 
3476   // Handle existing monitor.
3477   bind(object_has_monitor);
3478   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3479   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3480   z_brne(done);
3481   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3482   z_brne(done);
3483   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3484   z_brne(done);
3485   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3486   z_brne(done);
3487   z_release();
3488   z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3489 
3490   bind(done);
3491 
3492   BLOCK_COMMENT("} compiler_fast_unlock_object");
3493   // flag == EQ indicates success
3494   // flag == NE indicates failure
3495 }
3496 
3497 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3498   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3499   bs->resolve_jobject(this, value, tmp1, tmp2);
3500 }
3501 
3502 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3503 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3504   BLOCK_COMMENT("set_last_Java_frame {");
3505 
3506   // Always set last_Java_pc and flags first because once last_Java_sp
3507   // is visible, has_last_Java_frame is true and users will look at the
3508   // rest of the fields. (Note: flags should always be zero before we
3509   // get here, so they don't need to be set.)
3510 
3511   // Verify that last_Java_pc was zeroed on return to Java.
3512   if (allow_relocation) {
3513     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3514                             Z_thread,
3515                             "last_Java_pc not zeroed before leaving Java",
3516                             0x200);
3517   } else {
3518     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3519                                    Z_thread,
3520                                    "last_Java_pc not zeroed before leaving Java",
3521                                    0x200);
3522   }
3523 
3524   // When returning from calling out from Java mode the frame anchor's
3525   // last_Java_pc will always be set to NULL. It is set here so that
3526   // if we are doing a call to native (not VM) that we capture the
3527   // known pc and don't have to rely on the native call having a
3528   // standard frame linkage where we can find the pc.
3529   if (last_Java_pc!=noreg) {
3530     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3531   }
3532 
3533   // This membar release is not required on z/Architecture, since the sequence of stores
3534   // is maintained. Nevertheless, we leave it in to document the required ordering.
3535   // The implementation of z_release() should be empty.
3536   // z_release();
3537 
3538   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3539   BLOCK_COMMENT("} set_last_Java_frame");
3540 }
3541 
3542 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3543   BLOCK_COMMENT("reset_last_Java_frame {");
3544 
3545   if (allow_relocation) {
3546     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3547                                Z_thread,
3548                                "SP was not set, still zero",
3549                                0x202);
3550   } else {
3551     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3552                                       Z_thread,
3553                                       "SP was not set, still zero",
3554                                       0x202);
3555   }
3556 
3557   // _last_Java_sp = 0
3558   // Clearing storage must be atomic here, so don't use clear_mem()!
3559   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3560 
3561   // _last_Java_pc = 0
3562   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3563 
3564   BLOCK_COMMENT("} reset_last_Java_frame");
3565   return;
3566 }
3567 
3568 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3569   assert_different_registers(sp, tmp1);
3570 
3571   // We cannot trust that code generated by the C++ compiler saves R14
3572   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3573   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3574   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3575   // it into the frame anchor.
3576   get_PC(tmp1);
3577   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3578 }
3579 
3580 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3581   z_release();
3582 
3583   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3584   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3585   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3586 }
3587 
3588 void MacroAssembler::get_vm_result(Register oop_result) {
3589   verify_thread();
3590 
3591   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3592   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3593 
3594   verify_oop(oop_result);
3595 }
3596 
3597 void MacroAssembler::get_vm_result_2(Register result) {
3598   verify_thread();
3599 
3600   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3601   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3602 }
3603 
3604 // We require that C code which does not return a value in vm_result will
3605 // leave it undisturbed.
3606 void MacroAssembler::set_vm_result(Register oop_result) {
3607   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3608 }
3609 
3610 // Explicit null checks (used for method handle code).
3611 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3612   if (!ImplicitNullChecks) {
3613     NearLabel ok;
3614 
3615     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3616 
3617     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3618     address exception_entry = Interpreter::throw_NullPointerException_entry();
3619     load_absolute_address(reg, exception_entry);
3620     z_br(reg);
3621 
3622     bind(ok);
3623   } else {
3624     if (needs_explicit_null_check((intptr_t)offset)) {
3625       // Provoke OS NULL exception if reg = NULL by
3626       // accessing M[reg] w/o changing any registers.
3627       z_lg(tmp, 0, reg);
3628     }
3629     // else
3630       // Nothing to do, (later) access of M[reg + offset]
3631       // will provoke OS NULL exception if reg = NULL.
3632   }
3633 }
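
     // Illustrative use (a sketch; register names and field_offset are examples only):
     //   null_check(Z_ARG1, Z_R0_scratch, field_offset);  // trap or branch to the exception path if NULL
     //   z_lg(Z_RET, field_offset, Z_ARG1);               // subsequent access is then known to be safe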
3634 
3635 //-------------------------------------
3636 //  Compressed Klass Pointers
3637 //-------------------------------------
3638 
3639 // Klass oop manipulations if compressed.
3640 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3641   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3642   address  base    = CompressedKlassPointers::base();
3643   int      shift   = CompressedKlassPointers::shift();
3644   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3645 
3646   BLOCK_COMMENT("cKlass encoder {");
3647 
3648 #ifdef ASSERT
3649   Label ok;
3650   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3651   z_brc(Assembler::bcondAllZero, ok);
3652   // The plain disassembler does not recognize illtrap. It instead displays
3653   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3654   // the proper beginning of the next instruction.
3655   z_illtrap(0xee);
3656   z_illtrap(0xee);
3657   bind(ok);
3658 #endif
3659 
3660   if (base != NULL) {
3661     unsigned int base_h = ((unsigned long)base)>>32;
3662     unsigned int base_l = (unsigned int)((unsigned long)base);
3663     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3664       lgr_if_needed(dst, current);
3665       z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3666     } else if ((base_h == 0) && (base_l != 0)) {
3667       lgr_if_needed(dst, current);
3668       z_agfi(dst, -(int)base_l);
3669     } else {
3670       load_const(Z_R0, base);
3671       lgr_if_needed(dst, current);
3672       z_sgr(dst, Z_R0);
3673     }
3674     current = dst;
3675   }
3676   if (shift != 0) {
3677     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3678     z_srlg(dst, current, shift);
3679     current = dst;
3680   }
3681   lgr_if_needed(dst, current); // Move may be required (if neither base nor shift != 0).
3682 
3683   BLOCK_COMMENT("} cKlass encoder");
3684 }
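
     // Net effect of the encoder above (sketch):
     //   narrow_klass = (klass - CompressedKlassPointers::base()) >> CompressedKlassPointers::shift()
     // The subtraction is strength-reduced to a single AIH or AGFI when the base has set bits
     // in only one register half.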
3685 
3686 // This function calculates the size of the code generated by
3687 //   decode_klass_not_null(Register dst)
3688 // when (Universe::heap() != NULL). Hence, if the instructions
3689 // it generates change, then this method needs to be updated.
3690 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3691   address  base    = CompressedKlassPointers::base();
3692   int shift_size   = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
3693   int addbase_size = 0;
3694   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3695 
3696   if (base != NULL) {
3697     unsigned int base_h = ((unsigned long)base)>>32;
3698     unsigned int base_l = (unsigned int)((unsigned long)base);
3699     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3700       addbase_size += 6; /* aih */
3701     } else if ((base_h == 0) && (base_l != 0)) {
3702       addbase_size += 6; /* algfi */
3703     } else {
3704       addbase_size += load_const_size();
3705       addbase_size += 4; /* algr */
3706     }
3707   }
3708 #ifdef ASSERT
3709   addbase_size += 10;
3710   addbase_size += 2; // Extra sigill.
3711 #endif
3712   return addbase_size + shift_size;
3713 }
3714 
3715 // !!! If the instructions that get generated here change
3716 //     then function instr_size_for_decode_klass_not_null()
3717 //     needs to get updated.
3718 // This variant of decode_klass_not_null() must generate predictable code!
3719 // The code must only depend on globally known parameters.
3720 void MacroAssembler::decode_klass_not_null(Register dst) {
3721   address  base    = CompressedKlassPointers::base();
3722   int      shift   = CompressedKlassPointers::shift();
3723   int      beg_off = offset();
3724   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3725 
3726   BLOCK_COMMENT("cKlass decoder (const size) {");
3727 
3728   if (shift != 0) { // Shift required?
3729     z_sllg(dst, dst, shift);
3730   }
3731   if (base != NULL) {
3732     unsigned int base_h = ((unsigned long)base)>>32;
3733     unsigned int base_l = (unsigned int)((unsigned long)base);
3734     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3735       z_aih(dst, base_h);     // Base has no set bits in lower half.
3736     } else if ((base_h == 0) && (base_l != 0)) {
3737       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3738     } else {
3739       load_const(Z_R0, base); // Base has set bits everywhere.
3740       z_algr(dst, Z_R0);
3741     }
3742   }
3743 
3744 #ifdef ASSERT
3745   Label ok;
3746   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3747   z_brc(Assembler::bcondAllZero, ok);
3748   // The plain disassembler does not recognize illtrap. It instead displays
3749   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3750   // the proper beginning of the next instruction.
3751   z_illtrap(0xd1);
3752   z_illtrap(0xd1);
3753   bind(ok);
3754 #endif
3755   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3756 
3757   BLOCK_COMMENT("} cKlass decoder (const size)");
3758 }
3759 
3760 // This variant of decode_klass_not_null() is for cases where
3761 //  1) the size of the generated instructions may vary
3762 //  2) the result is (potentially) stored in a register different from the source.
3763 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3764   address base  = CompressedKlassPointers::base();
3765   int     shift = CompressedKlassPointers::shift();
3766   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3767 
3768   BLOCK_COMMENT("cKlass decoder {");
3769 
3770   if (src == noreg) src = dst;
3771 
3772   if (shift != 0) { // Shift or at least move required?
3773     z_sllg(dst, src, shift);
3774   } else {
3775     lgr_if_needed(dst, src);
3776   }
3777 
3778   if (base != NULL) {
3779     unsigned int base_h = ((unsigned long)base)>>32;
3780     unsigned int base_l = (unsigned int)((unsigned long)base);
3781     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3782       z_aih(dst, base_h);     // Base has no set bits in lower half.
3783     } else if ((base_h == 0) && (base_l != 0)) {
3784       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3785     } else {
3786       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3787       z_algr(dst, Z_R0);
3788     }
3789   }
3790 
3791 #ifdef ASSERT
3792   Label ok;
3793   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3794   z_brc(Assembler::bcondAllZero, ok);
3795   // The plain disassembler does not recognize illtrap. It instead displays
3796   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3797   // the proper beginning of the next instruction.
3798   z_illtrap(0xd2);
3799   z_illtrap(0xd2);
3800   bind(ok);
3801 #endif
3802   BLOCK_COMMENT("} cKlass decoder");
3803 }
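
     // Net effect of both decoder variants above (sketch):
     //   klass = (narrow_klass << CompressedKlassPointers::shift()) + CompressedKlassPointers::base()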
3804 
3805 void MacroAssembler::load_klass(Register klass, Address mem) {
3806   if (UseCompressedClassPointers) {
3807     z_llgf(klass, mem);
3808     // Attention: no null check here!
3809     decode_klass_not_null(klass);
3810   } else {
3811     z_lg(klass, mem);
3812   }
3813 }
3814 
3815 void MacroAssembler::load_klass(Register klass, Register src_oop) {
3816   if (UseCompressedClassPointers) {
3817     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3818     // Attention: no null check here!
3819     decode_klass_not_null(klass);
3820   } else {
3821     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3822   }
3823 }
3824 
3825 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
3826   assert_different_registers(Rheader, Rsrc_oop);
3827   load_klass(Rheader, Rsrc_oop);
3828   z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
3829 }
3830 
3831 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
3832   if (UseCompressedClassPointers) {
3833     assert_different_registers(dst_oop, klass, Z_R0);
3834     if (ck == noreg) ck = klass;
3835     encode_klass_not_null(ck, klass);
3836     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3837   } else {
3838     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3839   }
3840 }
3841 
3842 void MacroAssembler::store_klass_gap(Register s, Register d) {
3843   if (UseCompressedClassPointers) {
3844     assert(s != d, "not enough registers");
3845     // Support s = noreg.
3846     if (s != noreg) {
3847       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
3848     } else {
3849       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
3850     }
3851   }
3852 }
3853 
3854 // Compare klass ptr in memory against klass ptr in register.
3855 //
3856 // Rop1            - klass in register, always uncompressed.
3857 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
3858 // Rbase           - Base address of cKlass in memory.
3859 // maybeNULL       - True if Rop1 possibly is a NULL.
3860 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
3861 
3862   BLOCK_COMMENT("compare klass ptr {");
3863 
3864   if (UseCompressedClassPointers) {
3865     const int shift = CompressedKlassPointers::shift();
3866     address   base  = CompressedKlassPointers::base();
3867 
3868     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
3869     assert_different_registers(Rop1, Z_R0);
3870     assert_different_registers(Rop1, Rbase, Z_R1);
3871 
3872     // First encode the klass in the register, then compare with the cKlass in memory.
3873     // This sequence saves an unnecessary cKlass load and decode.
3874     if (base == NULL) {
3875       if (shift == 0) {
3876         z_cl(Rop1, disp, Rbase);     // Unscaled
3877       } else {
3878         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
3879         z_cl(Z_R0, disp, Rbase);
3880       }
3881     } else {                         // HeapBased
3882 #ifdef ASSERT
3883       bool     used_R0 = true;
3884       bool     used_R1 = true;
3885 #endif
3886       Register current = Rop1;
3887       Label    done;
3888 
3889       if (maybeNULL) {       // NULL ptr must be preserved!
3890         z_ltgr(Z_R0, current);
3891         z_bre(done);
3892         current = Z_R0;
3893       }
3894 
3895       unsigned int base_h = ((unsigned long)base)>>32;
3896       unsigned int base_l = (unsigned int)((unsigned long)base);
3897       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3898         lgr_if_needed(Z_R0, current);
3899         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
3900       } else if ((base_h == 0) && (base_l != 0)) {
3901         lgr_if_needed(Z_R0, current);
3902         z_agfi(Z_R0, -(int)base_l);
3903       } else {
3904         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3905         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
3906       }
3907 
3908       if (shift != 0) {
3909         z_srlg(Z_R0, Z_R0, shift);
3910       }
3911       bind(done);
3912       z_cl(Z_R0, disp, Rbase);
3913 #ifdef ASSERT
3914       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3915       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3916 #endif
3917     }
3918   } else {
3919     z_clg(Rop1, disp, Z_R0, Rbase);
3920   }
3921   BLOCK_COMMENT("} compare klass ptr");
3922 }
3923 
3924 //---------------------------
3925 //  Compressed oops
3926 //---------------------------
3927 
3928 void MacroAssembler::encode_heap_oop(Register oop) {
3929   oop_encoder(oop, oop, true /*maybe null*/);
3930 }
3931 
3932 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
3933   oop_encoder(oop, oop, false /*not null*/);
3934 }
3935 
3936 // Called with something derived from the oop base, e.g. oop_base>>3.
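     // Illustration (hypothetical value): for oop_base = 0x7FFFF000, the low 16-bit part is 0xF000
     // and the next part is 0x7FFF, i.e. two non-zero parts. Adding 0x1000 yields 0x80000000, which
     // has a single non-zero 16-bit part and can be loaded with one instruction. The function then
     // returns -0x1000, which the caller folds into a later displacement or add.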
3937 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
3938   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
3939   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
3940   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
3941   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
3942   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
3943                                + (oop_base_lh == 0 ? 0:1)
3944                                + (oop_base_hl == 0 ? 0:1)
3945                                + (oop_base_hh == 0 ? 0:1);
3946 
3947   assert(oop_base != 0, "This is for HeapBased cOops only");
3948 
3949   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
3950     uint64_t pow2_offset = 0x10000 - oop_base_ll;
3951     if (pow2_offset < 0x8000) {  // This might not be necessary.
3952       uint64_t oop_base2 = oop_base + pow2_offset;
3953 
3954       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
3955       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
3956       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
3957       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
3958       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
3959                         (oop_base_lh == 0 ? 0:1) +
3960                         (oop_base_hl == 0 ? 0:1) +
3961                         (oop_base_hh == 0 ? 0:1);
3962       if (n_notzero_parts == 1) {
3963         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
3964         return -pow2_offset;
3965       }
3966     }
3967   }
3968   return 0;
3969 }
3970 
3971 // If base address is offset from a straight power of two by just a few pages,
3972 // return this offset to the caller for a possible later composite add.
3973 // TODO/FIX: will only work correctly for 4k pages.
3974 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
3975   int pow2_offset = get_oop_base_pow2_offset(oop_base);
3976 
3977   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
3978 
3979   return pow2_offset;
3980 }
3981 
3982 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
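       // Load the negated (two's complement) base into Rbase so that a later "subtract base"
       // can be done with an add (see the add2reg_with_index uses). The returned pow2 offset
       // is negated accordingly.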
3983   int offset = get_oop_base(Rbase, oop_base);
3984   z_lcgr(Rbase, Rbase);
3985   return -offset;
3986 }
3987 
3988 // Compare compressed oop in memory against oop in register.
3989 // Rop1            - Oop in register.
3990 // mem             - Address of cOop in memory (base, index, and displacement).
3992 // maybeNULL       - True if Rop1 possibly is a NULL.
3994 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
3995   Register Rbase  = mem.baseOrR0();
3996   Register Rindex = mem.indexOrR0();
3997   int64_t  disp   = mem.disp();
3998 
3999   const int shift = CompressedOops::shift();
4000   address   base  = CompressedOops::base();
4001 
4002   assert(UseCompressedOops, "must be on to call this method");
4003   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4004   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4005   assert_different_registers(Rop1, Z_R0);
4006   assert_different_registers(Rop1, Rbase, Z_R1);
4007   assert_different_registers(Rop1, Rindex, Z_R1);
4008 
4009   BLOCK_COMMENT("compare heap oop {");
4010 
4011   // First encode register oop and then compare with cOop in memory.
4012   // This sequence saves an unnecessary cOop load and decode.
4013   if (base == NULL) {
4014     if (shift == 0) {
4015       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4016     } else {
4017       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4018       z_cl(Z_R0, disp, Rindex, Rbase);
4019     }
4020   } else {                              // HeapBased
4021 #ifdef ASSERT
4022     bool  used_R0 = true;
4023     bool  used_R1 = true;
4024 #endif
4025     Label done;
4026     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4027 
4028     if (maybeNULL) {       // NULL ptr must be preserved!
4029       z_ltgr(Z_R0, Rop1);
4030       z_bre(done);
4031     }
4032 
4033     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4034     z_srlg(Z_R0, Z_R0, shift);
4035 
4036     bind(done);
4037     z_cl(Z_R0, disp, Rindex, Rbase);
4038 #ifdef ASSERT
4039     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4040     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4041 #endif
4042   }
4043   BLOCK_COMMENT("} compare heap oop");
4044 }
4045 
4046 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4047                                      const Address& addr, Register val,
4048                                      Register tmp1, Register tmp2, Register tmp3) {
4049   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
4050                          ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
4051   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4052   decorators = AccessInternal::decorator_fixup(decorators);
4053   bool as_raw = (decorators & AS_RAW) != 0;
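       // AS_RAW bypasses GC-specific barriers: the explicitly qualified call below resolves
       // non-virtually to the plain BarrierSetAssembler implementation (a raw store).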
4054   if (as_raw) {
4055     bs->BarrierSetAssembler::store_at(this, decorators, type,
4056                                       addr, val,
4057                                       tmp1, tmp2, tmp3);
4058   } else {
4059     bs->store_at(this, decorators, type,
4060                  addr, val,
4061                  tmp1, tmp2, tmp3);
4062   }
4063 }
4064 
4065 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4066                                     const Address& addr, Register dst,
4067                                     Register tmp1, Register tmp2, Label *is_null) {
4068   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
4069                          ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
4070   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4071   decorators = AccessInternal::decorator_fixup(decorators);
4072   bool as_raw = (decorators & AS_RAW) != 0;
4073   if (as_raw) {
4074     bs->BarrierSetAssembler::load_at(this, decorators, type,
4075                                      addr, dst,
4076                                      tmp1, tmp2, is_null);
4077   } else {
4078     bs->load_at(this, decorators, type,
4079                 addr, dst,
4080                 tmp1, tmp2, is_null);
4081   }
4082 }
4083 
4084 void MacroAssembler::load_heap_oop(Register dest, const Address &a,
4085                                    Register tmp1, Register tmp2,
4086                                    DecoratorSet decorators, Label *is_null) {
4087   access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);
4088 }
4089 
4090 void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
4091                                     Register tmp1, Register tmp2, Register tmp3,
4092                                     DecoratorSet decorators) {
4093   access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);
4094 }
4095 
4096 //-------------------------------------------------
4097 // Encode compressed oop. Generally usable encoder.
4098 //-------------------------------------------------
4099 // Rsrc - contains regular oop on entry. It remains unchanged.
4100 // Rdst - contains compressed oop on exit.
4101 // Rdst and Rsrc may indicate the same register, in which case the original oop in Rsrc is overwritten.
4102 //
4103 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4104 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4105 //
4106 // only32bitValid is set if later code uses only the lower 32 bits. In this
4107 // case we do not need to clear the upper 32 bits.
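     //
     // In essence, for a non-NULL oop the encoder computes
     //   narrow_oop = (oop - CompressedOops::base()) >> CompressedOops::shift()
     // while a NULL oop stays 0. The code below is an optimized expansion of that formula.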
4108 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4109                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4110 
4111   const address oop_base  = CompressedOops::base();
4112   const int     oop_shift = CompressedOops::shift();
4113   const bool    disjoint  = CompressedOops::base_disjoint();
4114 
4115   assert(UseCompressedOops, "must be on to call this method");
4116   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4117   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4118 
4119   if (disjoint || (oop_base == NULL)) {
4120     BLOCK_COMMENT("cOop encoder zeroBase {");
4121     if (oop_shift == 0) {
4122       if (oop_base != NULL && !only32bitValid) {
4123         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4124       } else {
4125         lgr_if_needed(Rdst, Rsrc);
4126       }
4127     } else {
4128       z_srlg(Rdst, Rsrc, oop_shift);
4129       if (oop_base != NULL && !only32bitValid) {
4130         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4131       }
4132     }
4133     BLOCK_COMMENT("} cOop encoder zeroBase");
4134     return;
4135   }
4136 
4137   bool used_R0 = false;
4138   bool used_R1 = false;
4139 
4140   BLOCK_COMMENT("cOop encoder general {");
4141   assert_different_registers(Rdst, Z_R1);
4142   assert_different_registers(Rsrc, Rbase);
4143   if (maybeNULL) {
4144     Label done;
4145     // We reorder shifting and subtracting, so that we can compare
4146     // and shift in parallel:
4147     //
4148     // cycle 0:  potential LoadN, base = <const>
4149     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4150     // cycle 2:  if (cr) br,      dst = dst + base + offset
4151 
4152     // Get oop_base components.
4153     if (pow2_offset == -1) {
4154       if (Rdst == Rbase) {
4155         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4156           Rbase = Z_R0;
4157           used_R0 = true;
4158         } else {
4159           Rdst = Z_R1;
4160           used_R1 = true;
4161         }
4162       }
4163       if (Rbase == Z_R1) {
4164         used_R1 = true;
4165       }
4166       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4167     }
4168     assert_different_registers(Rdst, Rbase);
4169 
4170     // Check for NULL oop (must be left alone) and shift.
4171     if (oop_shift != 0) {  // Shift out alignment bits
4172       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4173         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4174       } else {
4175         z_srlg(Rdst, Rsrc, oop_shift);
4176         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4177         // One might expect z_cghi(Rsrc, 0) to be faster since it does not write a register, but it is not.
4178         // z_cghi(Rsrc, 0);
4179       }
4180     } else {
4181       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4182     }
4183     z_bre(done);
4184 
4185     // Subtract oop_base components.
4186     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4187       z_algr(Rdst, Rbase);
4188       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4189     } else {
4190       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4191     }
4192     if (!only32bitValid) {
4193       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4194     }
4195     bind(done);
4196 
4197   } else {  // not null
4198     // Get oop_base components.
4199     if (pow2_offset == -1) {
4200       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4201     }
4202 
4203     // Subtract oop_base components and shift.
4204     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4205       // Don't use lay instruction.
4206       if (Rdst == Rsrc) {
4207         z_algr(Rdst, Rbase);
4208       } else {
4209         lgr_if_needed(Rdst, Rbase);
4210         z_algr(Rdst, Rsrc);
4211       }
4212       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4213     } else {
4214       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4215     }
4216     if (oop_shift != 0) {   // Shift out alignment bits.
4217       z_srlg(Rdst, Rdst, oop_shift);
4218     }
4219     if (!only32bitValid) {
4220       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4221     }
4222   }
4223 #ifdef ASSERT
4224   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4225   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4226 #endif
4227   BLOCK_COMMENT("} cOop encoder general");
4228 }
4229 
4230 //-------------------------------------------------
4231 //  Decode compressed oop. Generally usable decoder.
4232 //-------------------------------------------------
4233 // Rsrc - contains compressed oop on entry.
4234 // Rdst - contains regular oop on exit.
4235 // Rdst and Rsrc may indicate same register.
4236 // Rsrc must not be the same register as Rbase if Rbase was preloaded (before call); see the guarantee below.
4237 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.
4238 // Rbase - register to use for the base
4239 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4240 // For performance, it is good to
4241 //  - avoid Z_R0 for any of the argument registers.
4242 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4243 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
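     //
     // In essence, for a non-zero narrow oop the decoder computes
     //   oop = ((uint64_t)narrow_oop << CompressedOops::shift()) + CompressedOops::base()
     // while a narrow NULL (0) stays NULL. The code below is an optimized expansion of that formula.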
4244 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4245 
4246   const address oop_base  = CompressedOops::base();
4247   const int     oop_shift = CompressedOops::shift();
4248   const bool    disjoint  = CompressedOops::base_disjoint();
4249 
4250   assert(UseCompressedOops, "must be on to call this method");
4251   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4252   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4253          "cOop encoder detected bad shift");
4254 
4255   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4256 
4257   if (oop_base != NULL) {
4258     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4259     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4260     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4261     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4262       BLOCK_COMMENT("cOop decoder disjointBase {");
4263       // We do not need to load the base. Instead, we can install the upper bits
4264       // with an OR instead of an ADD.
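           // A "disjoint" base shares no bit positions with any shifted narrow oop,
           // which is why OR and ADD yield the same result here.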
4265       Label done;
4266 
4267       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4268       if (maybeNULL) {  // NULL ptr must be preserved!
4269         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4270         z_bre(done);
4271       } else {
4272         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4273       }
4274       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4275         z_oihf(Rdst, oop_base_hf);
4276       } else if (oop_base_hl != 0) {
4277         z_oihl(Rdst, oop_base_hl);
4278       } else {
4279         assert(oop_base_hh != 0, "not heapbased mode");
4280         z_oihh(Rdst, oop_base_hh);
4281       }
4282       bind(done);
4283       BLOCK_COMMENT("} cOop decoder disjointBase");
4284     } else {
4285       BLOCK_COMMENT("cOop decoder general {");
4286       // There are three decode steps:
4287       //   scale oop offset (shift left)
4288       //   get base (in reg) and pow2_offset (constant)
4289       //   add base, pow2_offset, and oop offset
4290       // The following register overlap situations may exist:
4291       // Rdst == Rsrc,  Rbase any other
4292       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4293       //   Loading Rbase does not impact the scaled offset.
4294       // Rdst == Rbase, Rsrc  any other
4295       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4296       //   would destroy the scaled offset.
4297       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4298       //           use Rbase_tmp if base has to be loaded.
4299       // Rsrc == Rbase, Rdst  any other
4300       //   Only possible without preloaded Rbase.
4301       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4302       // Rsrc == Rbase, Rdst == Rbase
4303       //   Only possible without preloaded Rbase.
4304       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4305       //   Remedy: use Rbase_tmp.
4306       //
4307       Label    done;
4308       Register Rdst_tmp       = Rdst;
4309       Register Rbase_tmp      = Rbase;
4310       bool     used_R0        = false;
4311       bool     used_R1        = false;
4312       bool     base_preloaded = pow2_offset >= 0;
4313       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4314       assert(oop_shift != 0, "room for optimization");
4315 
4316       // Check if we need to use scratch registers.
4317       if (Rdst == Rbase) {
4318         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4319         if (Rdst != Rsrc) {
4320           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4321           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4322         } else {
4323           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4324         }
4325       }
4326       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4327 
4328       // Scale oop and check for NULL.
4329       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4330       if (maybeNULL) {  // NULL ptr must be preserved!
4331         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4332         z_bre(done);
4333       } else {
4334         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4335       }
4336 
4337       // Get oop_base components.
4338       if (!base_preloaded) {
4339         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4340       }
4341 
4342       // Add up all components.
4343       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4344         z_algr(Rdst_tmp, Rbase_tmp);
4345         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4346       } else {
4347         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4348       }
4349 
4350       bind(done);
4351       lgr_if_needed(Rdst, Rdst_tmp);
4352 #ifdef ASSERT
4353       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4354       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4355 #endif
4356       BLOCK_COMMENT("} cOop decoder general");
4357     }
4358   } else {
4359     BLOCK_COMMENT("cOop decoder zeroBase {");
4360     if (oop_shift == 0) {
4361       lgr_if_needed(Rdst, Rsrc);
4362     } else {
4363       z_sllg(Rdst, Rsrc, oop_shift);
4364     }
4365     BLOCK_COMMENT("} cOop decoder zeroBase");
4366   }
4367 }
4368 
4369 // ((OopHandle)result).resolve();
4370 void MacroAssembler::resolve_oop_handle(Register result) {
4371   // OopHandle::resolve is an indirection.
4372   z_lg(result, 0, result);
4373 }
4374 
4375 void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
4376   mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));
4377   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4378   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4379   resolve_oop_handle(mirror);
4380 }
4381 
4382 void MacroAssembler::load_method_holder(Register holder, Register method) {
4383   mem2reg_opt(holder, Address(method, Method::const_offset()));
4384   mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));
4385   mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes()));
4386 }
4387 
4388 //---------------------------------------------------------------
4389 //---  Operations on arrays.
4390 //---------------------------------------------------------------
4391 
4392 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4393 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4394 // work registers anyway.
4395 // Actually, only r0, r1 (which are work registers), and odd_tmp_reg are killed.
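     // Strategy: lengths of up to 256 bytes are cleared with a single EXecute'd XC (a field
     // XORed with itself becomes zero); longer lengths use MVCLE with source length 0, which
     // fills the destination with padding byte 0.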
4396 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {
4397 
4398   int      block_start = offset();
4399   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4400   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4401 
4402   Label doXC, doMVCLE, done;
4403 
4404   BLOCK_COMMENT("Clear_Array {");
4405 
4406   // Check for zero len and convert to long.
4407   z_ltgfr(odd_tmp_reg, cnt_arg);
4408   z_bre(done);                    // Nothing to do if len == 0.
4409 
4410   // Prefetch data to be cleared.
4411   if (VM_Version::has_Prefetch()) {
4412     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4413     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4414   }
4415 
4416   z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.
4417   z_cghi(odd_tmp_reg, 32);         // Check for len <= 256 bytes (<=32 DW).
4418   z_brnh(doXC);                    // If so, use executed XC to clear.
4419 
4420   // MVCLE: initialize long arrays (general case).
4421   bind(doMVCLE);
4422   z_lgr(dst_addr, base_pointer_arg);
4423   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4424   // The even register of the register pair is not killed.
4425   clear_reg(odd_tmp_reg, true, false);
4426   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);
4427   z_bru(done);
4428 
4429   // XC: initialize short arrays.
4430   Label XC_template; // Instr template, never exec directly!
4431     bind(XC_template);
4432     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4433 
4434   bind(doXC);
4435     add2reg(dst_len, -1);               // Get #bytes-1 for EXECUTE.
4436     if (VM_Version::has_ExecuteExtensions()) {
4437       z_exrl(dst_len, XC_template);     // Execute XC with var. len.
4438     } else {
4439       z_larl(odd_tmp_reg, XC_template);
4440       z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.
4441     }
4442     // z_bru(done);      // fallthru
4443 
4444   bind(done);
4445 
4446   BLOCK_COMMENT("} Clear_Array");
4447 
4448   int block_end = offset();
4449   return block_end - block_start;
4450 }
4451 
4452 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4453 // Emitter does not KILL any arguments nor work registers.
4454 // Emitter generates up to 16 XC instructions, depending on the array length.
4455 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4456   int  block_start    = offset();
4457   int  off;
4458   int  lineSize_Bytes = AllocatePrefetchStepSize;
4459   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4460   bool doPrefetch     = VM_Version::has_Prefetch();
4461   int  XC_maxlen      = 256;
4462   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4463 
4464   BLOCK_COMMENT("Clear_Array_Const {");
4465   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4466 
4467   // Do less prefetching for very short arrays.
4468   if (numXCInstr > 0) {
4469     // Prefetch only some cache lines, then begin clearing.
4470     if (doPrefetch) {
4471       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4472         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4473       } else {
4474         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4475         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4476           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4477         }
4478       }
4479     }
4480 
4481     for (off=0; off<(numXCInstr-1); off++) {
4482       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4483 
4484       // Prefetch some cache lines in advance.
4485       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4486         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4487       }
4488     }
4489     if (off*XC_maxlen < cnt*BytesPerWord) {
4490       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4491     }
4492   }
4493   BLOCK_COMMENT("} Clear_Array_Const");
4494 
4495   int block_end = offset();
4496   return block_end - block_start;
4497 }
4498 
4499 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4500 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4501 // work registers anyway.
4502 // Actually, only r0, r1, (which are work registers) and odd_tmp_reg are killed.
4503 //
4504 // For very large arrays, exploit MVCLE H/W support.
4505 // MVCLE instruction automatically exploits H/W-optimized page mover.
4506 // - Bytes up to next page boundary are cleared with a series of XC to self.
4507 // - All full pages are cleared with the page mover H/W assist.
4508 // - Remaining bytes are again cleared by a series of XC to self.
4509 //
4510 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {
4511 
4512   int      block_start = offset();
4513   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4514   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4515 
4516   BLOCK_COMMENT("Clear_Array_Const_Big {");
4517 
4518   // Get len to clear.
4519   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4520 
4521   // Prepare other args to MVCLE.
4522   z_lgr(dst_addr, base_pointer_arg);
4523   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4524   // The even register of the register pair is not killed.
4525   (void) clear_reg(odd_tmp_reg, true, false);  // Src len of MVCLE is zero.
4526   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);
4527   BLOCK_COMMENT("} Clear_Array_Const_Big");
4528 
4529   int block_end = offset();
4530   return block_end - block_start;
4531 }
4532 
4533 // Allocator.
4534 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4535                                                            Register cnt_reg,
4536                                                            Register tmp1_reg, Register tmp2_reg) {
4537   // Tmp1 is oddReg.
4538   // Tmp2 is evenReg.
4539 
4540   int block_start = offset();
4541   Label doMVC, doMVCLE, done, MVC_template;
4542 
4543   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4544 
4545   // Check for zero len and convert to long.
4546   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend cnt to long; the value is needed again below (sllg, cghi).
4547   z_bre(done);                    // Nothing to do if len == 0.
4548 
4549   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4550 
4551   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4552   z_brnh(doMVC);                  // If so, use executed MVC to copy.
4553 
4554   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4555   // Prep dest reg pair.
4556   z_lgr(Z_R0, dst_reg);           // dst addr
4557   // Dst len already in Z_R1.
4558   // Prep src reg pair.
4559   z_lgr(tmp2_reg, src_reg);       // src addr
4560   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4561 
4562   // Do the copy.
4563   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4564   z_bru(done);                         // All done.
4565 
4566   bind(MVC_template);             // Just some data (not more than 256 bytes).
4567   z_mvc(0, 0, dst_reg, 0, src_reg);
4568 
4569   bind(doMVC);
4570 
4571   if (VM_Version::has_ExecuteExtensions()) {
4572     add2reg(Z_R1, -1);
4573   } else {
4574     add2reg(tmp1_reg, -1, Z_R1);
4575     z_larl(Z_R1, MVC_template);
4576   }
4577 
4578   if (VM_Version::has_Prefetch()) {
4579     z_pfd(1,  0,Z_R0,src_reg);
4580     z_pfd(2,  0,Z_R0,dst_reg);
4581     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4582     //    z_pfd(2,256,Z_R0,dst_reg);
4583   }
4584 
4585   if (VM_Version::has_ExecuteExtensions()) {
4586     z_exrl(Z_R1, MVC_template);
4587   } else {
4588     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4589   }
4590 
4591   bind(done);
4592 
4593   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4594 
4595   int block_end = offset();
4596   return block_end - block_start;
4597 }
4598 
4599 //------------------------------------------------------
4600 //   Special String Intrinsics. Implementation
4601 //------------------------------------------------------
4602 
4603 // Intrinsics for CompactStrings
4604 
4605 // Compress char[] to byte[].
4606 //   Restores: src, dst
4607 //   Uses:     cnt
4608 //   Kills:    tmp, Z_R0, Z_R1.
4609 //   Early clobber: result.
4610 // Note:
4611 //   cnt is signed int. Do not rely on high word!
4612 //       counts # characters, not bytes.
4613 // The result is the number of characters copied before the first incompatible character was found.
4614 // If precise is true, processing stops exactly at that point. Otherwise, the result may be off
4615 // by a few characters. The result always indicates the number of copied characters.
4616 // When used as a character index, the returned value points to the first incompatible character.
4617 //
4618 // Note: Does not behave exactly like the package-private java.lang.StringUTF16.compress implementation in case of failure:
4619 // - Different number of characters may have been written to dead array (if precise is false).
4620 // - Returns a number <cnt instead of 0. (Result gets compared with cnt.)
4621 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register cnt,
4622                                              Register tmp,    bool precise) {
4623   assert_different_registers(Z_R0, Z_R1, result, src, dst, cnt, tmp);
4624 
4625   if (precise) {
4626     BLOCK_COMMENT("encode_iso_array {");
4627   } else {
4628     BLOCK_COMMENT("string_compress {");
4629   }
4630   int  block_start = offset();
4631 
4632   Register       Rsrc  = src;
4633   Register       Rdst  = dst;
4634   Register       Rix   = tmp;
4635   Register       Rcnt  = cnt;
4636   Register       Rmask = result;  // holds incompatibility check mask until result value is stored.
4637   Label          ScalarShortcut, AllDone;
4638 
4639   z_iilf(Rmask, 0xFF00FF00);
4640   z_iihf(Rmask, 0xFF00FF00);
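       // Rmask now holds 0xFF00 in each halfword: a 2-byte char is compressible (Latin-1)
       // iff its high byte, i.e. (char & 0xFF00), is zero.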
4641 
4642 #if 0  // Sacrifice shortcuts for code compactness
4643   {
4644     //---<  shortcuts for short strings (very frequent)   >---
4645     //   Strings with 4 and 8 characters were found to occur very frequently.
4646     //   Therefore, we handle them right away with minimal overhead.
4647     Label     skipShortcut, skip4Shortcut, skip8Shortcut;
4648     Register  Rout = Z_R0;
4649     z_chi(Rcnt, 4);
4650     z_brne(skip4Shortcut);                 // 4 characters are very frequent
4651       z_lg(Z_R0, 0, Rsrc);                 // Treat exactly 4 characters specially.
4652       if (VM_Version::has_DistinctOpnds()) {
4653         Rout = Z_R0;
4654         z_ngrk(Rix, Z_R0, Rmask);
4655       } else {
4656         Rout = Rix;
4657         z_lgr(Rix, Z_R0);
4658         z_ngr(Z_R0, Rmask);
4659       }
4660       z_brnz(skipShortcut);
4661       z_stcmh(Rout, 5, 0, Rdst);
4662       z_stcm(Rout,  5, 2, Rdst);
4663       z_lgfr(result, Rcnt);
4664       z_bru(AllDone);
4665     bind(skip4Shortcut);
4666 
4667     z_chi(Rcnt, 8);
4668     z_brne(skip8Shortcut);                 // There's more to do...
4669       z_lmg(Z_R0, Z_R1, 0, Rsrc);          // Treat exactly 8 characters specially.
4670       if (VM_Version::has_DistinctOpnds()) {
4671         Rout = Z_R0;
4672         z_ogrk(Rix, Z_R0, Z_R1);
4673         z_ngr(Rix, Rmask);
4674       } else {
4675         Rout = Rix;
4676         z_lgr(Rix, Z_R0);
4677         z_ogr(Z_R0, Z_R1);
4678         z_ngr(Z_R0, Rmask);
4679       }
4680       z_brnz(skipShortcut);
4681       z_stcmh(Rout, 5, 0, Rdst);
4682       z_stcm(Rout,  5, 2, Rdst);
4683       z_stcmh(Z_R1, 5, 4, Rdst);
4684       z_stcm(Z_R1,  5, 6, Rdst);
4685       z_lgfr(result, Rcnt);
4686       z_bru(AllDone);
4687 
4688     bind(skip8Shortcut);
4689     clear_reg(Z_R0, true, false);          // #characters already processed (none). Precond for scalar loop.
4690     z_brl(ScalarShortcut);                 // Just a few characters
4691 
4692     bind(skipShortcut);
4693   }
4694 #endif
4695   clear_reg(Z_R0);                         // make sure register is properly initialized.
4696 
4697   if (VM_Version::has_VectorFacility()) {
4698     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
4699                                            // Otherwise just do nothing in vector mode.
4700                                            // Must be multiple of 2*(vector register length in chars (8 HW = 128 bits)).
4701     const int  log_min_vcnt = exact_log2(min_vcnt);
4702     Label      VectorLoop, VectorDone, VectorBreak;
4703 
4704     VectorRegister Vtmp1      = Z_V16;
4705     VectorRegister Vtmp2      = Z_V17;
4706     VectorRegister Vmask      = Z_V18;
4707     VectorRegister Vzero      = Z_V19;
4708     VectorRegister Vsrc_first = Z_V20;
4709     VectorRegister Vsrc_last  = Z_V23;
4710 
4711     assert((Vsrc_last->encoding() - Vsrc_first->encoding() + 1) == min_vcnt/8, "logic error");
4712     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
4713     z_srak(Rix, Rcnt, log_min_vcnt);       // # vector loop iterations
4714     z_brz(VectorDone);                     // not enough data for vector loop
4715 
4716     z_vzero(Vzero);                        // all zeroes
4717     z_vgmh(Vmask, 0, 7);                   // generate 0xff00 mask for all 2-byte elements
4718     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
4719 
4720     bind(VectorLoop);
4721       z_vlm(Vsrc_first, Vsrc_last, 0, Rsrc);
4722       add2reg(Rsrc, min_vcnt*2);
4723 
4724       //---<  check for incompatible character  >---
4725       z_vo(Vtmp1, Z_V20, Z_V21);
4726       z_vo(Vtmp2, Z_V22, Z_V23);
4727       z_vo(Vtmp1, Vtmp1, Vtmp2);
4728       z_vn(Vtmp1, Vtmp1, Vmask);
4729       z_vceqhs(Vtmp1, Vtmp1, Vzero);       // high half of all chars must be zero for successful compress.
4730       z_bvnt(VectorBreak);                 // break vector loop if not all vector elements compare eq -> incompatible character found.
4731                                            // re-process data from current iteration in break handler.
4732 
4733       //---<  pack & store characters  >---
4734       z_vpkh(Vtmp1, Z_V20, Z_V21);         // pack (src1, src2) -> tmp1
4735       z_vpkh(Vtmp2, Z_V22, Z_V23);         // pack (src3, src4) -> tmp2
4736       z_vstm(Vtmp1, Vtmp2, 0, Rdst);       // store packed string
4737       add2reg(Rdst, min_vcnt);
4738 
4739       z_brct(Rix, VectorLoop);
4740 
4741     z_bru(VectorDone);
4742 
4743     bind(VectorBreak);
4744       add2reg(Rsrc, -min_vcnt*2);          // Fix Rsrc. Rsrc was already updated, but Rdst and Rix are not.
4745       z_sll(Rix, log_min_vcnt);            // # chars processed so far in VectorLoop, excl. current iteration.
4746       z_sr(Z_R0, Rix);                     // correct # chars processed in total.
4747 
4748     bind(VectorDone);
4749   }
4750 
4751   {
4752     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled loop.
4753                                            // Otherwise just do nothing in unrolled loop.
4754                                            // Must be multiple of 8.
4755     const int  log_min_cnt = exact_log2(min_cnt);
4756     Label      UnrolledLoop, UnrolledDone, UnrolledBreak;
4757 
4758     if (VM_Version::has_DistinctOpnds()) {
4759       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
4760     } else {
4761       z_lr(Rix, Rcnt);
4762       z_sr(Rix, Z_R0);
4763     }
4764     z_sra(Rix, log_min_cnt);             // unrolled loop count
4765     z_brz(UnrolledDone);
4766 
4767     bind(UnrolledLoop);
4768       z_lmg(Z_R0, Z_R1, 0, Rsrc);
4769       if (precise) {
4770         z_ogr(Z_R1, Z_R0);                 // check all 8 chars for incompatibility
4771         z_ngr(Z_R1, Rmask);
4772         z_brnz(UnrolledBreak);
4773 
4774         z_lg(Z_R1, 8, Rsrc);               // reload destroyed register
4775         z_stcmh(Z_R0, 5, 0, Rdst);
4776         z_stcm(Z_R0,  5, 2, Rdst);
4777       } else {
4778         z_stcmh(Z_R0, 5, 0, Rdst);
4779         z_stcm(Z_R0,  5, 2, Rdst);
4780 
4781         z_ogr(Z_R0, Z_R1);
4782         z_ngr(Z_R0, Rmask);
4783         z_brnz(UnrolledBreak);
4784       }
4785       z_stcmh(Z_R1, 5, 4, Rdst);
4786       z_stcm(Z_R1,  5, 6, Rdst);
4787 
4788       add2reg(Rsrc, min_cnt*2);
4789       add2reg(Rdst, min_cnt);
4790       z_brct(Rix, UnrolledLoop);
4791 
4792     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop.
4793     z_nilf(Z_R0, ~(min_cnt-1));
4794     z_tmll(Rcnt, min_cnt-1);
4795     z_brnaz(ScalarShortcut);               // Branch if a remainder (cnt not a multiple of 8) is left for the scalar loop;
4796                                            // otherwise fall through. Rix == 0 in all cases.
4797     z_sllg(Z_R1, Rcnt, 1);                 // # src bytes already processed. Only lower 32 bits are valid!
4798                                            //   Z_R1 contents must be treated as unsigned operand! For huge strings,
4799                                            //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
4800     z_lgfr(result, Rcnt);                  // all characters processed.
4801     z_slgfr(Rdst, Rcnt);                   // restore ptr
4802     z_slgfr(Rsrc, Z_R1);                   // restore ptr, double the element count for Rsrc restore
4803     z_bru(AllDone);
4804 
4805     bind(UnrolledBreak);
4806     z_lgfr(Z_R0, Rcnt);                    // # chars processed in total after unrolled loop
4807     z_nilf(Z_R0, ~(min_cnt-1));
4808     z_sll(Rix, log_min_cnt);               // # chars not yet processed in UnrolledLoop (due to break), broken iteration not included.
4809     z_sr(Z_R0, Rix);                       // fix # chars processed OK so far.
4810     if (!precise) {
4811       z_lgfr(result, Z_R0);
4812       z_sllg(Z_R1, Z_R0, 1);               // # src bytes already processed. Only lower 32 bits are valid!
4813                                            //   Z_R1 contents must be treated as unsigned operand! For huge strings,
4814                                            //   (Rcnt >= 2**30), the value may spill into the sign bit by sllg.
4815       z_aghi(result, min_cnt/2);           // min_cnt/2 characters have already been written
4816                                            // but ptrs were not updated yet.
4817       z_slgfr(Rdst, Z_R0);                 // restore ptr
4818       z_slgfr(Rsrc, Z_R1);                 // restore ptr, double the element count for Rsrc restore
4819       z_bru(AllDone);
4820     }
4821     bind(UnrolledDone);
4822   }
4823 
4824   {
4825     Label     ScalarLoop, ScalarDone, ScalarBreak;
4826 
4827     bind(ScalarShortcut);
4828     z_ltgfr(result, Rcnt);
4829     z_brz(AllDone);
4830 
4831 #if 0  // Sacrifice shortcuts for code compactness
4832     {
4833       //---<  Special treatment for very short strings (one or two characters)  >---
4834       //   For these strings, we are sure that the above code was skipped.
4835       //   Thus, no registers were modified, register restore is not required.
4836       Label     ScalarDoit, Scalar2Char;
4837       z_chi(Rcnt, 2);
4838       z_brh(ScalarDoit);
4839       z_llh(Z_R1,  0, Z_R0, Rsrc);
4840       z_bre(Scalar2Char);
4841       z_tmll(Z_R1, 0xff00);
4842       z_lghi(result, 0);                   // cnt == 1, first char invalid, no chars successfully processed
4843       z_brnaz(AllDone);
4844       z_stc(Z_R1,  0, Z_R0, Rdst);
4845       z_lghi(result, 1);
4846       z_bru(AllDone);
4847 
4848       bind(Scalar2Char);
4849       z_llh(Z_R0,  2, Z_R0, Rsrc);
4850       z_tmll(Z_R1, 0xff00);
4851       z_lghi(result, 0);                   // cnt == 2, first char invalid, no chars successfully processed
4852       z_brnaz(AllDone);
4853       z_stc(Z_R1,  0, Z_R0, Rdst);
4854       z_tmll(Z_R0, 0xff00);
4855       z_lghi(result, 1);                   // cnt == 2, second char invalid, one char successfully processed
4856       z_brnaz(AllDone);
4857       z_stc(Z_R0,  1, Z_R0, Rdst);
4858       z_lghi(result, 2);
4859       z_bru(AllDone);
4860 
4861       bind(ScalarDoit);
4862     }
4863 #endif
4864 
4865     if (VM_Version::has_DistinctOpnds()) {
4866       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to compress in unrolled loop
4867     } else {
4868       z_lr(Rix, Rcnt);
4869       z_sr(Rix, Z_R0);
4870     }
4871     z_lgfr(result, Rcnt);                  // # processed characters (if all runs ok).
4872     z_brz(ScalarDone);                     // uses CC from Rix calculation
4873 
4874     bind(ScalarLoop);
4875       z_llh(Z_R1, 0, Z_R0, Rsrc);
4876       z_tmll(Z_R1, 0xff00);
4877       z_brnaz(ScalarBreak);
4878       z_stc(Z_R1, 0, Z_R0, Rdst);
4879       add2reg(Rsrc, 2);
4880       add2reg(Rdst, 1);
4881       z_brct(Rix, ScalarLoop);
4882 
4883     z_bru(ScalarDone);
4884 
4885     bind(ScalarBreak);
4886     z_sr(result, Rix);
4887 
4888     bind(ScalarDone);
4889     z_sgfr(Rdst, result);                  // restore ptr
4890     z_sgfr(Rsrc, result);                  // restore ptr, double the element count for Rsrc restore
4891     z_sgfr(Rsrc, result);
4892   }
4893   bind(AllDone);
4894 
4895   if (precise) {
4896     BLOCK_COMMENT("} encode_iso_array");
4897   } else {
4898     BLOCK_COMMENT("} string_compress");
4899   }
4900   return offset() - block_start;
4901 }
4902 
4903 // Inflate byte[] to char[].
4904 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
4905   int block_start = offset();
4906 
4907   BLOCK_COMMENT("string_inflate {");
4908 
4909   Register stop_char = Z_R0;
4910   Register table     = Z_R1;
4911   Register src_addr  = tmp;
4912 
4913   assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
4914   assert(dst->encoding()%2 == 0, "must be even reg");
4915   assert(cnt->encoding()%2 == 1, "must be odd reg");
4916   assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
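       // translate_ot() is expected to emit TROT (TRANSLATE ONE TO TWO): the dst/cnt even/odd pair
       // holds the destination address and length, Z_R1 the translation table, and Z_R0 the test character.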
4917 
4918   StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
4919   clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
4920   lgr_if_needed(src_addr, src);
4921   z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.
4922 
4923   translate_ot(dst, src_addr, /* mask = */ 0x0001);
4924 
4925   BLOCK_COMMENT("} string_inflate");
4926 
4927   return offset() - block_start;
4928 }
4929 
4930 // Inflate byte[] to char[].
4931 //   Restores: src, dst
4932 //   Uses:     cnt
4933 //   Kills:    tmp, Z_R0, Z_R1.
4934 // Note:
4935 //   cnt is signed int. Do not rely on high word!
4936 //       counts # characters, not bytes.
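     // Processing order (sketch): a vector loop handles 32 characters per iteration (if the vector
     // facility is available), an unrolled scalar loop handles 8 characters per iteration, and a
     // computed branch into a sequence of llc/sth pairs handles the remaining (< 8) characters.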
4937 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register cnt, Register tmp) {
4938   assert_different_registers(Z_R0, Z_R1, src, dst, cnt, tmp);
4939 
4940   BLOCK_COMMENT("string_inflate {");
4941   int block_start = offset();
4942 
4943   Register   Rcnt = cnt;   // # characters (1 byte each in src, 2 bytes each in dst), remaining after current loop.
4944   Register   Rix  = tmp;   // loop index
4945   Register   Rsrc = src;   // addr(src array)
4946   Register   Rdst = dst;   // addr(dst array)
4947   Label      ScalarShortcut, AllDone;
4948 
4949 #if 0  // Sacrifice shortcuts for code compactness
4950   {
4951     //---<  shortcuts for short strings (very frequent)   >---
4952     Label   skipShortcut, skip4Shortcut;
4953     z_ltr(Rcnt, Rcnt);                     // absolutely nothing to do for strings of len == 0.
4954     z_brz(AllDone);
4955     clear_reg(Z_R0);                       // make sure registers are properly initialized.
4956     clear_reg(Z_R1);
4957     z_chi(Rcnt, 4);
4958     z_brne(skip4Shortcut);                 // 4 characters are very frequent
4959       z_icm(Z_R0, 5,    0, Rsrc);          // Treat exactly 4 characters specially.
4960       z_icm(Z_R1, 5,    2, Rsrc);
4961       z_stm(Z_R0, Z_R1, 0, Rdst);
4962       z_bru(AllDone);
4963     bind(skip4Shortcut);
4964 
4965     z_chi(Rcnt, 8);
4966     z_brh(skipShortcut);                   // There's a lot to do...
4967     z_lgfr(Z_R0, Rcnt);                    // remaining #characters (<= 8). Precond for scalar loop.
4968                                            // This does not destroy the "register cleared" state of Z_R0.
4969     z_brl(ScalarShortcut);                 // Just a few characters
4970       z_icmh(Z_R0, 5, 0, Rsrc);            // Treat exactly 8 characters specially.
4971       z_icmh(Z_R1, 5, 4, Rsrc);
4972       z_icm(Z_R0,  5, 2, Rsrc);
4973       z_icm(Z_R1,  5, 6, Rsrc);
4974       z_stmg(Z_R0, Z_R1, 0, Rdst);
4975       z_bru(AllDone);
4976     bind(skipShortcut);
4977   }
4978 #endif
4979   clear_reg(Z_R0);                         // make sure register is properly initialized.
4980 
4981   if (VM_Version::has_VectorFacility()) {
4982     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
4983                                            // Otherwise just do nothing in vector mode.
4984                                            // Must be multiple of vector register length (16 bytes = 128 bits).
4985     const int  log_min_vcnt = exact_log2(min_vcnt);
4986     Label      VectorLoop, VectorDone;
4987 
4988     assert(VM_Version::has_DistinctOpnds(), "Assumption when has_VectorFacility()");
4989     z_srak(Rix, Rcnt, log_min_vcnt);       // calculate # vector loop iterations
4990     z_brz(VectorDone);                     // skip if none
4991 
4992     z_sllg(Z_R0, Rix, log_min_vcnt);       // remember #chars that will be processed by vector loop
4993 
4994     bind(VectorLoop);
4995       z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
4996       add2reg(Rsrc, min_vcnt);
4997 
4998       z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
4999       z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5000       z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5001       z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
5002       z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
5003       add2reg(Rdst, min_vcnt*2);
5004 
5005       z_brct(Rix, VectorLoop);
5006 
5007     bind(VectorDone);
5008   }
5009 
5010   const int  min_cnt     =  8;             // Minimum #characters required to use unrolled scalar loop.
5011                                            // Otherwise just do nothing in unrolled scalar mode.
5012                                            // Must be multiple of 8.
5013   {
5014     const int  log_min_cnt = exact_log2(min_cnt);
5015     Label      UnrolledLoop, UnrolledDone;
5016 
5017 
5018     if (VM_Version::has_DistinctOpnds()) {
5019       z_srk(Rix, Rcnt, Z_R0);              // remaining # chars to process in unrolled loop
5020     } else {
5021       z_lr(Rix, Rcnt);
5022       z_sr(Rix, Z_R0);
5023     }
5024     z_sra(Rix, log_min_cnt);               // unrolled loop count
5025     z_brz(UnrolledDone);
5026 
5027     clear_reg(Z_R0);
5028     clear_reg(Z_R1);
5029 
5030     bind(UnrolledLoop);
5031       z_icmh(Z_R0, 5, 0, Rsrc);
5032       z_icmh(Z_R1, 5, 4, Rsrc);
5033       z_icm(Z_R0,  5, 2, Rsrc);
5034       z_icm(Z_R1,  5, 6, Rsrc);
5035       add2reg(Rsrc, min_cnt);
5036 
5037       z_stmg(Z_R0, Z_R1, 0, Rdst);
5038 
5039       add2reg(Rdst, min_cnt*2);
5040       z_brct(Rix, UnrolledLoop);
5041 
5042     bind(UnrolledDone);
5043     z_lgfr(Z_R0, Rcnt);                    // # chars left over after unrolled loop.
5044     z_nilf(Z_R0, min_cnt-1);
5045     z_brnz(ScalarShortcut);                // Branch if a remainder (cnt not a multiple of 8) is left for the scalar code;
5046                                            // otherwise fall through. Rix == 0 in all cases.
5047     z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
5048     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5049     z_agr(Rdst, Z_R0);
5050     z_agr(Rsrc, Z_R0);                     // restore ptr.
5051     z_bru(AllDone);
5052   }
5053 
5054   {
5055     bind(ScalarShortcut);
5056     // Z_R0 must contain the remaining # characters as a 64-bit signed int here.
5057     //      Its contents are preserved over scalar processing (for the register fixup).
5058 
5059 #if 0  // Sacrifice shortcuts for code compactness
5060     {
5061       Label      ScalarDefault;
5062       z_chi(Rcnt, 2);
5063       z_brh(ScalarDefault);
5064       z_llc(Z_R0,  0, Z_R0, Rsrc);     // 6 bytes
5065       z_sth(Z_R0,  0, Z_R0, Rdst);     // 4 bytes
5066       z_brl(AllDone);
5067       z_llc(Z_R0,  1, Z_R0, Rsrc);     // 6 bytes
5068       z_sth(Z_R0,  2, Z_R0, Rdst);     // 4 bytes
5069       z_bru(AllDone);
5070       bind(ScalarDefault);
5071     }
5072 #endif
5073 
5074     Label   CodeTable;
5075     // Some comments on Rix calculation:
5076     //  - Rcnt is small, therefore no bits shifted out of low word (sll(g) instructions).
5077     //  - high word of both Rix and Rcnt may contain garbage
5078     //  - the final lngfr takes care of that garbage, extending the sign to high word
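         //  - Each remaining character is handled by one llc/sth pair below (6 + 4 = 10 bytes of code);
         //    branching to CodeTable - 10*remaining thus executes exactly 'remaining' pairs.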
5079     z_sllg(Rix, Z_R0, 2);                // calculate 10*Z_R0 = (4*Z_R0 + Z_R0)*2
5080     z_ar(Rix, Z_R0);
5081     z_larl(Z_R1, CodeTable);
5082     z_sll(Rix, 1);
5083     z_lngfr(Rix, Rix);      // ix range: [0..7], after inversion & mult: [-(7*10)..(0*10)].
5084     z_bc(Assembler::bcondAlways, 0, Rix, Z_R1);
5085 
5086     z_llc(Z_R1,  6, Z_R0, Rsrc);  // 6 bytes
5087     z_sth(Z_R1, 12, Z_R0, Rdst);  // 4 bytes
5088 
5089     z_llc(Z_R1,  5, Z_R0, Rsrc);
5090     z_sth(Z_R1, 10, Z_R0, Rdst);
5091 
5092     z_llc(Z_R1,  4, Z_R0, Rsrc);
5093     z_sth(Z_R1,  8, Z_R0, Rdst);
5094 
5095     z_llc(Z_R1,  3, Z_R0, Rsrc);
5096     z_sth(Z_R1,  6, Z_R0, Rdst);
5097 
5098     z_llc(Z_R1,  2, Z_R0, Rsrc);
5099     z_sth(Z_R1,  4, Z_R0, Rdst);
5100 
5101     z_llc(Z_R1,  1, Z_R0, Rsrc);
5102     z_sth(Z_R1,  2, Z_R0, Rdst);
5103 
5104     z_llc(Z_R1,  0, Z_R0, Rsrc);
5105     z_sth(Z_R1,  0, Z_R0, Rdst);
5106     bind(CodeTable);
5107 
5108     z_chi(Rcnt, 8);                        // no fixup for small strings. Rdst, Rsrc were not modified.
5109     z_brl(AllDone);
5110 
5111     z_sgfr(Z_R0, Rcnt);                    // negative # characters the ptrs have been advanced previously.
5112     z_agr(Rdst, Z_R0);                     // restore ptr, double the element count for Rdst restore.
5113     z_agr(Rdst, Z_R0);
5114     z_agr(Rsrc, Z_R0);                     // restore ptr.
5115   }
5116   bind(AllDone);
5117 
5118   BLOCK_COMMENT("} string_inflate");
5119   return offset() - block_start;
5120 }
5121 
5122 // Inflate byte[] to char[], length known at compile time.
5123 //   Restores: src, dst
5124 //   Kills:    tmp, Z_R0, Z_R1.
5125 // Note:
5126 //   len is signed int. Counts # characters, not bytes.
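     // Since len is a compile-time constant, the emitter picks fixed code fragments instead of
     // runtime loops where possible: a 32-char vector fragment or loop, a 16-char vector fragment,
     // unrolled 8-char scalar fragments or a loop, and a final switch for the remaining characters.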
5127 unsigned int MacroAssembler::string_inflate_const(Register src, Register dst, Register tmp, int len) {
5128   assert_different_registers(Z_R0, Z_R1, src, dst, tmp);
5129 
5130   BLOCK_COMMENT("string_inflate_const {");
5131   int block_start = offset();
5132 
5133   Register   Rix  = tmp;   // loop index
5134   Register   Rsrc = src;   // addr(src array)
5135   Register   Rdst = dst;   // addr(dst array)
5136   Label      ScalarShortcut, AllDone;
5137   int        nprocessed = 0;
5138   int        src_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5139   int        dst_off    = 0;  // compensate for saved (optimized away) ptr advancement.
5140   bool       restore_inputs = false;
5141   bool       workreg_clear  = false;
5142 
5143   if ((len >= 32) && VM_Version::has_VectorFacility()) {
5144     const int  min_vcnt     = 32;          // Minimum #characters required to use vector instructions.
5145                                            // Otherwise just do nothing in vector mode.
5146                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5147     const int  log_min_vcnt = exact_log2(min_vcnt);
5148     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5149     nprocessed             += iterations << log_min_vcnt;
5150     Label      VectorLoop;
5151 
5152     if (iterations == 1) {
5153       z_vlm(Z_V20, Z_V21, 0+src_off, Rsrc);  // get next 32 characters (single-byte)
5154       z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5155       z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5156       z_vuplhb(Z_V24, Z_V21);                // V4 <- (expand) V1(high)
5157       z_vupllb(Z_V25, Z_V21);                // V5 <- (expand) V1(low)
5158       z_vstm(Z_V22, Z_V25, 0+dst_off, Rdst); // store next 32 bytes
5159 
5160       src_off += min_vcnt;
5161       dst_off += min_vcnt*2;
5162     } else {
5163       restore_inputs = true;
5164 
5165       z_lgfi(Rix, len>>log_min_vcnt);
5166       bind(VectorLoop);
5167         z_vlm(Z_V20, Z_V21, 0, Rsrc);        // get next 32 characters (single-byte)
5168         add2reg(Rsrc, min_vcnt);
5169 
5170         z_vuplhb(Z_V22, Z_V20);              // V2 <- (expand) V0(high)
5171         z_vupllb(Z_V23, Z_V20);              // V3 <- (expand) V0(low)
5172         z_vuplhb(Z_V24, Z_V21);              // V4 <- (expand) V1(high)
5173         z_vupllb(Z_V25, Z_V21);              // V5 <- (expand) V1(low)
5174         z_vstm(Z_V22, Z_V25, 0, Rdst);       // store next 32 bytes
5175         add2reg(Rdst, min_vcnt*2);
5176 
5177         z_brct(Rix, VectorLoop);
5178     }
5179   }
5180 
5181   if (((len-nprocessed) >= 16) && VM_Version::has_VectorFacility()) {
5182     const int  min_vcnt     = 16;          // Minimum #characters required to use vector instructions.
5183                                            // Otherwise just do nothing in vector mode.
5184                                            // Must be multiple of vector register length (16 bytes = 128 bits).
5185     const int  log_min_vcnt = exact_log2(min_vcnt);
5186     const int  iterations   = (len - nprocessed) >> log_min_vcnt;
5187     nprocessed             += iterations << log_min_vcnt;
5188     assert(iterations == 1, "must be!");
5189 
5190     z_vl(Z_V20, 0+src_off, Z_R0, Rsrc);    // get next 16 characters (single-byte)
5191     z_vuplhb(Z_V22, Z_V20);                // V2 <- (expand) V0(high)
5192     z_vupllb(Z_V23, Z_V20);                // V3 <- (expand) V0(low)
5193     z_vstm(Z_V22, Z_V23, 0+dst_off, Rdst); // store next 32 bytes
5194 
5195     src_off += min_vcnt;
5196     dst_off += min_vcnt*2;
5197   }
5198 
5199   if ((len-nprocessed) > 8) {
5200     const int  min_cnt     =  8;           // Minimum #characters required to use unrolled scalar loop.
5201                                            // Otherwise just do nothing in unrolled scalar mode.
5202                                            // Must be multiple of 8.
5203     const int  log_min_cnt = exact_log2(min_cnt);
5204     const int  iterations  = (len - nprocessed) >> log_min_cnt;
5205     nprocessed     += iterations << log_min_cnt;
5206 
5207     //---<  avoid loop overhead/ptr increment for small # iterations  >---
5208     if (iterations <= 2) {
5209       clear_reg(Z_R0);
5210       clear_reg(Z_R1);
5211       workreg_clear = true;
5212 
5213       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5214       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5215       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5216       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5217       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5218 
5219       src_off += min_cnt;
5220       dst_off += min_cnt*2;
5221     }
5222 
5223     if (iterations == 2) {
5224       z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5225       z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5226       z_icm(Z_R0,  5, 2+src_off, Rsrc);
5227       z_icm(Z_R1,  5, 6+src_off, Rsrc);
5228       z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5229 
5230       src_off += min_cnt;
5231       dst_off += min_cnt*2;
5232     }
5233 
5234     if (iterations > 2) {
5235       Label      UnrolledLoop;
5236       restore_inputs  = true;
5237 
5238       clear_reg(Z_R0);
5239       clear_reg(Z_R1);
5240       workreg_clear = true;
5241 
5242       z_lgfi(Rix, iterations);
5243       bind(UnrolledLoop);
5244         z_icmh(Z_R0, 5, 0, Rsrc);
5245         z_icmh(Z_R1, 5, 4, Rsrc);
5246         z_icm(Z_R0,  5, 2, Rsrc);
5247         z_icm(Z_R1,  5, 6, Rsrc);
5248         add2reg(Rsrc, min_cnt);
5249 
5250         z_stmg(Z_R0, Z_R1, 0, Rdst);
5251         add2reg(Rdst, min_cnt*2);
5252 
5253         z_brct(Rix, UnrolledLoop);
5254     }
5255   }
5256 
5257   if ((len-nprocessed) > 0) {
5258     switch (len-nprocessed) {
5259       case 8:
5260         if (!workreg_clear) {
5261           clear_reg(Z_R0);
5262           clear_reg(Z_R1);
5263         }
5264         z_icmh(Z_R0, 5, 0+src_off, Rsrc);
5265         z_icmh(Z_R1, 5, 4+src_off, Rsrc);
5266         z_icm(Z_R0,  5, 2+src_off, Rsrc);
5267         z_icm(Z_R1,  5, 6+src_off, Rsrc);
5268         z_stmg(Z_R0, Z_R1, 0+dst_off, Rdst);
5269         break;
5270       case 7:
5271         if (!workreg_clear) {
5272           clear_reg(Z_R0);
5273           clear_reg(Z_R1);
5274         }
5275         clear_reg(Rix);
5276         z_icm(Z_R0,  5, 0+src_off, Rsrc);
5277         z_icm(Z_R1,  5, 2+src_off, Rsrc);
5278         z_icm(Rix,   5, 4+src_off, Rsrc);
5279         z_stm(Z_R0,  Z_R1, 0+dst_off, Rdst);
5280         z_llc(Z_R0,  6+src_off, Z_R0, Rsrc);
5281         z_st(Rix,    8+dst_off, Z_R0, Rdst);
5282         z_sth(Z_R0, 12+dst_off, Z_R0, Rdst);
5283         break;
5284       case 6:
5285         if (!workreg_clear) {
5286           clear_reg(Z_R0);
5287           clear_reg(Z_R1);
5288         }
5289         clear_reg(Rix);
5290         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5291         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5292         z_icm(Rix,  5, 4+src_off, Rsrc);
5293         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5294         z_st(Rix,   8+dst_off, Z_R0, Rdst);
5295         break;
5296       case 5:
5297         if (!workreg_clear) {
5298           clear_reg(Z_R0);
5299           clear_reg(Z_R1);
5300         }
5301         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5302         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5303         z_llc(Rix,  4+src_off, Z_R0, Rsrc);
5304         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5305         z_sth(Rix,  8+dst_off, Z_R0, Rdst);
5306         break;
5307       case 4:
5308         if (!workreg_clear) {
5309           clear_reg(Z_R0);
5310           clear_reg(Z_R1);
5311         }
5312         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5313         z_icm(Z_R1, 5, 2+src_off, Rsrc);
5314         z_stm(Z_R0, Z_R1, 0+dst_off, Rdst);
5315         break;
5316       case 3:
5317         if (!workreg_clear) {
5318           clear_reg(Z_R0);
5319         }
5320         z_llc(Z_R1, 2+src_off, Z_R0, Rsrc);
5321         z_icm(Z_R0, 5, 0+src_off, Rsrc);
5322         z_sth(Z_R1, 4+dst_off, Z_R0, Rdst);
5323         z_st(Z_R0,  0+dst_off, Rdst);
5324         break;
5325       case 2:
5326         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5327         z_llc(Z_R1, 1+src_off, Z_R0, Rsrc);
5328         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5329         z_sth(Z_R1, 2+dst_off, Z_R0, Rdst);
5330         break;
5331       case 1:
5332         z_llc(Z_R0, 0+src_off, Z_R0, Rsrc);
5333         z_sth(Z_R0, 0+dst_off, Z_R0, Rdst);
5334         break;
5335       default:
5336         guarantee(false, "Impossible");
5337         break;
5338     }
5339     src_off   +=  len-nprocessed;
5340     dst_off   += (len-nprocessed)*2;
5341     nprocessed = len;
5342   }
5343 
5344   //---< restore modified input registers  >---
5345   if ((nprocessed > 0) && restore_inputs) {
5346     z_agfi(Rsrc, -(nprocessed-src_off));
5347     if (nprocessed < 1000000000) { // avoid int overflow
5348       z_agfi(Rdst, -(nprocessed*2-dst_off));
5349     } else {
5350       z_agfi(Rdst, -(nprocessed-dst_off));
5351       z_agfi(Rdst, -nprocessed);
5352     }
5353   }
5354 
5355   BLOCK_COMMENT("} string_inflate_const");
5356   return offset() - block_start;
5357 }
5358 
5359 // Kills src.
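// Sets result to 1 if any byte in src[0..cnt) has its most significant bit set
// (i.e. is negative when interpreted as a signed byte), otherwise sets result to 0.
// Hedged reference sketch:
//   for (int i = 0; i < cnt; i++) { if (src[i] & 0x80) return 1; }
//   return 0;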
5360 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
5361                                            Register odd_reg, Register even_reg, Register tmp) {
5362   int block_start = offset();
5363   Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
5364   const Register addr = src, mask = tmp;
5365 
5366   BLOCK_COMMENT("has_negatives {");
5367 
5368   z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
5369   z_llilf(mask, 0x80808080);
5370   z_lhi(result, 1);        // Assume true.
5371   // Last possible addr for fast loop.
5372   z_lay(odd_reg, -16, Z_R1, src);
5373   z_chi(cnt, 16);
5374   z_brl(Lslow);
5375 
5376   // ind1: index, even_reg: index increment, odd_reg: index limit
5377   z_iihf(mask, 0x80808080);
5378   z_lghi(even_reg, 16);
5379 
5380   bind(Lloop1); // 16 bytes per iteration.
5381   z_lg(Z_R0, Address(addr));
5382   z_lg(Z_R1, Address(addr, 8));
5383   z_ogr(Z_R0, Z_R1);
5384   z_ngr(Z_R0, mask);
5385   z_brne(Ldone);           // If found return 1.
5386   z_brxlg(addr, even_reg, Lloop1);
5387 
5388   bind(Lslow);
5389   z_aghi(odd_reg, 16-1);   // Last possible addr for slow loop.
5390   z_lghi(even_reg, 1);
5391   z_cgr(addr, odd_reg);
5392   z_brh(Lnotfound);
5393 
5394   bind(Lloop2); // 1 byte per iteration.
5395   z_cli(Address(addr), 0x80);
5396   z_brnl(Ldone);           // If found return 1.
5397   z_brxlg(addr, even_reg, Lloop2);
5398 
5399   bind(Lnotfound);
5400   z_lhi(result, 0);
5401 
5402   bind(Ldone);
5403 
5404   BLOCK_COMMENT("} has_negatives");
5405 
5406   return offset() - block_start;
5407 }
5408 
5409 #ifdef COMPILER2
5410 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
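// Semantics, hedged sketch (ignoring the LU/UL encoding handling below):
//   int n = min(cnt1, cnt2);
//   for (int i = 0; i < n; i++) { if (str1[i] != str2[i]) return str1[i] - str2[i]; }
//   return cnt1 - cnt2;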
5411 unsigned int MacroAssembler::string_compare(Register str1, Register str2,
5412                                             Register cnt1, Register cnt2,
5413                                             Register odd_reg, Register even_reg, Register result, int ae) {
5414   int block_start = offset();
5415 
5416   assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
5417   assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
5418 
5419   // If strings are equal up to min length, return the length difference.
5420   const Register diff = result, // Pre-set result with length difference.
5421                  min  = cnt1,   // min number of bytes
5422                  tmp  = cnt2;
5423 
5424   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a)
5425   // we interchange str1 and str2 in the UL case and negate the result.
5426   // Like this, str1 is always latin1 encoded, except for the UU case.
  // In addition, the value needs to be zero-extended (sign extension yields the same
  // result, since the value is non-negative) when it is used in a 64-bit register.
5428   const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
5429 
5430   BLOCK_COMMENT("string_compare {");
5431 
5432   if (used_as_LU) {
5433     z_srl(cnt2, 1);
5434   }
5435 
5436   // See if the lengths are different, and calculate min in cnt1.
5437   // Save diff in case we need it for a tie-breaker.
5438 
5439   // diff = cnt1 - cnt2
5440   if (VM_Version::has_DistinctOpnds()) {
5441     z_srk(diff, cnt1, cnt2);
5442   } else {
5443     z_lr(diff, cnt1);
5444     z_sr(diff, cnt2);
5445   }
5446   if (str1 != str2) {
5447     if (VM_Version::has_LoadStoreConditional()) {
5448       z_locr(min, cnt2, Assembler::bcondHigh);
5449     } else {
5450       Label Lskip;
5451       z_brl(Lskip);    // min ok if cnt1 < cnt2
5452       z_lr(min, cnt2); // min = cnt2
5453       bind(Lskip);
5454     }
5455   }
5456 
5457   if (ae == StrIntrinsicNode::UU) {
5458     z_sra(diff, 1);
5459   }
5460   if (str1 != str2) {
5461     Label Ldone;
5462     if (used_as_LU) {
5463       // Loop which searches the first difference character by character.
5464       Label Lloop;
5465       const Register ind1 = Z_R1,
5466                      ind2 = min;
5467       int stride1 = 1, stride2 = 2; // See comment above.
5468 
5469       // ind1: index, even_reg: index increment, odd_reg: index limit
5470       z_llilf(ind1, (unsigned int)(-stride1));
5471       z_lhi(even_reg, stride1);
5472       add2reg(odd_reg, -stride1, min);
5473       clear_reg(ind2); // kills min
5474 
5475       bind(Lloop);
5476       z_brxh(ind1, even_reg, Ldone);
5477       z_llc(tmp, Address(str1, ind1));
5478       z_llh(Z_R0, Address(str2, ind2));
5479       z_ahi(ind2, stride2);
5480       z_sr(tmp, Z_R0);
5481       z_bre(Lloop);
5482 
5483       z_lr(result, tmp);
5484 
5485     } else {
5486       // Use clcle in fast loop (only for same encoding).
5487       z_lgr(Z_R0, str1);
5488       z_lgr(even_reg, str2);
5489       z_llgfr(Z_R1, min);
5490       z_llgfr(odd_reg, min);
5491 
5492       if (ae == StrIntrinsicNode::LL) {
5493         compare_long_ext(Z_R0, even_reg, 0);
5494       } else {
5495         compare_long_uni(Z_R0, even_reg, 0);
5496       }
5497       z_bre(Ldone);
5498       z_lgr(Z_R1, Z_R0);
5499       if (ae == StrIntrinsicNode::LL) {
5500         z_llc(Z_R0, Address(even_reg));
5501         z_llc(result, Address(Z_R1));
5502       } else {
5503         z_llh(Z_R0, Address(even_reg));
5504         z_llh(result, Address(Z_R1));
5505       }
5506       z_sr(result, Z_R0);
5507     }
5508 
5509     // Otherwise, return the difference between the first mismatched chars.
5510     bind(Ldone);
5511   }
5512 
5513   if (ae == StrIntrinsicNode::UL) {
5514     z_lcr(result, result); // Negate result (see note above).
5515   }
5516 
5517   BLOCK_COMMENT("} string_compare");
5518 
5519   return offset() - block_start;
5520 }
5521 #endif
5522 
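// Sets result to 1 if both operands contain the same elements, 0 otherwise.
// With is_array_equ, ary1/ary2 are array oops (may be NULL or identical); otherwise
// they are raw buffer addresses compared over 'limit' elements.
// Hedged sketch of the array case:
//   if (a1 == a2) return 1;
//   if (a1 == NULL || a2 == NULL || a1->length != a2->length) return 0;
//   return memcmp(a1->data, a2->data, byte_length) == 0;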
5523 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
5524                                           Register odd_reg, Register even_reg, Register result, bool is_byte) {
5525   int block_start = offset();
5526 
5527   BLOCK_COMMENT("array_equals {");
5528 
5529   assert_different_registers(ary1, limit, odd_reg, even_reg);
5530   assert_different_registers(ary2, limit, odd_reg, even_reg);
5531 
5532   Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
5533   int base_offset = 0;
5534 
5535   if (ary1 != ary2) {
5536     if (is_array_equ) {
5537       base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
5538 
5539       // Return true if the same array.
5540       compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
5541 
5542       // Return false if one of them is NULL.
5543       compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5544       compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5545 
5546       // Load the lengths of arrays.
5547       z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));
5548 
5549       // Return false if the two arrays are not equal length.
5550       z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
5551       z_brne(Ldone_false);
5552 
5553       // string len in bytes (right operand)
5554       if (!is_byte) {
5555         z_chi(odd_reg, 128);
5556         z_sll(odd_reg, 1); // preserves flags
5557         z_brh(Lclcle);
5558       } else {
5559         compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5560       }
5561     } else {
5562       z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
5563       compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5564     }
5565 
5566 
5567     // Use clc instruction for up to 256 bytes.
5568     {
5569       Register str1_reg = ary1,
5570           str2_reg = ary2;
5571       if (is_array_equ) {
5572         str1_reg = Z_R1;
5573         str2_reg = even_reg;
5574         add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
5575         add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
5576       }
5577       z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
5578       z_brl(Ldone_true);
5579       // Note: We could jump to the template if equal.
5580 
5581       assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5582       z_exrl(odd_reg, CLC_template);
5583       z_bre(Ldone_true);
5584       // fall through
5585 
5586       bind(Ldone_false);
5587       clear_reg(result);
5588       z_bru(Ldone);
5589 
5590       bind(CLC_template);
5591       z_clc(0, 0, str1_reg, 0, str2_reg);
5592     }
5593 
5594     // Use clcle instruction.
5595     {
5596       bind(Lclcle);
5597       add2reg(even_reg, base_offset, ary2); // string addr (right operand)
5598       add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)
5599 
5600       z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
5601       if (is_byte) {
5602         compare_long_ext(Z_R0, even_reg, 0);
5603       } else {
5604         compare_long_uni(Z_R0, even_reg, 0);
5605       }
5606       z_lghi(result, 0); // Preserve flags.
5607       z_brne(Ldone);
5608     }
5609   }
5610   // fall through
5611 
5612   bind(Ldone_true);
5613   z_lghi(result, 1); // All characters are equal.
5614   bind(Ldone);
5615 
5616   BLOCK_COMMENT("} array_equals");
5617 
5618   return offset() - block_start;
5619 }
5620 
5621 #ifdef COMPILER2
5622 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
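// Returns (in result) the character index of the first occurrence of needle in
// haystack, or -1 if there is none. Hedged reference sketch (character indices):
//   for (int i = 0; i + needlecnt <= haycnt; i++) {
//     if (equal(haystack + i, needle, needlecnt)) return i;
//   }
//   return -1;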
5623 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
5624                                             Register needle, Register needlecnt, int needlecntval,
5625                                             Register odd_reg, Register even_reg, int ae) {
5626   int block_start = offset();
5627 
5628   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
5629   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
5630   const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
5631   const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
5632   Label L_needle1, L_Found, L_NotFound;
5633 
5634   BLOCK_COMMENT("string_indexof {");
5635 
5636   if (needle == haystack) {
5637     z_lhi(result, 0);
5638   } else {
5639 
5640   // Load first character of needle (R0 used by search_string instructions).
5641   if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }
5642 
5643   // Compute last haystack addr to use if no match gets found.
5644   if (needlecnt != noreg) { // variable needlecnt
5645     z_ahi(needlecnt, -1); // Remaining characters after first one.
5646     z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
5647     if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
5648   } else { // constant needlecnt
5649     assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
5650     // Compute index succeeding last element to compare.
5651     if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
5652   }
5653 
5654   z_llgfr(haycnt, haycnt); // Clear high half.
5655   z_lgr(result, haystack); // Final result will be computed from needle start pointer.
5656   if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
5657   z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).
5658 
5659   if (h_csize != n_csize) {
5660     assert(ae == StrIntrinsicNode::UL, "Invalid encoding");
5661 
5662     if (needlecnt != noreg || needlecntval != 1) {
5663       if (needlecnt != noreg) {
5664         compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
5665       }
5666 
5667       // Main Loop: UL version (now we have at least 2 characters).
5668       Label L_OuterLoop, L_InnerLoop, L_Skip;
5669       bind(L_OuterLoop); // Search for 1st 2 characters.
5670       z_lgr(Z_R1, haycnt);
5671       MacroAssembler::search_string_uni(Z_R1, result);
5672       z_brc(Assembler::bcondNotFound, L_NotFound);
5673       z_lgr(result, Z_R1);
5674 
5675       z_lghi(Z_R1, n_csize);
5676       z_lghi(even_reg, h_csize);
5677       bind(L_InnerLoop);
5678       z_llgc(odd_reg, Address(needle, Z_R1));
5679       z_ch(odd_reg, Address(result, even_reg));
5680       z_brne(L_Skip);
5681       if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
5682       z_brnl(L_Found);
5683       z_aghi(Z_R1, n_csize);
5684       z_aghi(even_reg, h_csize);
5685       z_bru(L_InnerLoop);
5686 
5687       bind(L_Skip);
5688       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5689       z_bru(L_OuterLoop);
5690     }
5691 
5692   } else {
5693     const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
5694     Label L_clcle;
5695 
5696     if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
5697       if (needlecnt != noreg) {
5698         compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
5699         z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
5700         z_brl(L_needle1);
5701       }
5702 
5703       // Main Loop: clc version (now we have at least 2 characters).
5704       Label L_OuterLoop, CLC_template;
5705       bind(L_OuterLoop); // Search for 1st 2 characters.
5706       z_lgr(Z_R1, haycnt);
5707       if (h_csize == 1) {
5708         MacroAssembler::search_string(Z_R1, result);
5709       } else {
5710         MacroAssembler::search_string_uni(Z_R1, result);
5711       }
5712       z_brc(Assembler::bcondNotFound, L_NotFound);
5713       z_lgr(result, Z_R1);
5714 
5715       if (needlecnt != noreg) {
5716         assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5717         z_exrl(needlecnt, CLC_template);
5718       } else {
        z_clc(h_csize, needle_bytes - 1, Z_R1, n_csize, needle);
5720       }
5721       z_bre(L_Found);
5722       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5723       z_bru(L_OuterLoop);
5724 
5725       if (needlecnt != noreg) {
5726         bind(CLC_template);
5727         z_clc(h_csize, 0, Z_R1, n_csize, needle);
5728       }
5729     }
5730 
5731     if (needlecnt != noreg || needle_bytes > 256) {
5732       bind(L_clcle);
5733 
5734       // Main Loop: clcle version (now we have at least 256 bytes).
5735       Label L_OuterLoop, CLC_template;
5736       bind(L_OuterLoop); // Search for 1st 2 characters.
5737       z_lgr(Z_R1, haycnt);
5738       if (h_csize == 1) {
5739         MacroAssembler::search_string(Z_R1, result);
5740       } else {
5741         MacroAssembler::search_string_uni(Z_R1, result);
5742       }
5743       z_brc(Assembler::bcondNotFound, L_NotFound);
5744 
5745       add2reg(Z_R0, n_csize, needle);
5746       add2reg(even_reg, h_csize, Z_R1);
5747       z_lgr(result, Z_R1);
5748       if (needlecnt != noreg) {
5749         z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
5750         z_llgfr(odd_reg, needlecnt);
5751       } else {
5752         load_const_optimized(Z_R1, needle_bytes);
5753         if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
5754       }
5755       if (h_csize == 1) {
5756         compare_long_ext(Z_R0, even_reg, 0);
5757       } else {
5758         compare_long_uni(Z_R0, even_reg, 0);
5759       }
5760       z_bre(L_Found);
5761 
5762       if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
5763       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5764       z_bru(L_OuterLoop);
5765     }
5766   }
5767 
5768   if (needlecnt != noreg || needlecntval == 1) {
5769     bind(L_needle1);
5770 
5771     // Single needle character version.
5772     if (h_csize == 1) {
5773       MacroAssembler::search_string(haycnt, result);
5774     } else {
5775       MacroAssembler::search_string_uni(haycnt, result);
5776     }
5777     z_lgr(result, haycnt);
5778     z_brc(Assembler::bcondFound, L_Found);
5779   }
5780 
5781   bind(L_NotFound);
5782   add2reg(result, -1, haystack); // Return -1.
5783 
5784   bind(L_Found); // Return index (or -1 in fallthrough case).
5785   z_sgr(result, haystack);
5786   if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
5787   }
5788   BLOCK_COMMENT("} string_indexof");
5789 
5790   return offset() - block_start;
5791 }
5792 
5793 // early clobber: result
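// Returns (in result) the index of the first occurrence of a single character
// (needleChar, or the character in 'needle') in the haystack, or -1 if not found.
// Hedged sketch:
//   for (int i = 0; i < haycnt; i++) { if (haystack[i] == ch) return i; }
//   return -1;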
5794 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
5795                                                  Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
5796   int block_start = offset();
5797 
5798   BLOCK_COMMENT("string_indexof_char {");
5799 
5800   if (needle == haystack) {
5801     z_lhi(result, 0);
5802   } else {
5803 
5804   Label Ldone;
5805 
5806   z_llgfr(odd_reg, haycnt);  // Preset loop ctr/searchrange end.
5807   if (needle == noreg) {
5808     load_const_optimized(Z_R0, (unsigned long)needleChar);
5809   } else {
5810     if (is_byte) {
5811       z_llgcr(Z_R0, needle); // First (and only) needle char.
5812     } else {
5813       z_llghr(Z_R0, needle); // First (and only) needle char.
5814     }
5815   }
5816 
5817   if (!is_byte) {
5818     z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
5819   }
5820 
5821   z_lgr(even_reg, haystack); // haystack addr
5822   z_agr(odd_reg, haystack);  // First char after range end.
5823   z_lghi(result, -1);
5824 
5825   if (is_byte) {
5826     MacroAssembler::search_string(odd_reg, even_reg);
5827   } else {
5828     MacroAssembler::search_string_uni(odd_reg, even_reg);
5829   }
5830   z_brc(Assembler::bcondNotFound, Ldone);
5831   if (is_byte) {
5832     if (VM_Version::has_DistinctOpnds()) {
5833       z_sgrk(result, odd_reg, haystack);
5834     } else {
5835       z_sgr(odd_reg, haystack);
5836       z_lgr(result, odd_reg);
5837     }
5838   } else {
5839     z_slgr(odd_reg, haystack);
5840     z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
5841   }
5842 
5843   bind(Ldone);
5844   }
5845   BLOCK_COMMENT("} string_indexof_char");
5846 
5847   return offset() - block_start;
5848 }
5849 #endif
5850 
5851 //-------------------------------------------------
5852 //   Constants (scalar and oop) in constant pool
5853 //-------------------------------------------------
5854 
5855 // Add a non-relocated constant to the CP.
5856 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
5857   long    value  = val.value();
5858   address tocPos = long_constant(value);
5859 
5860   if (tocPos != NULL) {
5861     int tocOffset = (int)(tocPos - code()->consts()->start());
5862     return tocOffset;
5863   }
  // long_constant() returned NULL, so no constant pool entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently generated
  // access code gets executed anyway.
5867   return -1;
5868 }
5869 
5870 // Returns the TOC offset where the address is stored.
5871 // Add a relocated constant to the CP.
5872 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
5873   // Use RelocationHolder::none for the constant pool entry.
5874   // Otherwise we will end up with a failing NativeCall::verify(x),
5875   // where x is the address of the constant pool entry.
5876   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
5877 
5878   if (tocPos != NULL) {
5879     int              tocOffset = (int)(tocPos - code()->consts()->start());
5880     RelocationHolder rsp = oop.rspec();
5881     Relocation      *rel = rsp.reloc();
5882 
5883     // Store toc_offset in relocation, used by call_far_patchable.
5884     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
5885       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
5886     }
5887     // Relocate at the load's pc.
5888     relocate(rsp);
5889 
5890     return tocOffset;
5891   }
  // address_constant() returned NULL, so no constant pool entry has been created.
  // In that case, we return a "fatal" offset, in case subsequently generated
  // access code gets executed anyway.
5895   return -1;
5896 }
5897 
5898 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5899   int     tocOffset = store_const_in_toc(a);
5900   if (tocOffset == -1) return false;
5901   address tocPos    = tocOffset + code()->consts()->start();
5902   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5903   relocate(a.rspec());
5904   load_long_pcrelative(dst, tocPos);
5905   return true;
5906 }
5907 
5908 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5909   int     tocOffset = store_oop_in_toc(a);
5910   if (tocOffset == -1) return false;
5911   address tocPos    = tocOffset + code()->consts()->start();
5912   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5913 
5914   load_addr_pcrelative(dst, tocPos);
5915   return true;
5916 }
5917 
5918 // If the instruction sequence at the given pc is a load_const_from_toc
5919 // sequence, return the value currently stored at the referenced position
5920 // in the TOC.
5921 intptr_t MacroAssembler::get_const_from_toc(address pc) {
5922 
5923   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5924 
5925   long    offset  = get_load_const_from_toc_offset(pc);
5926   address dataLoc = NULL;
5927   if (is_load_const_from_toc_pcrelative(pc)) {
5928     dataLoc = pc + offset;
5929   } else {
5930     CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
5931     assert(cb && cb->is_nmethod(), "sanity");
5932     nmethod* nm = (nmethod*)cb;
5933     dataLoc = nm->ctable_begin() + offset;
5934   }
5935   return *(intptr_t *)dataLoc;
5936 }
5937 
5938 // If the instruction sequence at the given pc is a load_const_from_toc
5939 // sequence, copy the passed-in new_data value into the referenced
5940 // position in the TOC.
5941 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
5942   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5943 
5944   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
5945   address dataLoc = NULL;
5946   if (is_load_const_from_toc_pcrelative(pc)) {
5947     dataLoc = pc+offset;
5948   } else {
5949     nmethod* nm = CodeCache::find_nmethod(pc);
5950     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
5951     dataLoc = nm->ctable_begin() + offset;
5952   }
5953   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
5954     *(unsigned long *)dataLoc = new_data;
5955   }
5956 }
5957 
5958 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
5959 // site. Verify by calling is_load_const_from_toc() before!!
5960 // Offset is +/- 2**32 -> use long.
5961 long MacroAssembler::get_load_const_from_toc_offset(address a) {
5962   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
5963   //  expected code sequence:
5964   //    z_lgrl(t, simm32);    len = 6
5965   unsigned long inst;
5966   unsigned int  len = get_instruction(a, &inst);
5967   return get_pcrel_offset(inst);
5968 }
5969 
5970 //**********************************************************************************
5971 //  inspection of generated instruction sequences for a particular pattern
5972 //**********************************************************************************
5973 
5974 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
5975 #ifdef ASSERT
5976   unsigned long inst;
5977   unsigned int  len = get_instruction(a+2, &inst);
5978   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
5979     const int range = 128;
5980     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
5981     VM_Version::z_SIGSEGV();
5982   }
5983 #endif
5984   // expected code sequence:
5985   //   z_lgrl(t, relAddr32);    len = 6
  // TODO: verify accessed data is in CP, if possible.
5987   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
5988 }
5989 
5990 bool MacroAssembler::is_load_const_from_toc_call(address a) {
5991   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
5992 }
5993 
5994 bool MacroAssembler::is_load_const_call(address a) {
5995   return is_load_const(a) && is_call_byregister(a + load_const_size());
5996 }
5997 
5998 //-------------------------------------------------
//   Emitters for some really CISC instructions
6000 //-------------------------------------------------
6001 
6002 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
6003   assert(dst->encoding()%2==0, "must be an even/odd register pair");
6004   assert(src->encoding()%2==0, "must be an even/odd register pair");
6005   assert(pad<256, "must be a padding BYTE");
6006 
6007   Label retry;
6008   bind(retry);
6009   Assembler::z_mvcle(dst, src, pad);
6010   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6011 }
6012 
6013 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
6014   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6015   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6016   assert(pad<256, "must be a padding BYTE");
6017 
6018   Label retry;
6019   bind(retry);
6020   Assembler::z_clcle(left, right, pad, Z_R0);
6021   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6022 }
6023 
6024 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
6025   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
6026   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
6027   assert(pad<=0xfff, "must be a padding HALFWORD");
6028   assert(VM_Version::has_ETF2(), "instruction must be available");
6029 
6030   Label retry;
6031   bind(retry);
6032   Assembler::z_clclu(left, right, pad, Z_R0);
6033   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6034 }
6035 
6036 void MacroAssembler::search_string(Register end, Register start) {
6037   assert(end->encoding() != 0, "end address must not be in R0");
6038   assert(start->encoding() != 0, "start address must not be in R0");
6039 
6040   Label retry;
6041   bind(retry);
6042   Assembler::z_srst(end, start);
6043   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6044 }
6045 
6046 void MacroAssembler::search_string_uni(Register end, Register start) {
6047   assert(end->encoding() != 0, "end address must not be in R0");
6048   assert(start->encoding() != 0, "start address must not be in R0");
6049   assert(VM_Version::has_ETF3(), "instruction must be available");
6050 
6051   Label retry;
6052   bind(retry);
6053   Assembler::z_srstu(end, start);
6054   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6055 }
6056 
6057 void MacroAssembler::kmac(Register srcBuff) {
6058   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6059   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6060 
6061   Label retry;
6062   bind(retry);
6063   Assembler::z_kmac(Z_R0, srcBuff);
6064   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6065 }
6066 
6067 void MacroAssembler::kimd(Register srcBuff) {
6068   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6069   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6070 
6071   Label retry;
6072   bind(retry);
6073   Assembler::z_kimd(Z_R0, srcBuff);
6074   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6075 }
6076 
6077 void MacroAssembler::klmd(Register srcBuff) {
6078   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6079   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
6080 
6081   Label retry;
6082   bind(retry);
6083   Assembler::z_klmd(Z_R0, srcBuff);
6084   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6085 }
6086 
6087 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
6088   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6089   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6090   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6091   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6092   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6093 
6094   Label retry;
6095   bind(retry);
6096   Assembler::z_km(dstBuff, srcBuff);
6097   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6098 }
6099 
6100 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
6101   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
6102   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
6103   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
6104   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
6105   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6106 
6107   Label retry;
6108   bind(retry);
6109   Assembler::z_kmc(dstBuff, srcBuff);
6110   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6111 }
6112 
6113 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
6114   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
6115 
6116   Label retry;
6117   bind(retry);
6118   Assembler::z_cksm(crcBuff, srcBuff);
6119   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6120 }
6121 
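// TRxx: translate a buffer using a translation table set up by the caller.
// TROO: one-byte -> one-byte,  TROT: one-byte -> two-byte,
// TRTO: two-byte -> one-byte,  TRTT: two-byte -> two-byte.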
6122 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
6123   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6124   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6125 
6126   Label retry;
6127   bind(retry);
6128   Assembler::z_troo(r1, r2, m3);
6129   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6130 }
6131 
6132 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
6133   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6134   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6135 
6136   Label retry;
6137   bind(retry);
6138   Assembler::z_trot(r1, r2, m3);
6139   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6140 }
6141 
6142 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
6143   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6144   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6145 
6146   Label retry;
6147   bind(retry);
6148   Assembler::z_trto(r1, r2, m3);
6149   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6150 }
6151 
6152 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
6153   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
6154   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
6155 
6156   Label retry;
6157   bind(retry);
6158   Assembler::z_trtt(r1, r2, m3);
6159   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
6160 }
6161 
6162 //---------------------------------------
6163 // Helpers for Intrinsic Emitters
6164 //---------------------------------------
6165 
6166 /**
6167  * uint32_t crc;
6168  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6169  */
6170 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
6171   assert_different_registers(crc, table, tmp);
6172   assert_different_registers(val, table);
6173   if (crc == val) {      // Must rotate first to use the unmodified value.
6174     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6175     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6176   } else {
6177     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
6178     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
6179   }
6180   z_x(crc, Address(table, tmp, 0));
6181 }
6182 
6183 /**
6184  * uint32_t crc;
6185  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
6186  */
6187 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
6188   fold_byte_crc32(crc, crc, table, tmp);
6189 }
6190 
6191 /**
6192  * Emits code to update CRC-32 with a byte value according to constants in table.
6193  *
6194  * @param [in,out]crc Register containing the crc.
6195  * @param [in]val     Register containing the byte to fold into the CRC.
6196  * @param [in]table   Register containing the table of crc constants.
6197  *
6198  * uint32_t crc;
6199  * val = crc_table[(val ^ crc) & 0xFF];
6200  * crc = val ^ (crc >> 8);
6201  */
6202 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
6203   z_xr(val, crc);
6204   fold_byte_crc32(crc, val, table, val);
6205 }
6206 
6207 
6208 /**
6209  * @param crc   register containing existing CRC (32-bit)
6210  * @param buf   register pointing to input byte buffer (byte*)
6211  * @param len   register containing number of bytes
6212  * @param table register pointing to CRC table
6213  */
6214 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
6215   assert_different_registers(crc, buf, len, table, data);
6216 
6217   Label L_mainLoop, L_done;
6218   const int mainLoop_stepping = 1;
6219 
6220   // Process all bytes in a single-byte loop.
6221   z_ltr(len, len);
6222   z_brnh(L_done);
6223 
6224   bind(L_mainLoop);
6225     z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6226     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
6227     update_byte_crc32(crc, data, table);
6228     z_brct(len, L_mainLoop);                // Iterate.
6229 
6230   bind(L_done);
6231 }
6232 
6233 /**
6234  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
6235  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
6236  *
6237  */
6238 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
6239                                         Register t0,  Register t1,  Register t2,    Register t3) {
6240   // This is what we implement (the DOBIG4 part):
6241   //
6242   // #define DOBIG4 c ^= *++buf4; \
6243   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
6244   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
6245   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
6246   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
6247   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
6248   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
6249   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
6250   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
6251 
6252   // XOR crc with next four bytes of buffer.
6253   lgr_if_needed(t0, crc);
6254   z_x(t0, Address(buf, bufDisp));
6255   if (bufInc != 0) {
6256     add2reg(buf, bufInc);
6257   }
6258 
6259   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
6260   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
6261   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
6262   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
6263   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
6264 
6265   // XOR indexed table values to calculate updated crc.
6266   z_ly(t2, Address(table, t2, (intptr_t)ix1));
6267   z_ly(t0, Address(table, t0, (intptr_t)ix3));
6268   z_xy(t2, Address(table, t3, (intptr_t)ix0));
6269   z_xy(t0, Address(table, t1, (intptr_t)ix2));
6270   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
6271   lgr_if_needed(crc, t0);
6272 }
6273 
6274 /**
6275  * @param crc   register containing existing CRC (32-bit)
6276  * @param buf   register pointing to input byte buffer (byte*)
6277  * @param len   register containing number of bytes
6278  * @param table register pointing to CRC table
6279  *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6281  */
6282 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6283                                         Register t0,  Register t1,  Register t2,  Register t3,
6284                                         bool invertCRC) {
6285   assert_different_registers(crc, buf, len, table);
6286 
6287   Label L_mainLoop, L_tail;
6288   Register  data = t0;
6289   Register  ctr  = Z_R0;
6290   const int mainLoop_stepping = 4;
6291   const int log_stepping      = exact_log2(mainLoop_stepping);
6292 
6293   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6294   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6295   // The situation itself is detected and handled correctly by the conditional branches
6296   // following aghi(len, -stepping) and aghi(len, +stepping).
6297 
6298   if (invertCRC) {
6299     not_(crc, noreg, false);           // 1s complement of crc
6300   }
6301 
6302   // Check for short (<4 bytes) buffer.
6303   z_srag(ctr, len, log_stepping);
6304   z_brnh(L_tail);
6305 
6306   z_lrvr(crc, crc);          // Revert byte order because we are dealing with big-endian data.
6307   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6308 
6309   BIND(L_mainLoop);
6310     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6311     z_brct(ctr, L_mainLoop); // Iterate.
6312 
6313   z_lrvr(crc, crc);          // Revert byte order back to original.
6314 
6315   // Process last few (<8) bytes of buffer.
6316   BIND(L_tail);
6317   update_byteLoop_crc32(crc, buf, len, table, data);
6318 
6319   if (invertCRC) {
6320     not_(crc, noreg, false);           // 1s complement of crc
6321   }
6322 }
6323 
6324 /**
6325  * @param crc   register containing existing CRC (32-bit)
6326  * @param buf   register pointing to input byte buffer (byte*)
6327  * @param len   register containing number of bytes
6328  * @param table register pointing to CRC table
6329  */
6330 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
6331                                         Register t0,  Register t1,  Register t2,  Register t3,
6332                                         bool invertCRC) {
6333   assert_different_registers(crc, buf, len, table);
6334   Register data = t0;
6335 
6336   if (invertCRC) {
6337     not_(crc, noreg, false);           // 1s complement of crc
6338   }
6339 
6340   update_byteLoop_crc32(crc, buf, len, table, data);
6341 
6342   if (invertCRC) {
6343     not_(crc, noreg, false);           // 1s complement of crc
6344   }
6345 }
6346 
6347 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
6348                                              bool invertCRC) {
6349   assert_different_registers(crc, buf, len, table, tmp);
6350 
6351   if (invertCRC) {
6352     not_(crc, noreg, false);           // 1s complement of crc
6353   }
6354 
6355   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6356   update_byte_crc32(crc, tmp, table);
6357 
6358   if (invertCRC) {
6359     not_(crc, noreg, false);           // 1s complement of crc
6360   }
6361 }
6362 
6363 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
6364                                                 bool invertCRC) {
6365   assert_different_registers(crc, val, table);
6366 
6367   if (invertCRC) {
6368     not_(crc, noreg, false);           // 1s complement of crc
6369   }
6370 
6371   update_byte_crc32(crc, val, table);
6372 
6373   if (invertCRC) {
6374     not_(crc, noreg, false);           // 1s complement of crc
6375   }
6376 }
6377 
6378 //
6379 // Code for BigInteger::multiplyToLen() intrinsic.
6380 //
6381 
6382 // dest_lo += src1 + src2
6383 // dest_hi += carry1 + carry2
6384 // Z_R7 is destroyed !
6385 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
6386                                      Register src1, Register src2) {
6387   clear_reg(Z_R7);
6388   z_algr(dest_lo, src1);
6389   z_alcgr(dest_hi, Z_R7);
6390   z_algr(dest_lo, src2);
6391   z_alcgr(dest_hi, Z_R7);
6392 }
6393 
6394 // Multiply 64 bit by 64 bit first loop.
6395 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
6396                                            Register x_xstart,
6397                                            Register y, Register y_idx,
6398                                            Register z,
6399                                            Register carry,
6400                                            Register product,
6401                                            Register idx, Register kdx) {
6402   // jlong carry, x[], y[], z[];
6403   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6404   //   huge_128 product = y[idx] * x[xstart] + carry;
6405   //   z[kdx] = (jlong)product;
6406   //   carry  = (jlong)(product >>> 64);
6407   // }
6408   // z[xstart] = carry;
6409 
6410   Label L_first_loop, L_first_loop_exit;
6411   Label L_one_x, L_one_y, L_multiply;
6412 
6413   z_aghi(xstart, -1);
6414   z_brl(L_one_x);   // Special case: length of x is 1.
6415 
6416   // Load next two integers of x.
6417   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6418   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6419 
6420 
6421   bind(L_first_loop);
6422 
6423   z_aghi(idx, -1);
6424   z_brl(L_first_loop_exit);
6425   z_aghi(idx, -1);
6426   z_brl(L_one_y);
6427 
6428   // Load next two integers of y.
6429   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
6430   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
6431 
6432 
6433   bind(L_multiply);
6434 
6435   Register multiplicand = product->successor();
6436   Register product_low = multiplicand;
6437 
6438   lgr_if_needed(multiplicand, x_xstart);
6439   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
6440   clear_reg(Z_R7);
6441   z_algr(product_low, carry); // Add carry to result.
6442   z_alcgr(product, Z_R7);     // Add carry of the last addition.
6443   add2reg(kdx, -2);
6444 
6445   // Store result.
6446   z_sllg(Z_R7, kdx, LogBytesPerInt);
6447   reg2mem_opt(product_low, Address(z, Z_R7, 0));
6448   lgr_if_needed(carry, product);
6449   z_bru(L_first_loop);
6450 
6451 
6452   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
6453 
6454   clear_reg(y_idx);
6455   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
6456   z_bru(L_multiply);
6457 
6458 
6459   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
6460 
6461   clear_reg(x_xstart);
6462   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6463   z_bru(L_first_loop);
6464 
6465   bind(L_first_loop_exit);
6466 }
6467 
6468 // Multiply 64 bit by 64 bit and add 128 bit.
6469 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
6470                                             Register z,
6471                                             Register yz_idx, Register idx,
6472                                             Register carry, Register product,
6473                                             int offset) {
6474   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
6475   // z[kdx] = (jlong)product;
6476 
6477   Register multiplicand = product->successor();
6478   Register product_low = multiplicand;
6479 
6480   z_sllg(Z_R7, idx, LogBytesPerInt);
6481   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
6482 
6483   lgr_if_needed(multiplicand, x_xstart);
6484   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6485   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
6486 
6487   add2_with_carry(product, product_low, carry, yz_idx);
6488 
6489   z_sllg(Z_R7, idx, LogBytesPerInt);
6490   reg2mem_opt(product_low, Address(z, Z_R7, offset));
6491 
6492 }
6493 
6494 // Multiply 128 bit by 128 bit. Unrolled inner loop.
6495 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
6496                                              Register y, Register z,
6497                                              Register yz_idx, Register idx,
6498                                              Register jdx,
6499                                              Register carry, Register product,
6500                                              Register carry2) {
6501   // jlong carry, x[], y[], z[];
6502   // int kdx = ystart+1;
6503   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
6504   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
6505   //   z[kdx+idx+1] = (jlong)product;
6506   //   jlong carry2 = (jlong)(product >>> 64);
6507   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
6508   //   z[kdx+idx] = (jlong)product;
6509   //   carry = (jlong)(product >>> 64);
6510   // }
6511   // idx += 2;
6512   // if (idx > 0) {
6513   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
6514   //   z[kdx+idx] = (jlong)product;
6515   //   carry = (jlong)(product >>> 64);
6516   // }
6517 
6518   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
6519 
6520   // scale the index
6521   lgr_if_needed(jdx, idx);
6522   and_imm(jdx, 0xfffffffffffffffcL);
6523   rshift(jdx, 2);
6524 
6525 
6526   bind(L_third_loop);
6527 
6528   z_aghi(jdx, -1);
6529   z_brl(L_third_loop_exit);
6530   add2reg(idx, -4);
6531 
6532   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
6533   lgr_if_needed(carry2, product);
6534 
6535   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
6536   lgr_if_needed(carry, product);
6537   z_bru(L_third_loop);
6538 
6539 
6540   bind(L_third_loop_exit);  // Handle any left-over operand parts.
6541 
6542   and_imm(idx, 0x3);
6543   z_brz(L_post_third_loop_done);
6544 
6545   Label L_check_1;
6546 
6547   z_aghi(idx, -2);
6548   z_brl(L_check_1);
6549 
6550   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
6551   lgr_if_needed(carry, product);
6552 
6553 
6554   bind(L_check_1);
6555 
6556   add2reg(idx, 0x2);
6557   and_imm(idx, 0x1);
6558   z_aghi(idx, -1);
6559   z_brl(L_post_third_loop_done);
6560 
6561   Register   multiplicand = product->successor();
6562   Register   product_low = multiplicand;
6563 
6564   z_sllg(Z_R7, idx, LogBytesPerInt);
6565   clear_reg(yz_idx);
6566   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
6567   lgr_if_needed(multiplicand, x_xstart);
6568   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6569   clear_reg(yz_idx);
6570   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
6571 
6572   add2_with_carry(product, product_low, yz_idx, carry);
6573 
6574   z_sllg(Z_R7, idx, LogBytesPerInt);
6575   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
6576   rshift(product_low, 32);
6577 
6578   lshift(product, 32);
6579   z_ogr(product_low, product);
6580   lgr_if_needed(carry, product_low);
6581 
6582   bind(L_post_third_loop_done);
6583 }
6584 
6585 void MacroAssembler::multiply_to_len(Register x, Register xlen,
6586                                      Register y, Register ylen,
6587                                      Register z,
6588                                      Register tmp1, Register tmp2,
6589                                      Register tmp3, Register tmp4,
6590                                      Register tmp5) {
6591   ShortBranchVerifier sbv(this);
6592 
6593   assert_different_registers(x, xlen, y, ylen, z,
6594                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
6595   assert_different_registers(x, xlen, y, ylen, z,
6596                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
6597 
6598   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6599 
  // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
6601   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
6602 
6603   const Register idx = tmp1;
6604   const Register kdx = tmp2;
6605   const Register xstart = tmp3;
6606 
6607   const Register y_idx = tmp4;
6608   const Register carry = tmp5;
6609   const Register product  = Z_R0_scratch;
6610   const Register x_xstart = Z_R8;
6611 
6612   // First Loop.
6613   //
6614   //   final static long LONG_MASK = 0xffffffffL;
6615   //   int xstart = xlen - 1;
6616   //   int ystart = ylen - 1;
6617   //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6619   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
6620   //     z[kdx] = (int)product;
6621   //     carry = product >>> 32;
6622   //   }
6623   //   z[xstart] = (int)carry;
6624   //
6625 
6626   lgr_if_needed(idx, ylen);  // idx = ylen
6627   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
6628   clear_reg(carry);          // carry = 0
6629 
6630   Label L_done;
6631 
6632   lgr_if_needed(xstart, xlen);
6633   z_aghi(xstart, -1);
6634   z_brl(L_done);
6635 
6636   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
6637 
6638   NearLabel L_second_loop;
6639   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
6640 
6641   NearLabel L_carry;
6642   z_aghi(kdx, -1);
6643   z_brz(L_carry);
6644 
6645   // Store lower 32 bits of carry.
6646   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6647   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6648   rshift(carry, 32);
6649   z_aghi(kdx, -1);
6650 
6651 
6652   bind(L_carry);
6653 
6654   // Store upper 32 bits of carry.
6655   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6656   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6657 
6658   // Second and third (nested) loops.
6659   //
6660   // for (int i = xstart-1; i >= 0; i--) { // Second loop
6661   //   carry = 0;
6662   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
6663   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
6664   //                    (z[k] & LONG_MASK) + carry;
6665   //     z[k] = (int)product;
6666   //     carry = product >>> 32;
6667   //   }
6668   //   z[i] = (int)carry;
6669   // }
6670   //
6671   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = x_xstart
6672 
6673   const Register jdx = tmp1;
6674 
6675   bind(L_second_loop);
6676 
6677   clear_reg(carry);           // carry = 0;
6678   lgr_if_needed(jdx, ylen);   // j = ystart+1
6679 
6680   z_aghi(xstart, -1);         // i = xstart-1;
6681   z_brl(L_done);
6682 
6683   // Use free slots in the current stackframe instead of push/pop.
6684   Address zsave(Z_SP, _z_abi(carg_1));
6685   reg2mem_opt(z, zsave);
6686 
6687 
6688   Label L_last_x;
6689 
6690   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6691   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
6692   z_aghi(xstart, -1);                           // i = xstart-1;
6693   z_brl(L_last_x);
6694 
6695   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6696   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6697 
6698 
6699   Label L_third_loop_prologue;
6700 
6701   bind(L_third_loop_prologue);
6702 
6703   Address xsave(Z_SP, _z_abi(carg_2));
6704   Address xlensave(Z_SP, _z_abi(carg_3));
6705   Address ylensave(Z_SP, _z_abi(carg_4));
6706 
6707   reg2mem_opt(x, xsave);
6708   reg2mem_opt(xstart, xlensave);
6709   reg2mem_opt(ylen, ylensave);
6710 
6711 
6712   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
6713 
6714   mem2reg_opt(z, zsave);
6715   mem2reg_opt(x, xsave);
6716   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
6717   mem2reg_opt(ylen, ylensave);
6718 
6719   add2reg(tmp3, 1, xlen);
6720   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6721   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6722   z_aghi(tmp3, -1);
6723   z_brl(L_done);
6724 
6725   rshift(carry, 32);
6726   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6727   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6728   z_bru(L_second_loop);
6729 
6730   // The following, infrequently executed code is placed outside the loops.
6731   bind(L_last_x);
6732 
6733   clear_reg(x_xstart);
6734   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6735   z_bru(L_third_loop_prologue);
6736 
6737   bind(L_done);
6738 
6739   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6740 }
6741 
6742 #ifndef PRODUCT
6743 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
6744 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
6745   Label ok;
6746   if (check_equal) {
6747     z_bre(ok);
6748   } else {
6749     z_brne(ok);
6750   }
6751   stop(msg, id);
6752   bind(ok);
6753 }
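// Illustrative use (hypothetical values): after an instruction that sets the CC, e.g.
//   z_cghi(reg, 0);
//   asm_assert(true, "reg must be zero", 0x123);
// execution stops with the given message unless the CC indicates "equal".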
6754 
6755 // Assert if CC indicates "low".
6756 void MacroAssembler::asm_assert_low(const char *msg, int id) {
6757   Label ok;
6758   z_brnl(ok);
6759   stop(msg, id);
6760   bind(ok);
6761 }
6762 
6763 // Assert if CC indicates "high".
6764 void MacroAssembler::asm_assert_high(const char *msg, int id) {
6765   Label ok;
6766   z_brnh(ok);
6767   stop(msg, id);
6768   bind(ok);
6769 }
6770 
6771 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
6772 // generate non-relocatable code.
6773 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
6774   Label ok;
6775   if (check_equal) { z_bre(ok); }
6776   else             { z_brne(ok); }
6777   stop_static(msg, id);
6778   bind(ok);
6779 }
6780 
6781 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
6782                                           Register mem_base, const char* msg, int id) {
6783   switch (size) {
6784     case 4:
6785       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
6786       break;
6787     case 8:
6788       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
6789       break;
6790     default:
6791       ShouldNotReachHere();
6792   }
6793   if (allow_relocation) { asm_assert(check_equal, msg, id); }
6794   else                  { asm_assert_static(check_equal, msg, id); }
6795 }
6796 
6797 // Check the condition
6798 //   expected_size == FP - SP
6799 // after transformation:
6800 //   expected_size - FP + SP == 0
6801 // Destroys Register expected_size if no tmp register is passed.
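// Note: FP is not kept in a dedicated register here; the z_slg below subtracts
// the back chain word at 0(Z_SP), i.e. the caller's SP saved by the frame setup.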
6802 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
6803   if (tmp == noreg) {
6804     tmp = expected_size;
6805   } else {
6806     if (tmp != expected_size) {
6807       z_lgr(tmp, expected_size);
6808     }
6809   }
6810   z_algr(tmp, Z_SP);
6811   z_slg(tmp, 0, Z_R0, Z_SP);
6812   asm_assert_eq(msg, id);
6813 }
6814 #endif // !PRODUCT
6815 
6816 void MacroAssembler::verify_thread() {
6817   if (VerifyThread) {
6818     unimplemented("", 117);
6819   }
6820 }
6821 
6822 // Plausibility check for oops.
6823 void MacroAssembler::verify_oop(Register oop, const char* msg) {
6824   if (!VerifyOops) return;
6825 
6826   BLOCK_COMMENT("verify_oop {");
6827   Register tmp = Z_R0;
6828   unsigned int nbytes_save = 5*BytesPerWord;
6829   address entry = StubRoutines::verify_oop_subroutine_entry_address();
6830 
6831   save_return_pc();
6832   push_frame_abi160(nbytes_save);
6833   z_stmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
6834 
6835   z_lgr(Z_ARG2, oop);
6836   load_const(Z_ARG1, (address) msg);
6837   load_const(Z_R1, entry);
6838   z_lg(Z_R1, 0, Z_R1);
6839   call_c(Z_R1);
6840 
6841   z_lmg(Z_R1, Z_R5, frame::z_abi_160_size, Z_SP);
6842   pop_frame();
6843   restore_return_pc();
6844 
6845   BLOCK_COMMENT("} verify_oop ");
6846 }
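// Illustrative use (message text hypothetical), effective only with -XX:+VerifyOops:
//   verify_oop(Z_ARG2, "broken oop in call_VM");
// This calls the verify_oop subroutine stub with the oop and the message as arguments.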
6847 
6848 const char* MacroAssembler::stop_types[] = {
6849   "stop",
6850   "untested",
6851   "unimplemented",
6852   "shouldnotreachhere"
6853 };
6854 
6855 static void stop_on_request(const char* tp, const char* msg) {
6856   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
6857   guarantee(false, "Z assembly code requires stop: %s", msg);
6858 }
6859 
6860 void MacroAssembler::stop(int type, const char* msg, int id) {
6861   BLOCK_COMMENT(err_msg("stop: %s {", msg));
6862 
6863   // Setup arguments.
6864   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
6865   load_const(Z_ARG2, (void*) msg);
6866   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
6867   save_return_pc();  // Saves return pc Z_R14.
6868   push_frame_abi160(0);
6869   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6870   // The plain disassembler does not recognize illtrap. It instead displays
6871   // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
6872   // the proper beginning of the next instruction.
6873   z_illtrap(); // Illegal instruction.
6874   z_illtrap(); // Illegal instruction.
6875 
6876   BLOCK_COMMENT(" } stop");
6877 }
6878 
6879 // Special version of stop() for code size reduction.
6880 // Reuses the previously generated call sequence, if any.
6881 // Generates the call sequence on its own, if necessary.
6882 // Note: This code will work only in non-relocatable code!
6883 //       The relative address of the data elements (arg1, arg2) must not change.
6884 //       The reentry point must not move relative to its users. This prerequisite
6885 //       should be given for "hand-written" code, if all chain calls are in the same code blob.
6886 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
6887 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
6888   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
6889 
6890   // Setup arguments.
6891   if (allow_relocation) {
6892     // Relocatable version (for comparison purposes). Remove after some time.
6893     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
6894     load_const(Z_ARG2, (void*) msg);
6895   } else {
6896     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
6897     load_absolute_address(Z_ARG2, (address)msg);
6898   }
6899   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
6900     BLOCK_COMMENT("branch to reentry point:");
6901     z_brc(bcondAlways, reentry);
6902   } else {
6903     BLOCK_COMMENT("reentry point:");
6904     reentry = pc();      // Re-entry point for subsequent stop calls.
6905     save_return_pc();    // Saves return pc Z_R14.
6906     push_frame_abi160(0);
6907     if (allow_relocation) {
6908       reentry = NULL;    // Prevent reentry if code relocation is allowed.
6909       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6910     } else {
6911       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6912     }
6913     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
6914   }
6915   BLOCK_COMMENT(" } stop_chain");
6916 
6917   return reentry;
6918 }
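// Sketch of the intended chaining pattern (illustrative; non-relocatable code only):
//   address reentry = NULL;
//   reentry = stop_chain(reentry, type, "assert #1", id, false); // emits the full call sequence
//   reentry = stop_chain(reentry, type, "assert #2", id, false); // emits only a short branch back to it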
6919 
6920 // Special version of stop() for code size reduction.
6921 // Assumes constant relative addresses for data and runtime call.
6922 void MacroAssembler::stop_static(int type, const char* msg, int id) {
6923   stop_chain(NULL, type, msg, id, false);
6924 }
6925 
6926 void MacroAssembler::stop_subroutine() {
6927   unimplemented("stop_subroutine", 710);
6928 }
6929 
6930 // Prints msg to stdout from within generated code.
6931 void MacroAssembler::warn(const char* msg) {
6932   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
6933   load_absolute_address(Z_R1, (address) warning);
6934   load_absolute_address(Z_ARG1, (address) msg);
6935   (void) call(Z_R1);
6936   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
6937 }
6938 
6939 #ifndef PRODUCT
6940 
6941 // Write the pattern 0x0101010101010101 to the region [low - before*BytesPerWord, high + after*BytesPerWord].
6942 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
6943   if (!ZapEmptyStackFields) return;
6944   BLOCK_COMMENT("zap memory region {");
6945   load_const_optimized(val, 0x0101010101010101);
6946   int size = before + after;
6947   if (low == high && size < 5 && size > 0) {
6948     int offset = -before*BytesPerWord;
6949     for (int i = 0; i < size; ++i) {
6950       z_stg(val, Address(low, offset));
6951       offset += BytesPerWord;
6952     }
6953   } else {
6954     add2reg(addr, -before*BytesPerWord, low);
6955     if (after) {
6956 #ifdef ASSERT
6957       jlong check = after * BytesPerWord;
6958       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
6959 #endif
6960       add2reg(high, after * BytesPerWord);
6961     }
6962     NearLabel loop;
6963     bind(loop);
6964     z_stg(val, Address(addr));
6965     add2reg(addr, 8);
6966     compare64_and_branch(addr, high, bcondNotHigh, loop);
6967     if (after) {
6968       add2reg(high, -after * BytesPerWord);
6969     }
6970   }
6971   BLOCK_COMMENT("} zap memory region");
6972 }
6973 #endif // !PRODUCT
6974 
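// SkipIfEqual: emits code that, at runtime, skips the instructions generated between
// construction and destruction when the bool at flag_addr equals 'value'.
// Typical use (as with DTraceMethodProbes on other platforms; illustrative):
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // ... code emitted here runs only when DTraceMethodProbes is true ...
//   }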
6975 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
6976   _masm = masm;
6977   _masm->load_absolute_address(_rscratch, (address)flag_addr);
6978   _masm->load_and_test_int(_rscratch, Address(_rscratch));
6979   if (value) {
6980     _masm->z_brne(_label); // Skip if true, i.e. != 0.
6981   } else {
6982     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
6983   }
6984 }
6985 
6986 SkipIfEqual::~SkipIfEqual() {
6987   _masm->bind(_label);
6988 }