1 /*
   2  * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/codeBuffer.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "compiler/disassembler.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "gc/shared/cardTableModRefBS.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "memory/universe.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "opto/compile.hpp"
  37 #include "opto/intrinsicnode.hpp"
  38 #include "opto/matcher.hpp"
  39 #include "prims/methodHandles.hpp"
  40 #include "registerSaver_s390.hpp"
  41 #include "runtime/biasedLocking.hpp"
  42 #include "runtime/icache.hpp"
  43 #include "runtime/interfaceSupport.hpp"
  44 #include "runtime/objectMonitor.hpp"
  45 #include "runtime/os.hpp"
  46 #include "runtime/sharedRuntime.hpp"
  47 #include "runtime/stubRoutines.hpp"
  48 #include "utilities/events.hpp"
  49 #include "utilities/macros.hpp"
  50 #if INCLUDE_ALL_GCS
  51 #include "gc/g1/g1CollectedHeap.inline.hpp"
  52 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
  53 #include "gc/g1/heapRegion.hpp"
  54 #endif
  55 
  56 #include <ucontext.h>
  57 
  58 #define BLOCK_COMMENT(str) block_comment(str)
  59 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
  60 
  61 // Move 32-bit register if destination and source are different.
  62 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
  63   if (rs != rd) { z_lr(rd, rs); }
  64 }
  65 
  66 // Move register if destination and source are different.
  67 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
  68   if (rs != rd) { z_lgr(rd, rs); }
  69 }
  70 
  71 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
  72 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
  73   if (rs != rd) { z_llgfr(rd, rs); }
  74 }
  75 
  76 // Move float register if destination and source are different.
  77 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
  78   if (rs != rd) { z_ldr(rd, rs); }
  79 }
  80 
  81 // Move integer register if destination and source are different.
  82 // It is assumed that shorter-than-int types are already
  83 // appropriately sign-extended.
  84 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
  85                                         BasicType src_type) {
  86   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
  87   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
  88 
  89   if (dst_type == src_type) {
  90     lgr_if_needed(dst, src); // Just move all 64 bits.
  91     return;
  92   }
  93 
  94   switch (dst_type) {
  95     // Do not support these types for now.
  96     //  case T_BOOLEAN:
  97     case T_BYTE:  // signed byte
  98       switch (src_type) {
  99         case T_INT:
 100           z_lgbr(dst, src);
 101           break;
 102         default:
 103           ShouldNotReachHere();
 104       }
 105       return;
 106 
 107     case T_CHAR:
 108     case T_SHORT:
 109       switch (src_type) {
 110         case T_INT:
 111           if (dst_type == T_CHAR) {
 112             z_llghr(dst, src);
 113           } else {
 114             z_lghr(dst, src);
 115           }
 116           break;
 117         default:
 118           ShouldNotReachHere();
 119       }
 120       return;
 121 
 122     case T_INT:
 123       switch (src_type) {
 124         case T_BOOLEAN:
 125         case T_BYTE:
 126         case T_CHAR:
 127         case T_SHORT:
 128         case T_INT:
 129         case T_LONG:
 130         case T_OBJECT:
 131         case T_ARRAY:
 132         case T_VOID:
 133         case T_ADDRESS:
 134           lr_if_needed(dst, src);
 135           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
 136           return;
 137 
 138         default:
 139           assert(false, "non-integer src type");
 140           return;
 141       }
 142     case T_LONG:
 143       switch (src_type) {
 144         case T_BOOLEAN:
 145         case T_BYTE:
 146         case T_CHAR:
 147         case T_SHORT:
 148         case T_INT:
 149           z_lgfr(dst, src); // sign extension
 150           return;
 151 
 152         case T_LONG:
 153         case T_OBJECT:
 154         case T_ARRAY:
 155         case T_VOID:
 156         case T_ADDRESS:
 157           lgr_if_needed(dst, src);
 158           return;
 159 
 160         default:
 161           assert(false, "non-integer src type");
 162           return;
 163       }
 164       return;
 165     case T_OBJECT:
 166     case T_ARRAY:
 167     case T_VOID:
 168     case T_ADDRESS:
 169       switch (src_type) {
 170         // These types don't make sense to be converted to pointers:
 171         //      case T_BOOLEAN:
 172         //      case T_BYTE:
 173         //      case T_CHAR:
 174         //      case T_SHORT:
 175 
 176         case T_INT:
 177           z_llgfr(dst, src); // zero extension
 178           return;
 179 
 180         case T_LONG:
 181         case T_OBJECT:
 182         case T_ARRAY:
 183         case T_VOID:
 184         case T_ADDRESS:
 185           lgr_if_needed(dst, src);
 186           return;
 187 
 188         default:
 189           assert(false, "non-integer src type");
 190           return;
 191       }
 192       return;
 193     default:
 194       assert(false, "non-integer dst type");
 195       return;
 196   }
 197 }
 198 
 199 // Move float register if destination and source are different.
 200 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
 201                                          FloatRegister src, BasicType src_type) {
 202   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
 203   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
 204   if (dst_type == src_type) {
 205     ldr_if_needed(dst, src); // Just move all 64 bits.
 206   } else {
 207     switch (dst_type) {
 208       case T_FLOAT:
 209         assert(src_type == T_DOUBLE, "invalid float type combination");
 210         z_ledbr(dst, src);
 211         return;
 212       case T_DOUBLE:
 213         assert(src_type == T_FLOAT, "invalid float type combination");
 214         z_ldebr(dst, src);
 215         return;
 216       default:
 217         assert(false, "non-float dst type");
 218         return;
 219     }
 220   }
 221 }
 222 
 223 // Optimized emitter for reg to mem operations.
 224 // Uses modern instructions if running on modern hardware, classic instructions
 225 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 226 // Data register (reg) cannot be used as work register.
 227 //
 228 // Don't rely on register locking; instead, pass a scratch register (Z_R0 by default).
 229 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
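     // For illustration: the "classic" forms (e.g. STD) take a 12-bit unsigned displacement
     // (0..4095), while the "modern" long-displacement forms (e.g. STDY) take a 20-bit signed
     // displacement. A call with disp == 8000, for example, cannot use the classic encoding
     // and is emitted with the modern instruction instead.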
 230 void MacroAssembler::freg2mem_opt(FloatRegister reg,
 231                                   int64_t       disp,
 232                                   Register      index,
 233                                   Register      base,
 234                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 235                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 236                                   Register      scratch) {
 237   index = (index == noreg) ? Z_R0 : index;
 238   if (Displacement::is_shortDisp(disp)) {
 239     (this->*classic)(reg, disp, index, base);
 240   } else {
 241     if (Displacement::is_validDisp(disp)) {
 242       (this->*modern)(reg, disp, index, base);
 243     } else {
 244       if (scratch != Z_R0 && scratch != Z_R1) {
 245         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 246       } else {
 247         if (scratch != Z_R0) {   // scratch == Z_R1
 248           if ((scratch == index) || (index == base)) {
 249             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 250           } else {
 251             add2reg(scratch, disp, base);
 252             (this->*classic)(reg, 0, index, scratch);
 253             if (base == scratch) {
 254               add2reg(base, -disp);  // Restore base.
 255             }
 256           }
 257         } else {   // scratch == Z_R0
 258           z_lgr(scratch, base);
 259           add2reg(base, disp);
 260           (this->*classic)(reg, 0, index, base);
 261           z_lgr(base, scratch);      // Restore base.
 262         }
 263       }
 264     }
 265   }
 266 }
 267 
 268 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
 269   if (is_double) {
 270     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
 271   } else {
 272     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
 273   }
 274 }
 275 
 276 // Optimized emitter for mem to reg operations.
 277 // Uses modern instructions if running on modern hardware, classic instructions
 278 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 279 // Data register (reg) cannot be used as work register.
 280 //
 281 // Don't rely on register locking; instead, pass a scratch register (Z_R0 by default).
 282 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 283 void MacroAssembler::mem2freg_opt(FloatRegister reg,
 284                                   int64_t       disp,
 285                                   Register      index,
 286                                   Register      base,
 287                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
 288                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
 289                                   Register      scratch) {
 290   index = (index == noreg) ? Z_R0 : index;
 291   if (Displacement::is_shortDisp(disp)) {
 292     (this->*classic)(reg, disp, index, base);
 293   } else {
 294     if (Displacement::is_validDisp(disp)) {
 295       (this->*modern)(reg, disp, index, base);
 296     } else {
 297       if (scratch != Z_R0 && scratch != Z_R1) {
 298         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 299       } else {
 300         if (scratch != Z_R0) {   // scratch == Z_R1
 301           if ((scratch == index) || (index == base)) {
 302             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 303           } else {
 304             add2reg(scratch, disp, base);
 305             (this->*classic)(reg, 0, index, scratch);
 306             if (base == scratch) {
 307               add2reg(base, -disp);  // Restore base.
 308             }
 309           }
 310         } else {   // scratch == Z_R0
 311           z_lgr(scratch, base);
 312           add2reg(base, disp);
 313           (this->*classic)(reg, 0, index, base);
 314           z_lgr(base, scratch);      // Restore base.
 315         }
 316       }
 317     }
 318   }
 319 }
 320 
 321 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
 322   if (is_double) {
 323     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
 324   } else {
 325     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
 326   }
 327 }
 328 
 329 // Optimized emitter for reg to mem operations.
 330 // Uses modern instructions if running on modern hardware, classic instructions
 331 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 332 // Data register (reg) cannot be used as work register.
 333 //
 334 // Don't rely on register locking; instead, pass a scratch register
 335 // (Z_R0 by default).
 336 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
 337 void MacroAssembler::reg2mem_opt(Register reg,
 338                                  int64_t  disp,
 339                                  Register index,
 340                                  Register base,
 341                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 342                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
 343                                  Register scratch) {
 344   index = (index == noreg) ? Z_R0 : index;
 345   if (Displacement::is_shortDisp(disp)) {
 346     (this->*classic)(reg, disp, index, base);
 347   } else {
 348     if (Displacement::is_validDisp(disp)) {
 349       (this->*modern)(reg, disp, index, base);
 350     } else {
 351       if (scratch != Z_R0 && scratch != Z_R1) {
 352         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
 353       } else {
 354         if (scratch != Z_R0) {   // scratch == Z_R1
 355           if ((scratch == index) || (index == base)) {
 356             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 357           } else {
 358             add2reg(scratch, disp, base);
 359             (this->*classic)(reg, 0, index, scratch);
 360             if (base == scratch) {
 361               add2reg(base, -disp);  // Restore base.
 362             }
 363           }
 364         } else {   // scratch == Z_R0
 365           if ((scratch == reg) || (scratch == base) || (reg == base)) {
 366             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
 367           } else {
 368             z_lgr(scratch, base);
 369             add2reg(base, disp);
 370             (this->*classic)(reg, 0, index, base);
 371             z_lgr(base, scratch);    // Restore base.
 372           }
 373         }
 374       }
 375     }
 376   }
 377 }
 378 
 379 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
 380   int store_offset = offset();
 381   if (is_double) {
 382     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
 383   } else {
 384     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
 385   }
 386   return store_offset;
 387 }
 388 
 389 // Optimized emitter for mem to reg operations.
 390 // Uses modern instructions if running on modern hardware, classic instructions
 391 // otherwise. Prefers (usually shorter) classic instructions if applicable.
 392 // Data register (reg) will be used as work register where possible.
 393 void MacroAssembler::mem2reg_opt(Register reg,
 394                                  int64_t  disp,
 395                                  Register index,
 396                                  Register base,
 397                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
 398                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
 399   index = (index == noreg) ? Z_R0 : index;
 400   if (Displacement::is_shortDisp(disp)) {
 401     (this->*classic)(reg, disp, index, base);
 402   } else {
 403     if (Displacement::is_validDisp(disp)) {
 404       (this->*modern)(reg, disp, index, base);
 405     } else {
 406       if ((reg == index) && (reg == base)) {
 407         z_sllg(reg, reg, 1);
 408         add2reg(reg, disp);
 409         (this->*classic)(reg, 0, noreg, reg);
 410       } else if ((reg == index) && (reg != Z_R0)) {
 411         add2reg(reg, disp);
 412         (this->*classic)(reg, 0, reg, base);
 413       } else if (reg == base) {
 414         add2reg(reg, disp);
 415         (this->*classic)(reg, 0, index, reg);
 416       } else if (reg != Z_R0) {
 417         add2reg(reg, disp, base);
 418         (this->*classic)(reg, 0, index, reg);
 419       } else { // reg == Z_R0 && reg != base here
 420         add2reg(base, disp);
 421         (this->*classic)(reg, 0, index, base);
 422         add2reg(base, -disp);
 423       }
 424     }
 425   }
 426 }
 427 
 428 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
 429   if (is_double) {
 430     z_lg(reg, a);
 431   } else {
 432     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
 433   }
 434 }
 435 
 436 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
 437   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
 438 }
 439 
 440 void MacroAssembler::and_imm(Register r, long mask,
 441                              Register tmp /* = Z_R0 */,
 442                              bool wide    /* = false */) {
 443   assert(wide || Immediate::is_simm32(mask), "mask value too large");
 444 
 445   if (!wide) {
 446     z_nilf(r, mask);
 447     return;
 448   }
 449 
 450   assert(r != tmp, " need a different temporary register !");
 451   load_const_optimized(tmp, mask);
 452   z_ngr(r, tmp);
 453 }
 454 
 455 // Calculate the 1's complement.
 456 // Note: The condition code is neither preserved nor correctly set by this code!!!
 457 // Note: (wide == false) does not protect the high order half of the target register
 458 //       from alteration. It only serves as an optimization hint for 32-bit results.
 459 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
 460 
 461   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
 462     z_xilf(r1, -1);
 463     if (wide) {
 464       z_xihf(r1, -1);
 465     }
 466   } else { // Distinct src and dst registers.
 467     if (VM_Version::has_DistinctOpnds()) {
 468       load_const_optimized(r1, -1);
 469       z_xgrk(r1, r2, r1);
 470     } else {
 471       if (wide) {
 472         z_lgr(r1, r2);
 473         z_xilf(r1, -1);
 474         z_xihf(r1, -1);
 475       } else {
 476         z_lr(r1, r2);
 477         z_xilf(r1, -1);
 478       }
 479     }
 480   }
 481 }
 482 
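     // Create a 64-bit mask with 1s in bit positions lBitPos..rBitPos (inclusive),
     // bit 0 being the leftmost (most significant) bit. Worked examples (illustrative):
     //   create_mask( 0, 63) == 0xffffffffffffffff
     //   create_mask( 0, 31) == 0xffffffff00000000
     //   create_mask(48, 63) == 0x000000000000ffff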
 483 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
 484   assert(lBitPos >=  0,      "zero is  leftmost bit position");
 485   assert(rBitPos <= 63,      "63   is rightmost bit position");
 486   assert(lBitPos <= rBitPos, "inverted selection interval");
 487   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
 488 }
 489 
 490 // Helper function for the "Rotate_then_<logicalOP>" emitters.
 491 // Rotate src, then mask register contents such that only bits in range survive.
 492 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
 493 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
 494 // The caller must ensure that the selected range only contains bits with defined value.
 495 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
 496                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
 497   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
 498   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
 499   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
 500   //  Pre-determine which parts of dst will be zero after shift/rotate.
 501   bool llZero  =  sll4rll && (nRotate >= 16);
 502   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
 503   bool lfZero  = llZero && lhZero;
 504   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
 505   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
 506   bool hfZero  = hlZero && hhZero;
 507 
 508   // rotate then mask src operand.
 509   // if oneBits == true,  all bits outside selected range are 1s.
 510   // if oneBits == false, all bits outside selected range are 0s.
 511   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
 512     if (dst32bit) {
 513       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
 514     } else {
 515       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 516       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 517       else              { z_rllg(dst, src,  nRotate); }
 518     }
 519   } else {
 520     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
 521     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
 522     else              { z_rllg(dst, src,  nRotate); }
 523   }
 524 
 525   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
 526   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
 527   unsigned int   range_mask_l  = (unsigned int)range_mask;
 528   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
 529   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
 530   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
 531   unsigned short range_mask_ll = (unsigned short)range_mask;
 532   // Works for z9 and newer H/W.
 533   if (oneBits) {
 534     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
 535     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
 536   } else {
 537     // All bits outside range become 0s
 538     if (((~range_mask_l) != 0) &&              !lfZero) {
 539       z_nilf(dst, range_mask_l);
 540     }
 541     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
 542       z_nihf(dst, range_mask_h);
 543     }
 544   }
 545 }
 546 
 547 // Rotate src, then insert selected range from rotated src into dst.
 548 // Clear dst before, if requested.
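     // For instance (illustrative), rotate_then_insert(dst, src, 48, 63, 0, true)
     // copies the low 16 bits of src into dst and clears all other dst bits.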
 549 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
 550                                         int nRotate, bool clear_dst) {
 551   // This version does not depend on src being zero-extended int2long.
 552   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 553   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
 554 }
 555 
 556 // Rotate src, then and selected range from rotated src into dst.
 557 // Set condition code only if so requested. Otherwise it is unpredictable.
 558 // See performance note in macroAssembler_s390.hpp for important information.
 559 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
 560                                      int nRotate, bool test_only) {
 561   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 562   // This version does not depend on src being zero-extended int2long.
 563   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 564   z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
 565 }
 566 
 567 // Rotate src, then or selected range from rotated src into dst.
 568 // Set condition code only if so requested. Otherwise it is unpredictable.
 569 // See performance note in macroAssembler_s390.hpp for important information.
 570 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 571                                     int nRotate, bool test_only) {
 572   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 573   // This version does not depend on src being zero-extended int2long.
 574   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 575   z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
 576 }
 577 
 578 // Rotate src, then xor selected range from rotated src into dst.
 579 // Set condition code only if so requested. Otherwise it is unpredictable.
 580 // See performance note in macroAssembler_s390.hpp for important information.
 581 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
 582                                      int nRotate, bool test_only) {
 583   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
 584   // This version does not depend on src being zero-extended int2long.
 585   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
 586   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
 587 }
 588 
 589 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
 590   if (inc.is_register()) {
 591     z_agr(r1, inc.as_register());
 592   } else { // constant
 593     intptr_t imm = inc.as_constant();
 594     add2reg(r1, imm);
 595   }
 596 }
 597 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
 598 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
 599 // calculation and is thus rather slow.
 600 //
 601 // There is no handling for special cases, e.g. cval==0 or cval==1.
 602 //
 603 // Returns len of generated code block.
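     // Worked example (illustrative): cval == 10 decomposes into bit1 == 2 and bit2 == 8,
     // so the emitter produces work = rval<<1; rval = rval<<3; rval += work, i.e. rval*10,
     // without using mghi. A value like cval == 7 has no such two-bit decomposition and
     // falls back to mghi.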
 604 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
 605   int block_start = offset();
 606 
 607   bool sign_flip = cval < 0;
 608   cval = sign_flip ? -cval : cval;
 609 
 610   BLOCK_COMMENT("Reg64*Con16 {");
 611 
 612   int bit1 = cval & -cval;
 613   if (bit1 == cval) {
 614     z_sllg(rval, rval, exact_log2(bit1));
 615     if (sign_flip) { z_lcgr(rval, rval); }
 616   } else {
 617     int bit2 = (cval-bit1) & -(cval-bit1);
 618     if ((bit1+bit2) == cval) {
 619       z_sllg(work, rval, exact_log2(bit1));
 620       z_sllg(rval, rval, exact_log2(bit2));
 621       z_agr(rval, work);
 622       if (sign_flip) { z_lcgr(rval, rval); }
 623     } else {
 624       if (sign_flip) { z_mghi(rval, -cval); }
 625       else           { z_mghi(rval,  cval); }
 626     }
 627   }
 628   BLOCK_COMMENT("} Reg64*Con16");
 629 
 630   int block_end = offset();
 631   return block_end - block_start;
 632 }
 633 
 634 // Generic operation r1 := r2 + imm.
 635 //
 636 // Should produce the best code for each supported CPU version.
 637 // r2 == noreg yields r1 := r1 + imm
 638 // imm == 0 emits either no instruction or r1 := r2 !
 639 // NOTES: 1) Don't use this function where fixed sized
 640 //           instruction sequences are required!!!
 641 //        2) Don't use this function if condition code
 642 //           setting is required!
 643 //        3) Despite being declared as int64_t, the parameter imm
 644 //           must be a simm_32 value (= signed 32-bit integer).
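     // Illustrative example: add2reg(Z_R1, 8) emits LA Z_R1,8(Z_R1) when PreferLAoverADD
     // is set (and Z_R1 != Z_R0), or AGHI Z_R1,8 otherwise.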
 645 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
 646   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
 647 
 648   if (r2 == noreg) { r2 = r1; }
 649 
 650   // Handle special case imm == 0.
 651   if (imm == 0) {
 652     lgr_if_needed(r1, r2);
 653     // Nothing else to do.
 654     return;
 655   }
 656 
 657   if (!PreferLAoverADD || (r2 == Z_R0)) {
 658     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 659 
 660     // Can we encode imm in 16 bits signed?
 661     if (Immediate::is_simm16(imm)) {
 662       if (r1 == r2) {
 663         z_aghi(r1, imm);
 664         return;
 665       }
 666       if (distinctOpnds) {
 667         z_aghik(r1, r2, imm);
 668         return;
 669       }
 670       z_lgr(r1, r2);
 671       z_aghi(r1, imm);
 672       return;
 673     }
 674   } else {
 675     // Can we encode imm in 12 bits unsigned?
 676     if (Displacement::is_shortDisp(imm)) {
 677       z_la(r1, imm, r2);
 678       return;
 679     }
 680     // Can we encode imm in 20 bits signed?
 681     if (Displacement::is_validDisp(imm)) {
 682       // Always use LAY instruction, so we don't need the tmp register.
 683       z_lay(r1, imm, r2);
 684       return;
 685     }
 686 
 687   }
 688 
 689   // Can handle it (all possible values) with long immediates.
 690   lgr_if_needed(r1, r2);
 691   z_agfi(r1, imm);
 692 }
 693 
 694 // Generic operation r := b + x + d
 695 //
 696 // Addition of several operands with address generation semantics - sort of:
 697 //  - no restriction on the registers. Any register will do for any operand.
 698 //  - x == noreg: operand will be disregarded.
 699 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
 700 //  - x == Z_R0:  just disregard
 701 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
 702 //
 703 // The same restrictions as on add2reg() are valid!!!
 704 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
 705   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
 706 
 707   if (x == noreg) { x = Z_R0; }
 708   if (b == noreg) { b = r; }
 709 
 710   // Handle special case x == R0.
 711   if (x == Z_R0) {
 712     // Can simply add the immediate value to the base register.
 713     add2reg(r, d, b);
 714     return;
 715   }
 716 
 717   if (!PreferLAoverADD || (b == Z_R0)) {
 718     bool distinctOpnds = VM_Version::has_DistinctOpnds();
 719     // Handle special case d == 0.
 720     if (d == 0) {
 721       if (b == x)        { z_sllg(r, b, 1); return; }
 722       if (r == x)        { z_agr(r, b);     return; }
 723       if (r == b)        { z_agr(r, x);     return; }
 724       if (distinctOpnds) { z_agrk(r, x, b); return; }
 725       z_lgr(r, b);
 726       z_agr(r, x);
 727     } else {
 728       if (x == b)             { z_sllg(r, x, 1); }
 729       else if (r == x)        { z_agr(r, b); }
 730       else if (r == b)        { z_agr(r, x); }
 731       else if (distinctOpnds) { z_agrk(r, x, b); }
 732       else {
 733         z_lgr(r, b);
 734         z_agr(r, x);
 735       }
 736       add2reg(r, d);
 737     }
 738   } else {
 739     // Can we encode imm in 12 bits unsigned?
 740     if (Displacement::is_shortDisp(d)) {
 741       z_la(r, d, x, b);
 742       return;
 743     }
 744     // Can we encode imm in 20 bits signed?
 745     if (Displacement::is_validDisp(d)) {
 746       z_lay(r, d, x, b);
 747       return;
 748     }
 749     z_la(r, 0, x, b);
 750     add2reg(r, d);
 751   }
 752 }
 753 
 754 // Generic emitter (32bit) for direct memory increment.
 755 // For optimal code, do not specify Z_R0 as temp register.
 756 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
 757   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 758     z_asi(a, imm);
 759   } else {
 760     z_lgf(tmp, a);
 761     add2reg(tmp, imm);
 762     z_st(tmp, a);
 763   }
 764 }
 765 
 766 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
 767   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
 768     z_agsi(a, imm);
 769   } else {
 770     z_lg(tmp, a);
 771     add2reg(tmp, imm);
 772     z_stg(tmp, a);
 773   }
 774 }
 775 
 776 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
 777   switch (size_in_bytes) {
 778     case  8: z_lg(dst, src); break;
 779     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
 780     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
 781     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
 782     default: ShouldNotReachHere();
 783   }
 784 }
 785 
 786 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
 787   switch (size_in_bytes) {
 788     case  8: z_stg(src, dst); break;
 789     case  4: z_st(src, dst); break;
 790     case  2: z_sth(src, dst); break;
 791     case  1: z_stc(src, dst); break;
 792     default: ShouldNotReachHere();
 793   }
 794 }
 795 
 796 // Split a si20 offset (20bit, signed) into a ui12 offset (12bit, unsigned) and
 797 // a high-order summand in register tmp.
 798 //
 799 // return value: <  0: No split required, si20 actually has property uimm12.
 800 //               >= 0: Split performed. Use return value as uimm12 displacement and
 801 //                     tmp as index register.
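     // Worked example (illustrative): si20_offset == 0x12345 is split into a high part
     // 0x12000 (materialized in the work register) and the returned uimm12 part 0x345.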
 802 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
 803   assert(Immediate::is_simm20(si20_offset), "sanity");
 804   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
 805   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
 806   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
 807          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
 808   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
 809 
 810   Register work = accumulate? Z_R0 : tmp;
 811 
 812   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
 813     z_lghi(work, ll_off>>12);   // Implicit sign extension.
 814     z_slag(work, work, 12);
 815   } else {                      // Len of code = 0..10.
 816     if (ll_off == 0) { return -1; }
 817     // ll_off has 8 significant bits (at most) plus sign.
 818     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
 819       z_llilh(work, ll_off >> 16);
 820       if (ll_off < 0) {                  // Sign-extension required.
 821         z_lgfr(work, work);
 822       }
 823     } else {
 824       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
 825         z_llill(work, ll_off);
 826       } else {                           // Non-zero bits in both halfbytes.
 827         z_lghi(work, ll_off>>12);        // Implicit sign extension.
 828         z_slag(work, work, 12);
 829       }
 830     }
 831   }
 832   if (accumulate) { z_algr(tmp, work); } // len of code += 4
 833   return lg_off;
 834 }
 835 
 836 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 837   if (Displacement::is_validDisp(si20)) {
 838     z_ley(t, si20, a);
 839   } else {
 840     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
 841     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 842     // pool loads).
 843     bool accumulate    = true;
 844     bool fixed_codelen = true;
 845     Register work;
 846 
 847     if (fixed_codelen) {
 848       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 849     } else {
 850       accumulate = (a == tmp);
 851     }
 852     work = tmp;
 853 
 854     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 855     if (disp12 < 0) {
 856       z_le(t, si20, work);
 857     } else {
 858       if (accumulate) {
 859         z_le(t, disp12, work);
 860       } else {
 861         z_le(t, disp12, work, a);
 862       }
 863     }
 864   }
 865 }
 866 
 867 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
 868   if (Displacement::is_validDisp(si20)) {
 869     z_ldy(t, si20, a);
 870   } else {
 871     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
 872     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
 873     // pool loads).
 874     bool accumulate    = true;
 875     bool fixed_codelen = true;
 876     Register work;
 877 
 878     if (fixed_codelen) {
 879       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
 880     } else {
 881       accumulate = (a == tmp);
 882     }
 883     work = tmp;
 884 
 885     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
 886     if (disp12 < 0) {
 887       z_ld(t, si20, work);
 888     } else {
 889       if (accumulate) {
 890         z_ld(t, disp12, work);
 891       } else {
 892         z_ld(t, disp12, work, a);
 893       }
 894     }
 895   }
 896 }
 897 
 898 // PCrelative TOC access.
 899 // Returns distance (in bytes) from current position to start of consts section.
 900 // Returns 0 (zero) if no consts section exists or if it has size zero.
 901 long MacroAssembler::toc_distance() {
 902   CodeSection* cs = code()->consts();
 903   return (long)((cs != NULL) ? cs->start()-pc() : 0);
 904 }
 905 
 906 // Implementation on x86/sparc assumes that constant and instruction section are
 907 // adjacent, but this doesn't hold here. Two special situations may occur that we
 908 // must be able to handle:
 909 //   1. The const section may be located apart from the inst section.
 910 //   2. The const section may be empty.
 911 // In both cases, we use the const section's start address to compute the "TOC".
 912 // This seems to occur only temporarily; in the final step we always seem to end up
 913 // with the pc-relative variant.
 914 //
 915 // PC-relative offset could be +/-2**32 -> use long for disp
 916 // Furthermore, it makes no sense to have special code for
 917 // adjacent const and inst sections.
 918 void MacroAssembler::load_toc(Register Rtoc) {
 919   // Simply use distance from start of const section (should be patched in the end).
 920   long disp = toc_distance();
 921 
 922   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
 923   relocate(rspec);
 924   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
 925 }
 926 
 927 // PCrelative TOC access.
 928 // Load from anywhere pcrelative (with relocation of load instr)
 929 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
 930   address          pc             = this->pc();
 931   ptrdiff_t        total_distance = dataLocation - pc;
 932   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
 933 
 934   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 935   assert(total_distance != 0, "sanity");
 936 
 937   // Some extra safety net.
 938   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 939     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
 940   }
 941 
 942   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 943   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 944 }
 945 
 946 
 947 // PCrelative TOC access.
 948 // Load from anywhere pcrelative (with relocation of load instr)
 949 // loaded addr has to be relocated when added to constant pool.
 950 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
 951   address          pc             = this->pc();
 952   ptrdiff_t        total_distance = addrLocation - pc;
 953   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
 954 
 955   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
 956 
 957   // Some extra safety net.
 958   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
 959     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "too far away");
 960   }
 961 
 962   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
 963   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
 964 }
 965 
 966 // Generic operation: load a value from memory and test.
 967 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
 968 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
 969   z_lb(dst, a);
 970   z_ltr(dst, dst);
 971 }
 972 
 973 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
 974   int64_t disp = a.disp20();
 975   if (Displacement::is_shortDisp(disp)) {
 976     z_lh(dst, a);
 977   } else if (Displacement::is_longDisp(disp)) {
 978     z_lhy(dst, a);
 979   } else {
 980     guarantee(false, "displacement out of range");
 981   }
 982   z_ltr(dst, dst);
 983 }
 984 
 985 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
 986   z_lt(dst, a);
 987 }
 988 
 989 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
 990   z_ltgf(dst, a);
 991 }
 992 
 993 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
 994   z_ltg(dst, a);
 995 }
 996 
 997 // Test a bit in memory.
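     // Bit 0 is the least significant bit of the 32-bit word at the given address;
     // due to big-endian storage it resides in the byte at disp+3. For example
     // (illustrative), testbit(a, 9) tests mask 0x02 in the byte at disp+2.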
 998 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
 999   assert(a.index() == noreg, "no index reg allowed in testbit");
1000   if (bit <= 7) {
1001     z_tm(a.disp() + 3, a.base(), 1 << bit);
1002   } else if (bit <= 15) {
1003     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
1004   } else if (bit <= 23) {
1005     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
1006   } else if (bit <= 31) {
1007     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
1008   } else {
1009     ShouldNotReachHere();
1010   }
1011 }
1012 
1013 // Test a bit in a register. Result is reflected in CC.
1014 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1015   if (bitPos < 16) {
1016     z_tmll(r, 1U<<bitPos);
1017   } else if (bitPos < 32) {
1018     z_tmlh(r, 1U<<(bitPos-16));
1019   } else if (bitPos < 48) {
1020     z_tmhl(r, 1U<<(bitPos-32));
1021   } else if (bitPos < 64) {
1022     z_tmhh(r, 1U<<(bitPos-48));
1023   } else {
1024     ShouldNotReachHere();
1025   }
1026 }
1027 
1028 // Clear a register, i.e. load const zero into reg.
1029 // Return len (in bytes) of generated instruction(s).
1030 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1031 // set_cc:    Use instruction that sets the condition code, if true.
1032 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1033   unsigned int start_off = offset();
1034   if (whole_reg) {
1035     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1036   } else {  // Only 32bit register.
1037     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1038   }
1039   return offset() - start_off;
1040 }
1041 
1042 #ifdef ASSERT
1043 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1044   switch (pattern_len) {
1045     case 1:
1046       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
1047     case 2:
1048       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
1049     case 4:
1050       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
1051     case 8:
1052       return load_const_optimized_rtn_len(r, pattern, true);
1053       break;
1054     default:
1055       guarantee(false, "preset_reg: bad len");
1056   }
1057   return 0;
1058 }
1059 #endif
1060 
1061 // addr: Address descriptor of memory to clear. The index register will not be used!
1062 // size: Number of bytes to clear.
1063 //    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
1064 //    !!! Use store_const() instead                  !!!
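     // For example (illustrative), clear_mem(addr, 8) emits a single MVGHI, while a
     // size like 16 falls through to XC addr(16),addr.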
1065 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1066   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1067 
1068   if (size == 1) {
1069     z_mvi(addr, 0);
1070     return;
1071   }
1072 
1073   switch (size) {
1074     case 2: z_mvhhi(addr, 0);
1075       return;
1076     case 4: z_mvhi(addr, 0);
1077       return;
1078     case 8: z_mvghi(addr, 0);
1079       return;
1080     default: ; // Fallthru to xc.
1081   }
1082 
1083   z_xc(addr, size, addr);
1084 }
1085 
1086 void MacroAssembler::align(int modulus) {
1087   while (offset() % modulus != 0) z_nop();
1088 }
1089 
1090 // Special version for non-relocatable code if required alignment
1091 // is larger than CodeEntryAlignment.
1092 void MacroAssembler::align_address(int modulus) {
1093   while ((uintptr_t)pc() % modulus != 0) z_nop();
1094 }
1095 
1096 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1097                                          Register temp_reg,
1098                                          int64_t extra_slot_offset) {
1099   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1100   // which issues an unnecessary add instruction.
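       // Illustrative example (assuming Interpreter::stackElementSize == 8): a constant
       // arg_slot of 2 with extra_slot_offset == 1 yields Address(Z_esp, 24).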
1101   int stackElementSize = Interpreter::stackElementSize;
1102   int64_t offset = extra_slot_offset * stackElementSize;
1103   const Register argbase = Z_esp;
1104   if (arg_slot.is_constant()) {
1105     offset += arg_slot.as_constant() * stackElementSize;
1106     return Address(argbase, offset);
1107   }
1108   // else
1109   assert(temp_reg != noreg, "must specify");
1110   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1111   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1112   return Address(argbase, temp_reg, offset);
1113 }
1114 
1115 
1116 //===================================================================
1117 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1118 //===================================================================
1119 //===           P A T C H A B L E   C O N S T A N T S             ===
1120 //===================================================================
1121 
1122 
1123 //---------------------------------------------------
1124 //  Load (patchable) constant into register
1125 //---------------------------------------------------
1126 
1127 
1128 // Load absolute address (and try to optimize).
1129 //   Note: This method is usable only for position-fixed code,
1130 //         referring to a position-fixed target location.
1131 //         If not so, relocations and patching must be used.
1132 void MacroAssembler::load_absolute_address(Register d, address addr) {
1133   assert(addr != NULL, "should not happen");
1134   BLOCK_COMMENT("load_absolute_address:");
1135   if (addr == NULL) {
1136     z_larl(d, pc()); // Dummy emit for size calc.
1137     return;
1138   }
1139 
1140   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1141     z_larl(d, addr);
1142     return;
1143   }
1144 
1145   load_const_optimized(d, (long)addr);
1146 }
1147 
1148 // Load a 64bit constant.
1149 // Patchable code sequence, but not atomically patchable.
1150 // Make sure to keep code size constant -> no value-dependent optimizations.
1151 // Do not kill condition code.
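     // Example (illustrative): load_const(Z_R1, 0x123456789abcdef0L) emits
     //   IIHF Z_R1,0x12345678
     //   IILF Z_R1,0x9abcdef0
     // i.e. always two 6-byte instructions, regardless of the value.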
1152 void MacroAssembler::load_const(Register t, long x) {
1153   Assembler::z_iihf(t, (int)(x >> 32));
1154   Assembler::z_iilf(t, (int)(x & 0xffffffff));
1155 }
1156 
1157 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1158 // Patchable code sequence, but not atomically patchable.
1159 // Make sure to keep code size constant -> no value-dependent optimizations.
1160 // Do not kill condition code.
1161 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1162   if (sign_extend) { Assembler::z_lgfi(t, x); }
1163   else             { Assembler::z_llilf(t, x); }
1164 }
1165 
1166 // Load narrow oop constant, no decompression.
1167 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1168   assert(UseCompressedOops, "must be on to call this method");
1169   load_const_32to64(t, a, false /*sign_extend*/);
1170 }
1171 
1172 // Load narrow klass constant, compression required.
1173 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1174   assert(UseCompressedClassPointers, "must be on to call this method");
1175   narrowKlass encoded_k = Klass::encode_klass(k);
1176   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1177 }
1178 
1179 //------------------------------------------------------
1180 //  Compare (patchable) constant with register.
1181 //------------------------------------------------------
1182 
1183 // Compare narrow oop in reg with narrow oop constant, no decompression.
1184 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1185   assert(UseCompressedOops, "must be on to call this method");
1186 
1187   Assembler::z_clfi(oop1, oop2);
1188 }
1189 
1190 // Compare narrow oop in reg with narrow oop constant, no decompression.
1191 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1192   assert(UseCompressedClassPointers, "must be on to call this method");
1193   narrowKlass encoded_k = Klass::encode_klass(klass2);
1194 
1195   Assembler::z_clfi(klass1, encoded_k);
1196 }
1197 
1198 //----------------------------------------------------------
1199 //  Check which kind of load_constant we have here.
1200 //----------------------------------------------------------
1201 
1202 // Detection of CPU version dependent load_const sequence.
1203 // The detection is valid only for code sequences generated by load_const,
1204 // not load_const_optimized.
1205 bool MacroAssembler::is_load_const(address a) {
1206   unsigned long inst1, inst2;
1207   unsigned int  len1,  len2;
1208 
1209   len1 = get_instruction(a, &inst1);
1210   len2 = get_instruction(a + len1, &inst2);
1211 
1212   return is_z_iihf(inst1) && is_z_iilf(inst2);
1213 }
1214 
1215 // Detection of CPU version dependent load_const_32to64 sequence.
1216 // Mostly used for narrow oops and narrow Klass pointers.
1217 // The detection is valid only for code sequences generated by load_const_32to64.
1218 bool MacroAssembler::is_load_const_32to64(address pos) {
1219   unsigned long inst1, inst2;
1220   unsigned int len1;
1221 
1222   len1 = get_instruction(pos, &inst1);
1223   return is_z_llilf(inst1);
1224 }
1225 
1226 // Detection of compare_immediate_narrow sequence.
1227 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1228 bool MacroAssembler::is_compare_immediate32(address pos) {
1229   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1230 }
1231 
1232 // Detection of compare_immediate_narrow sequence.
1233 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1234 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1235   return is_compare_immediate32(pos);
1236 }
1237 
1238 // Detection of compare_immediate_narrow sequence.
1239 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1240 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1241   return is_compare_immediate32(pos);
1242 }
1243 
1244 //-----------------------------------
1245 //  patch the load_constant
1246 //-----------------------------------
1247 
1248 // CPU-version dependent patching of load_const.
1249 void MacroAssembler::patch_const(address a, long x) {
1250   assert(is_load_const(a), "not a load of a constant");
1251   set_imm32((address)a, (int) ((x >> 32) & 0xffffffff));
1252   set_imm32((address)(a + 6), (int)(x & 0xffffffff));
1253 }
1254 
1255 // Patching the value of CPU version dependent load_const_32to64 sequence.
1256 // The passed ptr MUST be in compressed format!
1257 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1258   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1259 
1260   set_imm32(pos, np);
1261   return 6;
1262 }
1263 
1264 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1265 // The passed ptr MUST be in compressed format!
1266 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1267   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1268 
1269   set_imm32(pos, np);
1270   return 6;
1271 }
1272 
1273 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1274 // The passed ptr must NOT be in compressed format!
1275 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1276   assert(UseCompressedOops, "Can only patch compressed oops");
1277 
1278   narrowOop no = oopDesc::encode_heap_oop(o);
1279   return patch_load_const_32to64(pos, no);
1280 }
1281 
1282 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1283 // The passed ptr must NOT be in compressed format!
1284 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1285   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1286 
1287   narrowKlass nk = Klass::encode_klass(k);
1288   return patch_load_const_32to64(pos, nk);
1289 }
1290 
1291 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1292 // The passed ptr must NOT be in compressed format!
1293 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1294   assert(UseCompressedOops, "Can only patch compressed oops");
1295 
1296   narrowOop no = oopDesc::encode_heap_oop(o);
1297   return patch_compare_immediate_32(pos, no);
1298 }
1299 
1300 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1301 // The passed ptr must NOT be in compressed format!
1302 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1303   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1304 
1305   narrowKlass nk = Klass::encode_klass(k);
1306   return patch_compare_immediate_32(pos, nk);
1307 }
1308 
1309 //------------------------------------------------------------------------
1310 //  Extract the constant from a load_constant instruction stream.
1311 //------------------------------------------------------------------------
1312 
1313 // Get constant from a load_const sequence.
1314 long MacroAssembler::get_const(address a) {
1315   assert(is_load_const(a), "not a load of a constant");
1316   unsigned long x;
1317   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1318   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1319   return (long) x;
1320 }
1321 
1322 //--------------------------------------
1323 //  Store a constant in memory.
1324 //--------------------------------------
1325 
1326 // General emitter to move a constant to memory.
1327 // The store is atomic.
1328 //  o Address must be given in RS format (no index register)
1329 //  o Displacement should be 12bit unsigned for efficiency. 20bit signed also supported.
1330 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1331 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1332 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1333 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
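     // Illustrative examples: store_const(Address(base, 16), 0, 8, 8, scratch) emits a
     // single MVGHI; an immediate that does not fit 16 bits signed is first loaded into
     // the scratch register and then stored with ST/STG.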
1334 int MacroAssembler::store_const(const Address &dest, long imm,
1335                                 unsigned int lm, unsigned int lc,
1336                                 Register scratch) {
1337   int64_t  disp = dest.disp();
1338   Register base = dest.base();
1339   assert(!dest.has_index(), "not supported");
1340   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1341   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1342   assert(lm>=lc, "memory slot too small");
1343   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1344   assert(Displacement::is_validDisp(disp), "displacement out of range");
1345 
1346   bool is_shortDisp = Displacement::is_shortDisp(disp);
1347   int store_offset = -1;
1348 
1349   // For target len == 1 it's easy.
1350   if (lm == 1) {
1351     store_offset = offset();
1352     if (is_shortDisp) {
1353       z_mvi(disp, base, imm);
1354       return store_offset;
1355     } else {
1356       z_mviy(disp, base, imm);
1357       return store_offset;
1358     }
1359   }
1360 
1361   // All the "good stuff" takes an unsigned displacement.
1362   if (is_shortDisp) {
1363     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1364 
1365     store_offset = offset();
1366     switch (lm) {
1367       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1368         z_mvhhi(disp, base, imm);
1369         return store_offset;
1370       case 4:
1371         if (Immediate::is_simm16(imm)) {
1372           z_mvhi(disp, base, imm);
1373           return store_offset;
1374         }
1375         break;
1376       case 8:
1377         if (Immediate::is_simm16(imm)) {
1378           z_mvghi(disp, base, imm);
1379           return store_offset;
1380         }
1381         break;
1382       default:
1383         ShouldNotReachHere();
1384         break;
1385     }
1386   }
1387 
1388   //  Can't optimize, so load value and store it.
1389   guarantee(scratch != noreg, "need a scratch register here!");
1390   if (imm != 0) {
1391     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1392   } else {
1393     // Leave CC alone!!
1394     (void) clear_reg(scratch, true, false); // Indicate unused result.
1395   }
1396 
1397   store_offset = offset();
1398   if (is_shortDisp) {
1399     switch (lm) {
1400       case 2:
1401         z_sth(scratch, disp, Z_R0, base);
1402         return store_offset;
1403       case 4:
1404         z_st(scratch, disp, Z_R0, base);
1405         return store_offset;
1406       case 8:
1407         z_stg(scratch, disp, Z_R0, base);
1408         return store_offset;
1409       default:
1410         ShouldNotReachHere();
1411         break;
1412     }
1413   } else {
1414     switch (lm) {
1415       case 2:
1416         z_sthy(scratch, disp, Z_R0, base);
1417         return store_offset;
1418       case 4:
1419         z_sty(scratch, disp, Z_R0, base);
1420         return store_offset;
1421       case 8:
1422         z_stg(scratch, disp, Z_R0, base);
1423         return store_offset;
1424       default:
1425         ShouldNotReachHere();
1426         break;
1427     }
1428   }
1429   return -1; // should not reach here
1430 }
1431 
1432 //===================================================================
1433 //===       N O T   P A T C H A B L E   C O N S T A N T S         ===
1434 //===================================================================
1435 
1436 // Load constant x into register t with a fast instruction sequence
1437 // depending on the bits in x. Preserves CC under all circumstances.
1438 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1439   if (x == 0) {
1440     int len;
1441     if (emit) {
1442       len = clear_reg(t, true, false);
1443     } else {
1444       len = 4;
1445     }
1446     return len;
1447   }
1448 
1449   if (Immediate::is_simm16(x)) {
1450     if (emit) { z_lghi(t, x); }
1451     return 4;
1452   }
1453 
1454   // 64 bit value: | part1 | part2 | part3 | part4 |  (bits 63..48 | 47..32 | 31..16 | 15..0)
1455   // At least one part is not zero!
1456   int part1 = ((x >> 32) & 0xffff0000) >> 16;
1457   int part2 = (x >> 32) & 0x0000ffff;
1458   int part3 = (x & 0xffff0000) >> 16;
1459   int part4 = (x & 0x0000ffff);
1460 
1461   // Lower word only (unsigned).
1462   if ((part1 == 0) && (part2 == 0)) {
1463     if (part3 == 0) {
1464       if (emit) z_llill(t, part4);
1465       return 4;
1466     }
1467     if (part4 == 0) {
1468       if (emit) z_llilh(t, part3);
1469       return 4;
1470     }
1471     if (emit) z_llilf(t, (int)(x & 0xffffffff));
1472     return 6;
1473   }
1474 
1475   // Upper word only.
1476   if ((part3 == 0) && (part4 == 0)) {
1477     if (part1 == 0) {
1478       if (emit) z_llihl(t, part2);
1479       return 4;
1480     }
1481     if (part2 == 0) {
1482       if (emit) z_llihh(t, part1);
1483       return 4;
1484     }
1485     if (emit) z_llihf(t, (int)(x >> 32));
1486     return 6;
1487   }
1488 
1489   // Lower word only (signed).
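       // The upper word is all ones and bit 31 is set, i.e. x is the sign
       // extension of its lower 32 bits, so a single LGFI can load it.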
1490   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1491     if (emit) z_lgfi(t, (int)(x & 0xffffffff));
1492     return 6;
1493   }
1494 
1495   int len = 0;
1496 
1497   if ((part1 == 0) || (part2 == 0)) {
1498     if (part1 == 0) {
1499       if (emit) z_llihl(t, part2);
1500       len += 4;
1501     } else {
1502       if (emit) z_llihh(t, part1);
1503       len += 4;
1504     }
1505   } else {
1506     if (emit) z_llihf(t, (int)(x >> 32));
1507     len += 6;
1508   }
1509 
1510   if ((part3 == 0) || (part4 == 0)) {
1511     if (part3 == 0) {
1512       if (emit) z_iill(t, part4);
1513       len += 4;
1514     } else {
1515       if (emit) z_iilh(t, part3);
1516       len += 4;
1517     }
1518   } else {
1519     if (emit) z_iilf(t, (int)(x & 0xffffffff));
1520     len += 6;
1521   }
1522   return len;
1523 }
1524 
1525 //=====================================================================
1526 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1527 //=====================================================================
1528 
1529 // Note: In the worst case, one of the scratch registers is destroyed!!!
1530 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1531   // Right operand is constant.
1532   if (x2.is_constant()) {
1533     jlong value = x2.as_constant();
1534     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1535     return;
1536   }
1537 
1538   // Right operand is in register.
1539   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1540 }
1541 
1542 // Note: In the worst case, one of the scratch registers is destroyed!!!
1543 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1544   // Right operand is constant.
1545   if (x2.is_constant()) {
1546     jlong value = x2.as_constant();
1547     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1548     return;
1549   }
1550 
1551   // Right operand is in register.
1552   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1553 }
1554 
1555 // Note: In the worst case, one of the scratch registers is destroyed!!!
1556 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1557   // Right operand is constant.
1558   if (x2.is_constant()) {
1559     jlong value = x2.as_constant();
1560     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1561     return;
1562   }
1563 
1564   // Right operand is in register.
1565   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1566 }
1567 
1568 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1569   // Right operand is constant.
1570   if (x2.is_constant()) {
1571     jlong value = x2.as_constant();
1572     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1573     return;
1574   }
1575 
1576   // Right operand is in register.
1577   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1578 }
1579 
1580 // Generate an optimal branch to the branch target.
1581 // Optimal means that a relative branch (brc or brcl) is used if the
1582 // branch distance is short enough. Loading the target address into a
1583 // register and branching via reg is used as fallback only.
1584 //
1585 // Used registers:
1586 //   Z_R1 - work reg. Holds branch target address.
1587 //          Used in fallback case only.
1588 //
1589 // This version of branch_optimized is good for cases where the target address is known
1590 // and constant, i.e. is never changed (no relocation, no patching).
1591 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1592   address branch_origin = pc();
1593 
1594   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1595     z_brc(cond, branch_addr);
1596   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1597     z_brcl(cond, branch_addr);
1598   } else {
1599     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1600     z_bcr(cond, Z_R1);
1601   }
1602 }
1603 
1604 // This version of branch_optimized is good for cases where the target address
1605 // is potentially not yet known at the time the code is emitted.
1606 //
1607 // One very common case is a branch to an unbound label which is handled here.
1608 // The caller might know (or hope) that the branch distance is short enough
1609 // to be encoded in a 16-bit relative address. In that case, the caller passes a
1610 // NearLabel branch_target.
1611 // Care must be taken with unbound labels. Each call to target(label) creates
1612 // an entry in the patch queue for that label to patch all references of the label
1613 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1614 // an assertion fires at patch time.
1615 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1616   if (branch_target.is_bound()) {
1617     address branch_addr = target(branch_target);
1618     branch_optimized(cond, branch_addr);
1619   } else if (branch_target.is_near()) {
1620     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1621   } else {
1622     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1623   }
1624 }
1625 
1626 // Generate an optimal compare and branch to the branch target.
1627 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1628 // branch distance is short enough. Loading the target address into a
1629 // register and branching via reg is used as fallback only.
1630 //
1631 // Input:
1632 //   r1 - left compare operand
1633 //   r2 - right compare operand
1634 void MacroAssembler::compare_and_branch_optimized(Register r1,
1635                                                   Register r2,
1636                                                   Assembler::branch_condition cond,
1637                                                   address  branch_addr,
1638                                                   bool     len64,
1639                                                   bool     has_sign) {
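       // casenum selects the instruction flavor:
       //   0 - 32 bit signed, 1 - 32 bit unsigned, 2 - 64 bit signed, 3 - 64 bit unsigned.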
1640   unsigned int casenum = (len64?2:0)+(has_sign?0:1);
1641 
1642   address branch_origin = pc();
1643   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1644     switch (casenum) {
1645       case 0: z_crj( r1, r2, cond, branch_addr); break;
1646       case 1: z_clrj (r1, r2, cond, branch_addr); break;
1647       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1648       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1649       default: ShouldNotReachHere(); break;
1650     }
1651   } else {
1652     switch (casenum) {
1653       case 0: z_cr( r1, r2); break;
1654       case 1: z_clr(r1, r2); break;
1655       case 2: z_cgr(r1, r2); break;
1656       case 3: z_clgr(r1, r2); break;
1657       default: ShouldNotReachHere(); break;
1658     }
1659     branch_optimized(cond, branch_addr);
1660   }
1661 }
1662 
1663 // Generate an optimal compare and branch to the branch target.
1664 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1665 // branch distance is short enough. Loading the target address into a
1666 // register and branching via reg is used as fallback only.
1667 //
1668 // Input:
1669 //   r1 - left compare operand (in register)
1670 //   x2 - right compare operand (immediate)
1671 void MacroAssembler::compare_and_branch_optimized(Register r1,
1672                                                   jlong    x2,
1673                                                   Assembler::branch_condition cond,
1674                                                   Label&   branch_target,
1675                                                   bool     len64,
1676                                                   bool     has_sign) {
1677   address      branch_origin = pc();
1678   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1679   bool         is_RelAddr16  = branch_target.is_near() ||
1680                                (branch_target.is_bound() &&
1681                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1682   unsigned int casenum       = (len64?2:0)+(has_sign?0:1);
1683 
1684   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1685     switch (casenum) {
1686       case 0: z_cij( r1, x2, cond, branch_target); break;
1687       case 1: z_clij(r1, x2, cond, branch_target); break;
1688       case 2: z_cgij(r1, x2, cond, branch_target); break;
1689       case 3: z_clgij(r1, x2, cond, branch_target); break;
1690       default: ShouldNotReachHere(); break;
1691     }
1692     return;
1693   }
1694 
1695   if (x2 == 0) {
1696     switch (casenum) {
1697       case 0: z_ltr(r1, r1); break;
1698       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1699       case 2: z_ltgr(r1, r1); break;
1700       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1701       default: ShouldNotReachHere(); break;
1702     }
1703   } else {
1704     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1705       switch (casenum) {
1706         case 0: z_chi(r1, x2); break;
1707         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1708         case 2: z_cghi(r1, x2); break;
1709         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1710         default: break;
1711       }
1712     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1713       switch (casenum) {
1714         case 0: z_cfi( r1, x2); break;
1715         case 1: z_clfi(r1, x2); break;
1716         case 2: z_cgfi(r1, x2); break;
1717         case 3: z_clgfi(r1, x2); break;
1718         default: ShouldNotReachHere(); break;
1719       }
1720     } else {
1721       // No instruction with immediate operand possible, so load into register.
1722       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1723       load_const_optimized(scratch, x2);
1724       switch (casenum) {
1725         case 0: z_cr( r1, scratch); break;
1726         case 1: z_clr(r1, scratch); break;
1727         case 2: z_cgr(r1, scratch); break;
1728         case 3: z_clgr(r1, scratch); break;
1729         default: ShouldNotReachHere(); break;
1730       }
1731     }
1732   }
1733   branch_optimized(cond, branch_target);
1734 }
1735 
1736 // Generate an optimal compare and branch to the branch target.
1737 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1738 // branch distance is short enough. Loading the target address into a
1739 // register and branching via reg is used as fallback only.
1740 //
1741 // Input:
1742 //   r1 - left compare operand
1743 //   r2 - right compare operand
1744 void MacroAssembler::compare_and_branch_optimized(Register r1,
1745                                                   Register r2,
1746                                                   Assembler::branch_condition cond,
1747                                                   Label&   branch_target,
1748                                                   bool     len64,
1749                                                   bool     has_sign) {
1750   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1751 
1752   if (branch_target.is_bound()) {
1753     address branch_addr = target(branch_target);
1754     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1755   } else {
1756     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1757       switch (casenum) {
1758         case 0: z_crj(  r1, r2, cond, branch_target); break;
1759         case 1: z_clrj( r1, r2, cond, branch_target); break;
1760         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1761         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1762         default: ShouldNotReachHere(); break;
1763       }
1764     } else {
1765       switch (casenum) {
1766         case 0: z_cr( r1, r2); break;
1767         case 1: z_clr(r1, r2); break;
1768         case 2: z_cgr(r1, r2); break;
1769         case 3: z_clgr(r1, r2); break;
1770         default: ShouldNotReachHere(); break;
1771       }
1772       branch_optimized(cond, branch_target);
1773     }
1774   }
1775 }
1776 
1777 //===========================================================================
1778 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1779 //===========================================================================
1780 
1781 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1782   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1783   int index = oop_recorder()->allocate_metadata_index(obj);
1784   RelocationHolder rspec = metadata_Relocation::spec(index);
1785   return AddressLiteral((address)obj, rspec);
1786 }
1787 
1788 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1789   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1790   int index = oop_recorder()->find_index(obj);
1791   RelocationHolder rspec = metadata_Relocation::spec(index);
1792   return AddressLiteral((address)obj, rspec);
1793 }
1794 
1795 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1796   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1797   int oop_index = oop_recorder()->allocate_oop_index(obj);
1798   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1799 }
1800 
1801 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1802   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1803   int oop_index = oop_recorder()->find_index(obj);
1804   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1805 }
1806 
1807 // NOTE: destroys r
1808 void MacroAssembler::c2bool(Register r, Register t) {
1809   z_lcr(t, r);   // t = -r
1810   z_or(r, t);    // r = -r OR r
1811   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1812 }
1813 
1814 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1815                                                       Register tmp,
1816                                                       int offset) {
1817   intptr_t value = *delayed_value_addr;
1818   if (value != 0) {
1819     return RegisterOrConstant(value + offset);
1820   }
1821 
1822   BLOCK_COMMENT("delayed_value {");
1823   // Load indirectly to solve generation ordering problem.
1824   load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1825   z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1826 
1827 #ifdef ASSERT
1828   NearLabel L;
1829   compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1830   z_illtrap();
1831   bind(L);
1832 #endif
1833 
1834   if (offset != 0) {
1835     z_agfi(tmp, offset);               // tmp = tmp + offset;
1836   }
1837 
1838   BLOCK_COMMENT("} delayed_value");
1839   return RegisterOrConstant(tmp);
1840 }
1841 
1842 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1843 // and return the resulting instruction.
1844 // Dest_pos and inst_pos are 32 bit only. These parameters can only designate
1845 // relative positions.
1846 // Use correct argument types. Do not pre-calculate distance.
1847 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
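       // c records which instruction class matched; it is used only in the error output below.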
1848   int c = 0;
1849   unsigned long patched_inst = 0;
1850   if (is_call_pcrelative_short(inst) ||
1851       is_branch_pcrelative_short(inst) ||
1852       is_branchoncount_pcrelative_short(inst) ||
1853       is_branchonindex32_pcrelative_short(inst)) {
1854     c = 1;
1855     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1856     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1857     patched_inst = (inst & ~m) | v;
1858   } else if (is_compareandbranch_pcrelative_short(inst)) {
1859     c = 2;
1860     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1861     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1862     patched_inst = (inst & ~m) | v;
1863   } else if (is_branchonindex64_pcrelative_short(inst)) {
1864     c = 3;
1865     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1866     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1867     patched_inst = (inst & ~m) | v;
1868   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1869     c = 4;
1870     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1871     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1872     patched_inst = (inst & ~m) | v;
1873   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1874     c = 5;
1875     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1876     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1877     patched_inst = (inst & ~m) | v;
1878   } else {
1879     print_dbg_msg(tty, inst, "not a relative branch", 0);
1880     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1881     ShouldNotReachHere();
1882   }
1883 
1884   long new_off = get_pcrel_offset(patched_inst);
1885   if (new_off != (dest_pos-inst_pos)) {
1886     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1887     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1888     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1889 #ifdef LUCY_DBG
1890     VM_Version::z_SIGSEGV();
1891 #endif
1892     ShouldNotReachHere();
1893   }
1894   return patched_inst;
1895 }
1896 
1897 // Only called when binding labels (share/vm/asm/assembler.cpp)
1898 // Pass arguments as intended. Do not pre-calculate distance.
1899 void MacroAssembler::pd_patch_instruction(address branch, address target) {
1900   unsigned long stub_inst;
1901   int           inst_len = get_instruction(branch, &stub_inst);
1902 
1903   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1904 }
1905 
1906 
1907 // Extract relative address (aka offset).
1908 // inv_simm16 works for 4-byte instructions only.
1909 // Compare-and-branch instructions are 6 bytes long and have a 16-bit offset "in the middle".
1910 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1911 
1912   if (MacroAssembler::is_pcrelative_short(inst)) {
1913     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1914       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1915     } else {
1916       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1917     }
1918   }
1919 
1920   if (MacroAssembler::is_pcrelative_long(inst)) {
1921     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1922   }
1923 
1924   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1925 #ifdef LUCY_DBG
1926   VM_Version::z_SIGSEGV();
1927 #else
1928   ShouldNotReachHere();
1929 #endif
1930   return -1;
1931 }
1932 
1933 long MacroAssembler::get_pcrel_offset(address pc) {
1934   unsigned long inst;
1935   unsigned int  len = get_instruction(pc, &inst);
1936 
1937 #ifdef ASSERT
1938   long offset;
1939   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1940     offset = get_pcrel_offset(inst);
1941   } else {
1942     offset = -1;
1943   }
1944 
1945   if (offset == -1) {
1946     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1947 #ifdef LUCY_DBG
1948     VM_Version::z_SIGSEGV();
1949 #else
1950     ShouldNotReachHere();
1951 #endif
1952   }
1953   return offset;
1954 #else
1955   return get_pcrel_offset(inst);
1956 #endif // ASSERT
1957 }
1958 
1959 // Get target address from pc-relative instructions.
1960 address MacroAssembler::get_target_addr_pcrel(address pc) {
1961   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1962   return pc + get_pcrel_offset(pc);
1963 }
1964 
1965 // Patch pc relative load address.
1966 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1967   unsigned long inst;
1968   // Offset is +/- 2**32 -> use long.
1969   ptrdiff_t distance = con - pc;
1970 
1971   get_instruction(pc, &inst);
1972 
1973   if (is_pcrelative_short(inst)) {
1974     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1975 
1976     // Some extra safety net.
1977     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1978       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1979       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1980       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1981     }
1982     return;
1983   }
1984 
1985   if (is_pcrelative_long(inst)) {
1986     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1987 
1988     // Some extra safety net.
1989     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1990       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1991       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
1992       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
1993     }
1994     return;
1995   }
1996 
1997   guarantee(false, "not a pcrelative instruction to patch!");
1998 }
1999 
2000 // "Current PC" here means the address just behind the basr instruction.
2001 address MacroAssembler::get_PC(Register result) {
2002   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2003   return pc();
2004 }
2005 
2006 // Get current PC + offset.
2007 // Offset given in bytes, must be even!
2008 // "Current PC" here means the address of the larl instruction plus the given offset.
2009 address MacroAssembler::get_PC(Register result, int64_t offset) {
2010   address here = pc();
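       // LARL encodes its relative target in halfwords, hence the byte offset is halved here.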
2011   z_larl(result, offset/2); // Save target instruction address in result.
2012   return here + offset;
2013 }
2014 
2015 // Resize_frame with SP(new) = SP(old) - [offset].
2016 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2017 {
2018   assert_different_registers(offset, fp, Z_SP);
2019   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2020 
2021   z_sgr(Z_SP, offset);
2022   z_stg(fp, _z_abi(callers_sp), Z_SP);
2023 }
2024 
2025 // Resize_frame with SP(new) = [addr].
2026 void MacroAssembler::resize_frame_absolute(Register addr, Register fp, bool load_fp) {
2027   assert_different_registers(addr, fp, Z_SP);
2028   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2029 
2030   if (addr != Z_R0) {
2031     // Minimize stalls by not using Z_SP immediately after update.
2032     z_stg(fp, _z_abi(callers_sp), addr);
2033     z_lgr(Z_SP, addr);
2034   } else {
2035     z_lgr(Z_SP, addr);
2036     z_stg(fp, _z_abi(callers_sp), Z_SP);
2037   }
2038 }
2039 
2040 // Resize_frame with SP(new) = SP(old) + offset.
2041 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2042   assert_different_registers(fp, Z_SP);
2043   if (load_fp) z_lg(fp, _z_abi(callers_sp), Z_SP);
2044 
2045   if (Displacement::is_validDisp((int)_z_abi(callers_sp) + offset.constant_or_zero())) {
2046     // Minimize stalls by first using, then updating Z_SP.
2047     // Do that only if we have a small positive offset or if ExtImm are available.
2048     z_stg(fp, Address(Z_SP, offset, _z_abi(callers_sp)));
2049     add64(Z_SP, offset);
2050   } else {
2051     add64(Z_SP, offset);
2052     z_stg(fp, _z_abi(callers_sp), Z_SP);
2053   }
2054 }
2055 
2056 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2057 #ifdef ASSERT
2058   assert_different_registers(bytes, old_sp, Z_SP);
2059   if (!copy_sp) {
2060     z_cgr(old_sp, Z_SP);
2061     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2062   }
2063 #endif
2064   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2065   if (bytes_with_inverted_sign) {
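         // bytes holds the negated frame size: store the back chain at the new
         // (lower) SP location first, then lower SP by adding the negative value.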
2066     z_stg(old_sp, 0, bytes, Z_SP);
2067     add2reg_with_index(Z_SP, 0, bytes, Z_SP);
2068   } else {
2069     z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2070     z_stg(old_sp, 0, Z_SP);
2071   }
2072 }
2073 
2074 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2075   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2076 
2077   if (Displacement::is_validDisp(-offset)) {
2078     // Minimize stalls by first using, then updating Z_SP.
2079     // Do that only if we have ExtImm available.
2080     z_stg(Z_SP, -offset, Z_SP);
2081     add2reg(Z_SP, -offset);
2082   } else {
2083     if (scratch != Z_R0 && scratch != Z_R1) {
2084       z_stg(Z_SP, -offset, Z_SP);
2085       add2reg(Z_SP, -offset);
2086     } else {   // scratch == Z_R0 || scratch == Z_R1
2087       z_lgr(scratch, Z_SP);
2088       add2reg(Z_SP, -offset);
2089       z_stg(scratch, 0, Z_SP);
2090     }
2091   }
2092   return offset;
2093 }
2094 
2095 // Push a frame of size `bytes' plus abi160 on top.
2096 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2097   BLOCK_COMMENT("push_frame_abi160 {");
2098   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2099   BLOCK_COMMENT("} push_frame_abi160");
2100   return res;
2101 }
2102 
2103 // Pop current C frame.
2104 void MacroAssembler::pop_frame() {
2105   BLOCK_COMMENT("pop_frame:");
2106   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2107 }
2108 
2109 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2110   if (allow_relocation) {
2111     call_c(entry_point);
2112   } else {
2113     call_c_static(entry_point);
2114   }
2115 }
2116 
2117 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2118   bool allow_relocation = true;
2119   call_VM_leaf_base(entry_point, allow_relocation);
2120 }
2121 
2122 void MacroAssembler::call_VM_base(Register oop_result,
2123                                   Register last_java_sp,
2124                                   address  entry_point,
2125                                   bool     allow_relocation,
2126                                   bool     check_exceptions) { // Defaults to true.
2127   // If allow_relocation is true, the generated code must be fit for code
2128   // relocation or referenced data relocation. In other words: all addresses
2129   // must be considered variable, so PC-relative addressing cannot be used.
2130   // On the other hand, if allow_relocation is false, addresses and offsets
2131   // may be considered stable, enabling us to take advantage of some
2132   // PC-relative addressing tweaks. These might improve performance and
2133   // reduce code size.
2134 
2135   // Determine last_java_sp register.
2136   if (!last_java_sp->is_valid()) {
2137     last_java_sp = Z_SP;  // Load Z_SP as SP.
2138   }
2139 
2140   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2141 
2142   // ARG1 must hold thread address.
2143   z_lgr(Z_ARG1, Z_thread);
2144 
2145   address return_pc = NULL;
2146   if (allow_relocation) {
2147     return_pc = call_c(entry_point);
2148   } else {
2149     return_pc = call_c_static(entry_point);
2150   }
2151 
2152   reset_last_Java_frame(allow_relocation);
2153 
2154   // C++ interp handles this in the interpreter.
2155   check_and_handle_popframe(Z_thread);
2156   check_and_handle_earlyret(Z_thread);
2157 
2158   // Check for pending exceptions.
2159   if (check_exceptions) {
2160     // Check for pending exceptions (java_thread is set upon return).
2161     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2162 
2163     // This used to jump conditionally to forward_exception. However, if the code
2164     // is relocated, that conditional branch might not reach. So we jump around a
2165     // call to the stub instead, which can always reach its target.
2166 
2167     Label ok;
2168     z_bre(ok); // Bcondequal is the same as bcondZero.
2169     call_stub(StubRoutines::forward_exception_entry());
2170     bind(ok);
2171   }
2172 
2173   // Get oop result if there is one and reset the value in the thread.
2174   if (oop_result->is_valid()) {
2175     get_vm_result(oop_result);
2176   }
2177 
2178   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2179 }
2180 
2181 void MacroAssembler::call_VM_base(Register oop_result,
2182                                   Register last_java_sp,
2183                                   address  entry_point,
2184                                   bool     check_exceptions) { // Defaults to true.
2185   bool allow_relocation = true;
2186   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2187 }
2188 
2189 // VM calls without explicit last_java_sp.
2190 
2191 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2192   // Call takes possible detour via InterpreterMacroAssembler.
2193   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2194 }
2195 
2196 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2197   // Z_ARG1 is reserved for the thread.
2198   lgr_if_needed(Z_ARG2, arg_1);
2199   call_VM(oop_result, entry_point, check_exceptions);
2200 }
2201 
2202 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2203   // Z_ARG1 is reserved for the thread.
2204   lgr_if_needed(Z_ARG2, arg_1);
2205   assert(arg_2 != Z_ARG2, "smashed argument");
2206   lgr_if_needed(Z_ARG3, arg_2);
2207   call_VM(oop_result, entry_point, check_exceptions);
2208 }
2209 
2210 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2211                              Register arg_3, bool check_exceptions) {
2212   // Z_ARG1 is reserved for the thread.
2213   lgr_if_needed(Z_ARG2, arg_1);
2214   assert(arg_2 != Z_ARG2, "smashed argument");
2215   lgr_if_needed(Z_ARG3, arg_2);
2216   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2217   lgr_if_needed(Z_ARG4, arg_3);
2218   call_VM(oop_result, entry_point, check_exceptions);
2219 }
2220 
2221 // VM static calls without explicit last_java_sp.
2222 
2223 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2224   // Call takes possible detour via InterpreterMacroAssembler.
2225   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2226 }
2227 
2228 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2229                                     Register arg_3, bool check_exceptions) {
2230   // Z_ARG1 is reserved for the thread.
2231   lgr_if_needed(Z_ARG2, arg_1);
2232   assert(arg_2 != Z_ARG2, "smashed argument");
2233   lgr_if_needed(Z_ARG3, arg_2);
2234   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2235   lgr_if_needed(Z_ARG4, arg_3);
2236   call_VM_static(oop_result, entry_point, check_exceptions);
2237 }
2238 
2239 // VM calls with explicit last_java_sp.
2240 
2241 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2242   // Call takes possible detour via InterpreterMacroAssembler.
2243   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2244 }
2245 
2246 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2247    // Z_ARG1 is reserved for the thread.
2248    lgr_if_needed(Z_ARG2, arg_1);
2249    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2250 }
2251 
2252 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2253                              Register arg_2, bool check_exceptions) {
2254    // Z_ARG1 is reserved for the thread.
2255    lgr_if_needed(Z_ARG2, arg_1);
2256    assert(arg_2 != Z_ARG2, "smashed argument");
2257    lgr_if_needed(Z_ARG3, arg_2);
2258    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2259 }
2260 
2261 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2262                              Register arg_2, Register arg_3, bool check_exceptions) {
2263   // Z_ARG1 is reserved for the thread.
2264   lgr_if_needed(Z_ARG2, arg_1);
2265   assert(arg_2 != Z_ARG2, "smashed argument");
2266   lgr_if_needed(Z_ARG3, arg_2);
2267   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2268   lgr_if_needed(Z_ARG4, arg_3);
2269   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2270 }
2271 
2272 // VM leaf calls.
2273 
2274 void MacroAssembler::call_VM_leaf(address entry_point) {
2275   // Call takes possible detour via InterpreterMacroAssembler.
2276   call_VM_leaf_base(entry_point, true);
2277 }
2278 
2279 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2280   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2281   call_VM_leaf(entry_point);
2282 }
2283 
2284 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2285   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2286   assert(arg_2 != Z_ARG1, "smashed argument");
2287   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2288   call_VM_leaf(entry_point);
2289 }
2290 
2291 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2292   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2293   assert(arg_2 != Z_ARG1, "smashed argument");
2294   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2295   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2296   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2297   call_VM_leaf(entry_point);
2298 }
2299 
2300 // Static VM leaf calls.
2301 // Really static VM leaf calls are never patched.
2302 
2303 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2304   // Call takes possible detour via InterpreterMacroAssembler.
2305   call_VM_leaf_base(entry_point, false);
2306 }
2307 
2308 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2309   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2310   call_VM_leaf_static(entry_point);
2311 }
2312 
2313 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2314   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2315   assert(arg_2 != Z_ARG1, "smashed argument");
2316   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2317   call_VM_leaf_static(entry_point);
2318 }
2319 
2320 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2321   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2322   assert(arg_2 != Z_ARG1, "smashed argument");
2323   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2324   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2325   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2326   call_VM_leaf_static(entry_point);
2327 }
2328 
2329 // Don't use detour via call_c(reg).
2330 address MacroAssembler::call_c(address function_entry) {
2331   load_const(Z_R1, function_entry);
2332   return call(Z_R1);
2333 }
2334 
2335 // Variant for really static (non-relocatable) calls which are never patched.
2336 address MacroAssembler::call_c_static(address function_entry) {
2337   load_absolute_address(Z_R1, function_entry);
2338 #if 0 // def ASSERT
2339   // Verify that call site did not move.
2340   load_const_optimized(Z_R0, function_entry);
2341   z_cgr(Z_R1, Z_R0);
2342   z_brc(bcondEqual, 3);
2343   z_illtrap(0xba);
2344 #endif
2345   return call(Z_R1);
2346 }
2347 
2348 address MacroAssembler::call_c_opt(address function_entry) {
2349   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2350   _last_calls_return_pc = success ? pc() : NULL;
2351   return _last_calls_return_pc;
2352 }
2353 
2354 // Identify a call_far_patchable instruction: LARL + LG + BASR
2355 //
2356 //    nop                   ; optionally, if required for alignment
2357 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2358 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2359 //
2360 // Code pattern will eventually get patched into variant2 (see below for detection code).
2361 //
2362 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2363   address iaddr = instruction_addr;
2364 
2365   // Check for the actual load instruction.
2366   if (!is_load_const_from_toc(iaddr)) { return false; }
2367   iaddr += load_const_from_toc_size();
2368 
2369   // Check for the call (BASR) instruction, finally.
2370   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2371   return is_call_byregister(iaddr);
2372 }
2373 
2374 // Identify a call_far_patchable instruction: BRASL
2375 //
2376 // Code pattern that suits atomic patching:
2377 //    nop                       ; Optionally, if required for alignment.
2378 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2379 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2380 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
2381 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2382   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2383 
2384   // Check for correct number of leading nops.
2385   address iaddr;
2386   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2387     if (!is_z_nop(iaddr)) { return false; }
2388   }
2389   assert(iaddr == call_addr, "sanity");
2390 
2391   // --> Check for call instruction.
2392   if (is_call_far_pcrelative(call_addr)) {
2393     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2394     return true;
2395   }
2396 
2397   return false;
2398 }
2399 
2400 // Emit a NOT mt-safely patchable 64 bit absolute call.
2401 // If toc_offset == -2, then the destination of the call (= target) is emitted
2402 //                      to the constant pool and a runtime_call relocation is added
2403 //                      to the code buffer.
2404 // If toc_offset != -2, target must already be in the constant pool at
2405 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2406 //                      from the runtime_call relocation).
2407 // Special handling of emitting to scratch buffer when there is no constant pool.
2408 // Slightly changed code pattern. We emit an additional nop if we would
2409 // not end emitting at a word aligned address. This is to ensure
2410 // an atomically patchable displacement in brasl instructions.
2411 //
2412 // A call_far_patchable comes in different flavors:
2413 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2414 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2415 //  - BRASL                  (relative address of call target coded in instruction)
2416 // All flavors occupy the same amount of space. Length differences are compensated
2417 // by leading nops, such that the instruction sequence always ends at the same
2418 // byte offset. This is required to keep the return offset constant.
2419 // Furthermore, the return address (the end of the instruction sequence) is forced
2420 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2421 // need to patch the call target of the BRASL flavor.
2422 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
2423 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2424   // Get current pc and ensure word alignment for end of instr sequence.
2425   const address start_pc = pc();
2426   const intptr_t       start_off = offset();
2427   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2428   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2429   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2430   const bool emit_relative_call  = !emit_target_to_pool &&
2431                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2432                                    ReoptimizeCallSequences &&
2433                                    !code_section()->scratch_emit();
2434 
2435   if (emit_relative_call) {
2436     // Add padding to get the same size as below.
2437     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2438     unsigned int current_padding;
2439     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2440     assert(current_padding == padding, "sanity");
2441 
2442     // relative call: len = 2(nop) + 6 (brasl)
2443     // CodeBlob resize cannot occur in this case because
2444     // this call is emitted into pre-existing space.
2445     z_nop(); // Prepend each BRASL with a nop.
2446     z_brasl(Z_R14, target);
2447   } else {
2448     // absolute call: Get address from TOC.
2449     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2450     if (emit_target_to_pool) {
2451       // When emitting the call for the first time, we do not need to use
2452       // the pc-relative version. It will be patched anyway, when the code
2453       // buffer is copied.
2454       // Relocation is not needed when !ReoptimizeCallSequences.
2455       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2456       AddressLiteral dest(target, rt);
2457       // Store_oop_in_toc() adds dest to the constant table. As a side effect, this
2458       // kills inst_mark(). Reset it if possible.
2459       bool reset_mark = (inst_mark() == pc());
2460       tocOffset = store_oop_in_toc(dest);
2461       if (reset_mark) { set_inst_mark(); }
2462       if (tocOffset == -1) {
2463         return false; // Couldn't create constant pool entry.
2464       }
2465     }
2466     assert(offset() == start_off, "emit no code before this point!");
2467 
2468     address tocPos = pc() + tocOffset;
2469     if (emit_target_to_pool) {
2470       tocPos = code()->consts()->start() + tocOffset;
2471     }
2472     load_long_pcrelative(Z_R14, tocPos);
2473     z_basr(Z_R14, Z_R14);
2474   }
2475 
2476 #ifdef ASSERT
2477   // Assert that we can identify the emitted call.
2478   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2479   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2480 
2481   if (emit_target_to_pool) {
2482     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2483            "wrong encoding of dest address");
2484   }
2485 #endif
2486   return true; // success
2487 }
2488 
2489 // Identify a call_far_patchable instruction.
2490 // For more detailed information see header comment of call_far_patchable.
2491 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2492   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2493          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2494 }
2495 
2496 // Does the call_far_patchable instruction use a pc-relative encoding
2497 // of the call destination?
2498 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2499   // Variant 2 is pc-relative.
2500   return is_call_far_patchable_variant2_at(instruction_addr);
2501 }
2502 
2503 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2504   // Prepend each BRASL with a nop.
2505   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // The BRASL must match right after the required leading nop.
2506 }
2507 
2508 // Set destination address of a call_far_patchable instruction.
2509 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2510   ResourceMark rm;
2511 
2512   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2513   int code_size = MacroAssembler::call_far_patchable_size();
2514   CodeBuffer buf(instruction_addr, code_size);
2515   MacroAssembler masm(&buf);
2516   masm.call_far_patchable(dest, tocOffset);
2517   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2518 }
2519 
2520 // Get dest address of a call_far_patchable instruction.
2521 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2522   // Dynamic TOC: absolute address in constant pool.
2523   // Check variant2 first, it is more frequent.
2524 
2525   // Relative address encoded in call instruction.
2526   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2527     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2528 
2529   // Absolute address in constant pool.
2530   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2531     address iaddr = instruction_addr;
2532 
2533     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2534     address tocLoc    = iaddr + tocOffset;
2535     return *(address *)(tocLoc);
2536   } else {
2537     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2538     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2539             *(unsigned long*)instruction_addr,
2540             *(unsigned long*)(instruction_addr+8),
2541             call_far_patchable_size());
2542     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2543     ShouldNotReachHere();
2544     return NULL;
2545   }
2546 }
2547 
2548 void MacroAssembler::align_call_far_patchable(address pc) {
2549   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2550 }
2551 
2552 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2553 }
2554 
2555 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2556 }
2557 
2558 // Read from the polling page.
2559 // Use TM or TMY instruction, depending on read offset.
2560 //   offset = 0: Use TM, safepoint polling.
2561 //   offset < 0: Use TMY, profiling safepoint polling.
2562 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2563   if (Immediate::is_uimm12(offset)) {
2564     z_tm(offset, polling_page_address, mask_safepoint);
2565   } else {
2566     z_tmy(offset, polling_page_address, mask_profiling);
2567   }
2568 }
2569 
2570 // Check whether z_instruction is a read access to the polling page
2571 // which was emitted by load_from_polling_page(..).
2572 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2573   unsigned long z_instruction;
2574   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2575 
2576   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2577 
2578   if (ilen == 4) {
2579     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2580 
2581     int ms = inv_mask(z_instruction,8,32);  // mask
2582     int ra = inv_reg(z_instruction,16,32);  // base register
2583     int ds = inv_uimm12(z_instruction);     // displacement
2584 
2585     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2586       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2587     }
2588 
2589   } else { /* if (ilen == 6) */
2590 
2591     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2592 
2593     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2594 
2595     int ms = inv_mask(z_instruction,8,48);  // mask
2596     int ra = inv_reg(z_instruction,16,48);  // base register
2597     int ds = inv_simm20(z_instruction);     // displacement
2598   }
2599 
2600   return true;
2601 }
2602 
2603 // Extract poll address from instruction and ucontext.
2604 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2605   assert(ucontext != NULL, "must have ucontext");
2606   ucontext_t* uc = (ucontext_t*) ucontext;
2607   unsigned long z_instruction;
2608   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2609 
2610   if (ilen == 4 && is_z_tm(z_instruction)) {
2611     int ra = inv_reg(z_instruction, 16, 32);  // base register
2612     int ds = inv_uimm12(z_instruction);       // displacement
2613     address addr = (address)uc->uc_mcontext.gregs[ra];
2614     return addr + ds;
2615   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2616     int ra = inv_reg(z_instruction, 16, 48);  // base register
2617     int ds = inv_simm20(z_instruction);       // displacement
2618     address addr = (address)uc->uc_mcontext.gregs[ra];
2619     return addr + ds;
2620   }
2621 
2622   ShouldNotReachHere();
2623   return NULL;
2624 }
2625 
2626 // Extract poll register from instruction.
2627 uint MacroAssembler::get_poll_register(address instr_loc) {
2628   unsigned long z_instruction;
2629   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2630 
2631   if (ilen == 4 && is_z_tm(z_instruction)) {
2632     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2633   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2634     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2635   }
2636 
2637   ShouldNotReachHere();
2638   return 0;
2639 }
2640 
2641 bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
2642   ShouldNotCallThis();
2643   return false;
2644 }
2645 
2646 // Write the serialization page so the VM thread can do a pseudo remote membar.
2647 // We use the current thread pointer to calculate a thread-specific offset to
2648 // write to within the page. This minimizes bus traffic due to cache line
2649 // collisions.
2650 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2651   assert_different_registers(tmp1, tmp2);
2652   z_sllg(tmp2, thread, os::get_serialize_page_shift_count());
2653   load_const_optimized(tmp1, (long) os::get_memory_serialize_page());
2654 
2655   int mask = os::get_serialize_page_mask();
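       // NILL/NILF modify only the low 16/32 bits of tmp2; the subsequent zero
       // extension (LLGHR/LLGFR) clears the remaining high-order bits of the offset.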
2656   if (Immediate::is_uimm16(mask)) {
2657     z_nill(tmp2, mask);
2658     z_llghr(tmp2, tmp2);
2659   } else {
2660     z_nilf(tmp2, mask);
2661     z_llgfr(tmp2, tmp2);
2662   }
2663 
2664   z_release();
2665   z_st(Z_R0, 0, tmp2, tmp1);
2666 }
2667 
2668 // Don't rely on register locking, always use Z_R1 as scratch register instead.
2669 void MacroAssembler::bang_stack_with_offset(int offset) {
2670   // Stack grows down, caller passes positive offset.
2671   assert(offset > 0, "must bang with positive offset");
2672   if (Displacement::is_validDisp(-offset)) {
2673     z_tmy(-offset, Z_SP, mask_stackbang);
2674   } else {
2675     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2676     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2677   }
2678 }
2679 
2680 void MacroAssembler::reserved_stack_check(Register return_pc) {
2681   // Test if reserved zone needs to be enabled.
2682   Label no_reserved_zone_enabling;
2683   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2684   BLOCK_COMMENT("reserved_stack_check {");
2685 
2686   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2687   z_brl(no_reserved_zone_enabling);
2688 
2689   // Enable reserved zone again, throw stack overflow exception.
2690   save_return_pc();
2691   push_frame_abi160(0);
2692   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2693   pop_frame();
2694   restore_return_pc();
2695 
2696   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2697   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2698   z_br(Z_R1);
2699 
2700   should_not_reach_here();
2701 
2702   bind(no_reserved_zone_enabling);
2703   BLOCK_COMMENT("} reserved_stack_check");
2704 }
2705 
2706 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
2707 void MacroAssembler::tlab_allocate(Register obj,
2708                                    Register var_size_in_bytes,
2709                                    int con_size_in_bytes,
2710                                    Register t1,
2711                                    Label& slow_case) {
2712   assert_different_registers(obj, var_size_in_bytes, t1);
2713   Register end = t1;
2714   Register thread = Z_thread;
2715 
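  // obj is loaded with the current TLAB top; end receives the prospective
  // new top (top + allocation size).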
2716   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2717   if (var_size_in_bytes == noreg) {
2718     z_lay(end, Address(obj, con_size_in_bytes));
2719   } else {
2720     z_lay(end, Address(obj, var_size_in_bytes));
2721   }
2722   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2723   branch_optimized(bcondHigh, slow_case);
2724 
2725   // Update the tlab top pointer.
2726   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2727 
2728   // Recover var_size_in_bytes if necessary.
2729   if (var_size_in_bytes == end) {
2730     z_sgr(var_size_in_bytes, obj);
2731   }
2732 }
2733 
2734 // Emitter for interface method lookup.
2735 //   input: recv_klass, intf_klass, itable_index
2736 //   output: method_result
2737 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2738 // TODO: temp2_reg is unused. We may also use this emitter in the itable stubs.
2739 //       If the register is still not needed at that point, remove it.
2740 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2741                                              Register           intf_klass,
2742                                              RegisterOrConstant itable_index,
2743                                              Register           method_result,
2744                                              Register           temp1_reg,
2745                                              Register           temp2_reg,
2746                                              Label&             no_such_interface) {
2747 
2748   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2749   const Register itable_entry_addr = Z_R1_scratch;
2750   const Register itable_interface = Z_R0_scratch;
2751 
2752   BLOCK_COMMENT("lookup_interface_method {");
2753 
2754   // Load start of itable entries into itable_entry_addr.
2755   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2756   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2757 
2758   // Loop over all itable entries until the desired interface klass (intf_klass) is found.
2759   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2760 
2761   add2reg_with_index(itable_entry_addr,
2762                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2763                      recv_klass, vtable_len);
2764 
2765   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2766   Label     search;
2767 
2768   bind(search);
2769 
2770   // Handle IncompatibleClassChangeError.
2771   // If the entry is NULL then we've reached the end of the table
2772   // without finding the expected interface, so throw an exception.
2773   load_and_test_long(itable_interface, Address(itable_entry_addr));
2774   z_bre(no_such_interface);
2775 
2776   add2reg(itable_entry_addr, itable_offset_search_inc);
2777   z_cgr(itable_interface, intf_klass);
2778   z_brne(search);
2779 
2780   // Entry found; itable_entry_addr points just past it. Get offset of vtable for interface.
2781 
2782   const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2783                                     itableOffsetEntry::interface_offset_in_bytes()) -
2784                                    itable_offset_search_inc;
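  // Note: the search loop above advanced itable_entry_addr past the matching
  // entry before the compare, hence the -itable_offset_search_inc correction.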
2785 
2786   // Compute itableMethodEntry and get method and entry point.
2787   // We use addressing with index and displacement, since the formula
2788   // for computing the entry's offset has a fixed and a dynamic part.
2789   // The latter depends on the matched interface entry and on whether
2790   // the itable index has been passed as a register or as a constant value.
2791   int method_offset = itableMethodEntry::method_offset_in_bytes();
2792                            // Fixed part (displacement), common operand.
2793   Register itable_offset;  // Dynamic part (index register).
2794 
2795   if (itable_index.is_register()) {
2796      // Compute the method's offset in that register, for the formula, see the
2797      // else-clause below.
2798      itable_offset = itable_index.as_register();
2799 
2800      z_sllg(itable_offset, itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
2801      z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2802   } else {
2803     itable_offset = Z_R1_scratch;
2804     // Displacement increases.
2805     method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2806 
2807     // Load index from itable.
2808     z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2809   }
2810 
2811   // Finally load the method's oop.
2812   z_lg(method_result, method_offset, itable_offset, recv_klass);
2813   BLOCK_COMMENT("} lookup_interface_method");
2814 }
2815 
2816 // Lookup for virtual method invocation.
2817 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2818                                            RegisterOrConstant vtable_index,
2819                                            Register           method_result) {
2820   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2821   assert(vtableEntry::size() * wordSize == wordSize,
2822          "else adjust the scaling in the code below");
2823 
2824   BLOCK_COMMENT("lookup_virtual_method {");
2825 
2826   const int base = in_bytes(Klass::vtable_start_offset());
2827 
2828   if (vtable_index.is_constant()) {
2829     // Load with base + disp.
2830     Address vtable_entry_addr(recv_klass,
2831                               vtable_index.as_constant() * wordSize +
2832                               base +
2833                               vtableEntry::method_offset_in_bytes());
2834 
2835     z_lg(method_result, vtable_entry_addr);
2836   } else {
2837     // Shift index properly and load with base + index + disp.
2838     Register vindex = vtable_index.as_register();
2839     Address  vtable_entry_addr(recv_klass, vindex,
2840                                base + vtableEntry::method_offset_in_bytes());
2841 
2842     z_sllg(vindex, vindex, exact_log2(wordSize));
2843     z_lg(method_result, vtable_entry_addr);
2844   }
2845   BLOCK_COMMENT("} lookup_virtual_method");
2846 }
2847 
2848 // Factor out code to call ic_miss_handler.
2849 // Generate code to call the inline cache miss handler.
2850 //
2851 // In most cases, this code will be generated out-of-line.
2852 // The method parameters are intended to provide some variability.
2853 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2854 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2855 //                  Any value except 0x00 is supported.
2856 //                  = 0x00 - do not generate illtrap instructions.
2857 //                         Use nops to fill unused space.
2858 //   requiredSize - required size of the generated code. If the actually
2859 //                  generated code is smaller, use padding instructions to fill up.
2860 //                  = 0 - no size requirement, no padding.
2861 //   scratch      - scratch register to hold branch target address.
2862 //
2863 //  The method returns the code offset of the bound label.
2864 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2865   intptr_t startOffset = offset();
2866 
2867   // Prevent entry at content_begin().
2868   if (trapMarker != 0) {
2869     z_illtrap(trapMarker);
2870   }
2871 
2872   // Load address of inline cache miss code into scratch register
2873   // and branch to cache miss handler.
2874   BLOCK_COMMENT("IC miss handler {");
2875   BIND(ICM);
2876   unsigned int   labelOffset = offset();
2877   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2878 
2879   load_const_optimized(scratch, icmiss);
2880   z_br(scratch);
2881 
2882   // Fill unused space.
2883   if (requiredSize > 0) {
2884     while ((offset() - startOffset) < requiredSize) {
2885       if (trapMarker == 0) {
2886         z_nop();
2887       } else {
2888         z_illtrap(trapMarker);
2889       }
2890     }
2891   }
2892   BLOCK_COMMENT("} IC miss handler");
2893   return labelOffset;
2894 }
2895 
2896 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2897   Register ic_reg       = as_Register(Matcher::inline_cache_reg_encode());
2898   int      klass_offset = oopDesc::klass_offset_in_bytes();
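  // The receiver is expected in Z_ARG1. If the klass load below cannot rely on
  // an implicit null check, test the receiver for NULL explicitly and branch
  // to the IC miss handler.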
2899   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2900     if (VM_Version::has_CompareBranch()) {
2901       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2902     } else {
2903       z_ltgr(Z_ARG1, Z_ARG1);
2904       z_bre(ic_miss);
2905     }
2906   }
2907   // Compare cached class against klass from receiver.
2908   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2909   z_brne(ic_miss);
2910 }
2911 
2912 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2913                                                    Register   super_klass,
2914                                                    Register   temp1_reg,
2915                                                    Label*     L_success,
2916                                                    Label*     L_failure,
2917                                                    Label*     L_slow_path,
2918                                                    RegisterOrConstant super_check_offset) {
2919 
2920   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2921   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2922 
2923   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2924   bool need_slow_path = (must_load_sco ||
2925                          super_check_offset.constant_or_zero() == sc_offset);
2926 
2927   // Input registers must not overlap.
2928   assert_different_registers(sub_klass, super_klass, temp1_reg);
2929   if (super_check_offset.is_register()) {
2930     assert_different_registers(sub_klass, super_klass,
2931                                super_check_offset.as_register());
2932   } else if (must_load_sco) {
2933     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2934   }
2935 
2936   const Register Rsuper_check_offset = temp1_reg;
2937 
2938   NearLabel L_fallthrough;
2939   int label_nulls = 0;
2940   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
2941   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
2942   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2943   assert(label_nulls <= 1 ||
2944          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2945          "at most one NULL in the batch, usually");
2946 
2947   BLOCK_COMMENT("check_klass_subtype_fast_path {");
2948   // If the pointers are equal, we are done (e.g., String[] elements).
2949   // This self-check enables sharing of secondary supertype arrays among
2950   // non-primary types such as array-of-interface. Otherwise, each such
2951   // type would need its own customized SSA.
2952   // We move this check to the front of the fast path because many
2953   // type checks are in fact trivially successful in this manner,
2954   // so we get a nicely predicted branch right at the start of the check.
2955   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2956 
2957   // Check the supertype display, which is uint.
2958   if (must_load_sco) {
2959     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
2960     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
2961   }
2962   Address super_check_addr(sub_klass, super_check_offset, 0);
2963   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
2964 
2965   // This check has worked decisively for primary supers.
2966   // Secondary supers are sought in the super_cache ('super_cache_addr').
2967   // (Secondary supers are interfaces and very deeply nested subtypes.)
2968   // This works in the same check above because of a tricky aliasing
2969   // between the super_cache and the primary super display elements.
2970   // (The 'super_check_addr' can address either, as the case requires.)
2971   // Note that the cache is updated below if it does not help us find
2972   // what we need immediately.
2973   // So if it was a primary super, we can just fail immediately.
2974   // Otherwise, it's the slow path for us (no success at this point).
2975 
2976   // Hacked jmp, which may only be used just before L_fallthrough.
2977 #define final_jmp(label)                                                \
2978   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
2979   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
2980 
2981   if (super_check_offset.is_register()) {
2982     branch_optimized(Assembler::bcondEqual, *L_success);
2983     z_cfi(super_check_offset.as_register(), sc_offset);
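    // The display compare above missed. If super_check_offset equals the
    // secondary super cache offset, the result is not yet known (slow path);
    // a miss in a primary display slot is a definite failure.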
2984     if (L_failure == &L_fallthrough) {
2985       branch_optimized(Assembler::bcondEqual, *L_slow_path);
2986     } else {
2987       branch_optimized(Assembler::bcondNotEqual, *L_failure);
2988       final_jmp(*L_slow_path);
2989     }
2990   } else if (super_check_offset.as_constant() == sc_offset) {
2991     // Need a slow path; fast failure is impossible.
2992     if (L_slow_path == &L_fallthrough) {
2993       branch_optimized(Assembler::bcondEqual, *L_success);
2994     } else {
2995       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
2996       final_jmp(*L_success);
2997     }
2998   } else {
2999     // No slow path; it's a fast decision.
3000     if (L_failure == &L_fallthrough) {
3001       branch_optimized(Assembler::bcondEqual, *L_success);
3002     } else {
3003       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3004       final_jmp(*L_success);
3005     }
3006   }
3007 
3008   bind(L_fallthrough);
3009 #undef local_brc
3010 #undef final_jmp
3011   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3012   // fallthru (to slow path)
3013 }
3014 
3015 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3016                                                    Register Rsuperklass,
3017                                                    Register Rarray_ptr,  // tmp
3018                                                    Register Rlength,     // tmp
3019                                                    Label* L_success,
3020                                                    Label* L_failure) {
3021   // Input registers must not overlap.
3022   // Also check for Z_R1 which is explicitly used here.
3023   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3024   NearLabel L_fallthrough, L_loop;
3025   int label_nulls = 0;
3026   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3027   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3028   assert(label_nulls <= 1, "at most one NULL in the batch");
3029 
3030   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3031   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3032 
3033   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3034   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3035 
3036   // Hacked jmp, which may only be used just before L_fallthrough.
3037 #define final_jmp(label)                                                \
3038   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3039   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3040 
3041   NearLabel loop_iterate, loop_count, match;
3042 
3043   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3044   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3045 
3046   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3047   branch_optimized(Assembler::bcondZero, *L_failure);
3048 
3049   // Oops in the table are no longer compressed.
3050   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3051   z_bre(match);                               // Shortcut for array length = 1.
3052 
3053   // No match yet, so we must walk the array's elements.
3054   z_lngfr(Rlength, Rlength);
3055   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3056   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3057   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3058   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3059   z_bru(loop_count);
3060 
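  // Search loop: each iteration checks one array element. z_brxlg adds
  // BytesPerWord to the (negative) byte index in Rlength and branches back
  // while elements remain to be examined.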
3061   BIND(loop_iterate);
3062   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3063   z_bre(match);
3064   BIND(loop_count);
3065   z_brxlg(Rlength, Z_R1, loop_iterate);
3066 
3067   // Rsuperklass not found among secondary super classes -> failure.
3068   branch_optimized(Assembler::bcondAlways, *L_failure);
3069 
3070   // Got a hit. Return success (zero result). Set cache.
3071   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3072 
3073   BIND(match);
3074 
3075   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3076 
3077   final_jmp(*L_success);
3078 
3079   // Exit to the surrounding code.
3080   BIND(L_fallthrough);
3081 #undef local_brc
3082 #undef final_jmp
3083   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3084 }
3085 
3086 // Emitter for combining fast and slow path.
3087 void MacroAssembler::check_klass_subtype(Register sub_klass,
3088                                          Register super_klass,
3089                                          Register temp1_reg,
3090                                          Register temp2_reg,
3091                                          Label&   L_success) {
3092   NearLabel failure;
3093   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3094   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3095                                 &L_success, &failure, NULL);
3096   check_klass_subtype_slow_path(sub_klass, super_klass,
3097                                 temp1_reg, temp2_reg, &L_success, NULL);
3098   BIND(failure);
3099   BLOCK_COMMENT("} check_klass_subtype");
3100 }
3101 
3102 // Increment a counter at counter_address when the eq condition code is
3103 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3104 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3105   Label l;
3106   z_brne(l);
3107   load_const(tmp1_reg, counter_address);
3108   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3109   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3110   bind(l);
3111 }
3112 
3113 // Semantics are dependent on the slow_case label:
3114 //   If the slow_case label is not NULL, failure to biased-lock the object
3115 //   transfers control to the location of the slow_case label. If the
3116 //   object could be biased-locked, control is transferred to the done label.
3117 //   The condition code is unpredictable.
3118 //
3119 //   If the slow_case label is NULL, failure to biased-lock the object results
3120 //   in a transfer of control to the done label with a condition code of not_equal.
3121 //   If the biased lock could be successfully obtained, control is transferred to
3122 //   the done label with a condition code of equal.
3123 //   It is mandatory to evaluate the condition code at the done label.
3124 //
3125 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3126                                           Register  mark_reg,
3127                                           Register  temp_reg,
3128                                           Register  temp2_reg,    // May be Z_R0!
3129                                           Label    &done,
3130                                           Label    *slow_case) {
3131   assert(UseBiasedLocking, "why call this otherwise?");
3132   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3133 
3134   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3135 
3136   BLOCK_COMMENT("biased_locking_enter {");
3137 
3138   // Biased locking
3139   // See whether the lock is currently biased toward our thread and
3140   // whether the epoch is still valid.
3141   // Note that the runtime guarantees sufficient alignment of JavaThread
3142   // pointers to allow age to be placed into low bits.
3143   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
3144          "biased locking makes assumptions about bit layout");
3145   z_lr(temp_reg, mark_reg);
3146   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3147   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3148   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3149 
3150   load_prototype_header(temp_reg, obj_reg);
3151   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
3152 
3153   z_ogr(temp_reg, Z_thread);
3154   z_xgr(temp_reg, mark_reg);
3155   z_ngr(temp_reg, temp2_reg);
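  // temp_reg is now zero iff the mark word, ignoring the age bits, equals
  // (prototype header | current thread), i.e. the object is already biased
  // toward us with a valid epoch.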
3156   if (PrintBiasedLockingStatistics) {
3157     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3158     // Restore mark_reg.
3159     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3160   }
3161   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3162 
3163   Label try_revoke_bias;
3164   Label try_rebias;
3165   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3166 
3167   //----------------------------------------------------------------------------
3168   // At this point we know that the header has the bias pattern and
3169   // that we are not the bias owner in the current epoch. We need to
3170   // figure out more details about the state of the header in order to
3171   // know what operations can be legally performed on the object's
3172   // header.
3173 
3174   // If the low three bits in the xor result aren't clear, that means
3175   // the prototype header is no longer biased and we have to revoke
3176   // the bias on this object.
3177   z_tmll(temp_reg, markOopDesc::biased_lock_mask_in_place);
3178   z_brnaz(try_revoke_bias);
3179 
3180   // Biasing is still enabled for this data type. See whether the
3181   // epoch of the current bias is still valid, meaning that the epoch
3182   // bits of the mark word are equal to the epoch bits of the
3183   // prototype header. (Note that the prototype header's epoch bits
3184   // only change at a safepoint.) If not, attempt to rebias the object
3185   // toward the current thread. Note that we must be absolutely sure
3186   // that the current epoch is invalid in order to do this because
3187   // otherwise the manipulations it performs on the mark word are
3188   // illegal.
3189   z_tmll(temp_reg, markOopDesc::epoch_mask_in_place);
3190   z_brnaz(try_rebias);
3191 
3192   //----------------------------------------------------------------------------
3193   // The epoch of the current bias is still valid but we know nothing
3194   // about the owner; it might be set or it might be clear. Try to
3195   // acquire the bias of the object using an atomic operation. If this
3196   // fails we will go into the runtime to revoke the object's bias.
3197   // Note that we first construct the presumed unbiased header so we
3198   // don't accidentally blow away another thread's valid bias.
3199   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place |
3200          markOopDesc::epoch_mask_in_place);
3201   z_lgr(temp_reg, Z_thread);
3202   z_llgfr(mark_reg, mark_reg);
3203   z_ogr(temp_reg, mark_reg);
3204 
3205   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3206 
3207   z_csg(mark_reg, temp_reg, 0, obj_reg);
3208 
3209   // If the biasing toward our thread failed, this means that
3210   // another thread succeeded in biasing it toward itself and we
3211   // need to revoke that bias. The revocation will occur in the
3212   // interpreter runtime in the slow case.
3213 
3214   if (PrintBiasedLockingStatistics) {
3215     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3216                          temp_reg, temp2_reg);
3217   }
3218   if (slow_case != NULL) {
3219     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3220   }
3221   branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3222 
3223   //----------------------------------------------------------------------------
3224   bind(try_rebias);
3225   // At this point we know the epoch has expired, meaning that the
3226   // current "bias owner", if any, is actually invalid. Under these
3227   // circumstances _only_, we are allowed to use the current header's
3228   // value as the comparison value when doing the cas to acquire the
3229   // bias in the current epoch. In other words, we allow transfer of
3230   // the bias from one thread to another directly in this situation.
3231 
3232   z_nilf(mark_reg, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
3233   load_prototype_header(temp_reg, obj_reg);
3234   z_llgfr(mark_reg, mark_reg);
3235 
3236   z_ogr(temp_reg, Z_thread);
3237 
3238   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3239 
3240   z_csg(mark_reg, temp_reg, 0, obj_reg);
3241 
3242   // If the biasing toward our thread failed, this means that
3243   // another thread succeeded in biasing it toward itself and we
3244   // need to revoke that bias. The revocation will occur in the
3245   // interpreter runtime in the slow case.
3246 
3247   if (PrintBiasedLockingStatistics) {
3248     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3249   }
3250   if (slow_case != NULL) {
3251     branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3252   }
3253   z_bru(done);           // Biased lock status given in condition code.
3254 
3255   //----------------------------------------------------------------------------
3256   bind(try_revoke_bias);
3257   // The prototype mark in the klass doesn't have the bias bit set any
3258   // more, indicating that objects of this data type are not supposed
3259   // to be biased any more. We are going to try to reset the mark of
3260   // this object to the prototype value and fall through to the
3261   // CAS-based locking scheme. Note that if our CAS fails, it means
3262   // that another thread raced us for the privilege of revoking the
3263   // bias of this particular object, so it's okay to continue in the
3264   // normal locking code.
3265   load_prototype_header(temp_reg, obj_reg);
3266 
3267   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3268 
3269   z_csg(mark_reg, temp_reg, 0, obj_reg);
3270 
3271   // Fall through to the normal CAS-based lock, because no matter what
3272   // the result of the above CAS, some thread must have succeeded in
3273   // removing the bias bit from the object's header.
3274   if (PrintBiasedLockingStatistics) {
3275     // z_cgr(mark_reg, temp2_reg);
3276     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3277   }
3278 
3279   bind(cas_label);
3280   BLOCK_COMMENT("} biased_locking_enter");
3281 }
3282 
3283 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3284   // Check for biased locking unlock case, which is a no-op
3285   // Note: we do not have to check the thread ID for two reasons.
3286   // First, the interpreter checks for IllegalMonitorStateException at
3287   // a higher level. Second, if the bias was revoked while we held the
3288   // lock, the object could not be rebiased toward another thread, so
3289   // the bias bit would be clear.
3290   BLOCK_COMMENT("biased_locking_exit {");
3291 
3292   z_lg(temp_reg, 0, mark_addr);
3293   z_nilf(temp_reg, markOopDesc::biased_lock_mask_in_place);
3294 
3295   z_chi(temp_reg, markOopDesc::biased_lock_pattern);
3296   z_bre(done);
3297   BLOCK_COMMENT("} biased_locking_exit");
3298 }
3299 
3300 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3301   Register displacedHeader = temp1;
3302   Register currentHeader = temp1;
3303   Register temp = temp2;
3304   NearLabel done, object_has_monitor;
3305 
3306   BLOCK_COMMENT("compiler_fast_lock_object {");
3307 
3308   // Load markOop from oop into mark.
3309   z_lg(displacedHeader, 0, oop);
3310 
3311   if (try_bias) {
3312     biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3313   }
3314 
3315   // Handle existing monitor.
3316   if ((EmitSync & 0x01) == 0) {
3317     // The object has an existing monitor iff (mark & monitor_value) != 0.
3318     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3319     z_lr(temp, displacedHeader);
3320     z_nill(temp, markOopDesc::monitor_value);
3321     z_brne(object_has_monitor);
3322   }
3323 
3324   // Set mark to markOop | markOopDesc::unlocked_value.
3325   z_oill(displacedHeader, markOopDesc::unlocked_value);
3326 
3327   // Load Compare Value application register.
3328 
3329   // Initialize the box (must happen before we update the object mark).
3330   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3331 
3332   // Memory fence (implied by the csg below).
3333   // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
3334 
3335   // If the compare-and-swap succeeded, then we found an unlocked object and we
3336   // have now locked it.
3337   z_csg(displacedHeader, box, 0, oop);
3338   assert(currentHeader == displacedHeader, "must be same register"); // currentHeader and displacedHeader alias the same register (temp1).
3339   z_bre(done);
3340 
3341   // We did not see an unlocked object so try the fast recursive case.
3342 
3343   z_sgr(currentHeader, Z_SP);
3344   load_const_optimized(temp, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
3345 
3346   z_ngr(currentHeader, temp);
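  // currentHeader is zero (CC == EQ) iff the mark is a stack address within
  // one page of our SP, i.e. this thread already owns the lock (recursive
  // case). The result, zero or not, is stored into the box as the displaced
  // header below.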
3347   //   z_brne(done);
3348   //   z_release();
3349   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3350 
3351   z_bru(done);
3352 
3353   if ((EmitSync & 0x01) == 0) {
3354     Register zero = temp;
3355     Register monitor_tagged = displacedHeader; // Tagged with markOopDesc::monitor_value.
3356     bind(object_has_monitor);
3357     // The object's monitor m is unlocked iff m->owner == NULL,
3358     // otherwise m->owner may contain a thread or a stack address.
3359     //
3360     // Try to CAS m->owner from NULL to current thread.
3361     z_lghi(zero, 0);
3362     // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3363     z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3364     // Store a non-null value into the box.
3365     z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3366 #ifdef ASSERT
3367       z_brne(done);
3368       // We've acquired the monitor, check some invariants.
3369       // Invariant 1: _recursions should be 0.
3370       asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3371                               "monitor->_recursions should be 0", -1);
3372       z_ltgr(zero, zero); // Set CR=EQ.
3373 #endif
3374   }
3375   bind(done);
3376 
3377   BLOCK_COMMENT("} compiler_fast_lock_object");
3378   // If locking was successful, CR should indicate 'EQ'.
3379   // The compiler or the native wrapper generates a branch to the runtime call
3380   // _complete_monitor_locking_Java.
3381 }
3382 
3383 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3384   Register displacedHeader = temp1;
3385   Register currentHeader = temp2;
3386   Register temp = temp1;
3387   Register monitor = temp2;
3388 
3389   Label done, object_has_monitor;
3390 
3391   BLOCK_COMMENT("compiler_fast_unlock_object {");
3392 
3393   if (try_bias) {
3394     biased_locking_exit(oop, currentHeader, done);
3395   }
3396 
3397   // Find the lock address and load the displaced header from the stack.
3398   // If the displaced header is zero, we have a recursive unlock.
3399   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3400   z_bre(done);
3401 
3402   // Handle existing monitor.
3403   if ((EmitSync & 0x02) == 0) {
3404     // The object has an existing monitor iff (mark & monitor_value) != 0.
3405     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3406     guarantee(Immediate::is_uimm16(markOopDesc::monitor_value), "must be half-word");
3407     z_nill(currentHeader, markOopDesc::monitor_value);
3408     z_brne(object_has_monitor);
3409   }
3410 
3411   // Check if it is still a lightweight lock. This is true if we see
3412   // the stack address of the basicLock in the markOop of the object.
3413   // Copy box to currentHeader such that csg does not kill it.
3414   z_lgr(currentHeader, box);
3415   z_csg(currentHeader, displacedHeader, 0, oop);
3416   z_bru(done); // Csg sets CR as desired.
3417 
3418   // Handle existing monitor.
3419   if ((EmitSync & 0x02) == 0) {
3420     bind(object_has_monitor);
3421     z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3422     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3423     z_brne(done);
3424     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3425     z_brne(done);
3426     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3427     z_brne(done);
3428     load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3429     z_brne(done);
3430     z_release();
3431     z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3432   }
3433 
3434   bind(done);
3435 
3436   BLOCK_COMMENT("} compiler_fast_unlock_object");
3437   // flag == EQ indicates success
3438   // flag == NE indicates failure
3439 }
3440 
3441 // Write to card table for modification at store_addr - register is destroyed afterwards.
3442 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
3443   CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
3444   assert(bs->kind() == BarrierSet::CardTableForRS ||
3445          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
3446   assert_different_registers(store_addr, tmp);
3447   z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
3448   load_absolute_address(tmp, (address)bs->byte_map_base);
3449   z_agr(store_addr, tmp);
3450   z_mvi(0, store_addr, 0); // Store byte 0.
3451 }
3452 
3453 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3454   NearLabel Ldone;
3455   z_ltgr(tmp1, value);
3456   z_bre(Ldone);          // Use NULL result as-is.
3457 
3458   z_nill(value, ~JNIHandles::weak_tag_mask);
3459   z_lg(value, 0, value); // Resolve (untagged) jobject.
3460 
3461 #if INCLUDE_ALL_GCS
3462   if (UseG1GC) {
3463     NearLabel Lnot_weak;
3464     z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
3465     z_braz(Lnot_weak);
3466     verify_oop(value);
3467     g1_write_barrier_pre(noreg /* obj */,
3468                          noreg /* offset */,
3469                          value /* pre_val */,
3470                          noreg /* val */,
3471                          tmp1  /* tmp1 */,
3472                          tmp2  /* tmp2 */,
3473                          true  /* pre_val_needed */);
3474     bind(Lnot_weak);
3475   }
3476 #endif // INCLUDE_ALL_GCS
3477   verify_oop(value);
3478   bind(Ldone);
3479 }
3480 
3481 #if INCLUDE_ALL_GCS
3482 
3483 //------------------------------------------------------
3484 // General G1 pre-barrier generator.
3485 // Purpose: record the previous value if it is not null.
3486 // All non-tmps are preserved.
3487 //------------------------------------------------------
3488 void MacroAssembler::g1_write_barrier_pre(Register           Robj,
3489                                           RegisterOrConstant offset,
3490                                           Register           Rpre_val,      // Ideally, this is a non-volatile register.
3491                                           Register           Rval,          // Will be preserved.
3492                                           Register           Rtmp1,         // If Rpre_val is volatile, either Rtmp1
3493                                           Register           Rtmp2,         // or Rtmp2 has to be non-volatile.
3494                                           bool               pre_val_needed // Save Rpre_val across runtime call, caller uses it.
3495                                        ) {
3496   Label callRuntime, filtered;
3497   const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active());
3498   const int buffer_offset = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3499   const int index_offset  = in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3500   assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
3501 
3502   BLOCK_COMMENT("g1_write_barrier_pre {");
3503 
3504   // Is marking active?
3505   // Note: value is loaded for test purposes only. No further use here.
3506   if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
3507     load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
3508   } else {
3509     guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
3510     load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
3511   }
3512   z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
3513 
3514   // Do we need to load the previous value into Rpre_val?
3515   if (Robj != noreg) {
3516     // Load the previous value...
3517     Register ixReg = offset.is_register() ? offset.register_or_noreg() : Z_R0;
3518     if (UseCompressedOops) {
3519       z_llgf(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3520     } else {
3521       z_lg(Rpre_val, offset.constant_or_zero(), ixReg, Robj);
3522     }
3523   }
3524   assert(Rpre_val != noreg, "must have a real register");
3525 
3526   // Is the previous value NULL?
3527   // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
3528   //       Register contents are preserved across the runtime call if the caller requests to do so.
3529   z_ltgr(Rpre_val, Rpre_val);
3530   z_bre(filtered); // previous value is NULL, so we don't need to record it.
3531 
3532   // Decode the oop now. We know it's not NULL.
3533   if (Robj != noreg && UseCompressedOops) {
3534     oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
3535   }
3536 
3537   // OK, it's not filtered, so we'll need to call enqueue.
3538 
3539   // We can store the original value in the thread's buffer
3540   // only if index > 0. Otherwise, we need the runtime to handle it.
3541   // (The index field is typed as size_t.)
3542   Register Rbuffer = Rtmp1, Rindex = Rtmp2;
3543 
3544   z_lg(Rbuffer, buffer_offset, Z_thread);
3545 
3546   load_and_test_long(Rindex, Address(Z_thread, index_offset));
3547   z_bre(callRuntime); // If index == 0, goto runtime.
3548 
3549   add2reg(Rindex, -wordSize); // Decrement index.
3550   z_stg(Rindex, index_offset, Z_thread);
3551 
3552   // Record the previous value.
3553   z_stg(Rpre_val, 0, Rbuffer, Rindex);
3554   z_bru(filtered);  // We are done.
3555 
3556   Rbuffer = noreg;  // end of life
3557   Rindex  = noreg;  // end of life
3558 
3559   bind(callRuntime);
3560 
3561   // Save Rpre_val (result) over runtime call.
3562   // Requires Rtmp1, Rtmp2, or Rpre_val to be non-volatile.
3563   Register Rpre_save = Rpre_val;
3564   if (pre_val_needed && Rpre_val->is_volatile()) {
3565     guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
3566     Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
3567   }
3568   lgr_if_needed(Rpre_save, Rpre_val);
3569 
3570   // Preserve inputs by spilling them into the top frame.
3571   if (Robj != noreg && Robj->is_volatile()) {
3572     z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3573   }
3574   if (offset.is_register() && offset.as_register()->is_volatile()) {
3575     Register Roff = offset.as_register();
3576     z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3577   }
3578   if (Rval != noreg && Rval->is_volatile()) {
3579     z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3580   }
3581 
3582   // Push frame to protect top frame with return pc and spilled register values.
3583   save_return_pc();
3584   push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3585 
3586   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, Z_thread);
3587 
3588   pop_frame();
3589   restore_return_pc();
3590 
3591   // Restore spilled values.
3592   if (Robj != noreg && Robj->is_volatile()) {
3593     z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
3594   }
3595   if (offset.is_register() && offset.as_register()->is_volatile()) {
3596     Register Roff = offset.as_register();
3597     z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
3598   }
3599   if (Rval != noreg && Rval->is_volatile()) {
3600     z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
3601   }
3602 
3603   // Restore Rpre_val (result) after runtime call.
3604   lgr_if_needed(Rpre_val, Rpre_save);
3605 
3606   bind(filtered);
3607   BLOCK_COMMENT("} g1_write_barrier_pre");
3608 }
3609 
3610 // General G1 post-barrier generator.
3611 // Purpose: Store cross-region card.
3612 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr,
3613                                            Register Rnew_val,
3614                                            Register Rtmp1,
3615                                            Register Rtmp2,
3616                                            Register Rtmp3) {
3617   Label callRuntime, filtered;
3618 
3619   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
3620 
3621   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
3622   assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
3623 
3624   BLOCK_COMMENT("g1_write_barrier_post {");
3625 
3626   // Does store cross heap regions?
3627   // It does if the two addresses specify different grain addresses.
3628   if (G1RSBarrierRegionFilter) {
3629     if (VM_Version::has_DistinctOpnds()) {
3630       z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
3631     } else {
3632       z_lgr(Rtmp1, Rstore_addr);
3633       z_xgr(Rtmp1, Rnew_val);
3634     }
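    // Addresses within the same heap region agree in all bits above the region
    // grain size, so the shifted xor is zero for same-region stores, which
    // need no barrier.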
3635     z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
3636     z_bre(filtered);
3637   }
3638 
3639   // Crosses regions, storing NULL?
3640 #ifdef ASSERT
3641   z_ltgr(Rnew_val, Rnew_val);
3642   asm_assert_ne("null oop not allowed (G1)", 0x255); // TODO: also on z? Checked by caller on PPC64, so following branch is obsolete:
3643   z_bre(filtered);  // Safety net: don't break if we have a NULL oop.
3644 #endif
3645   Rnew_val = noreg; // end of lifetime
3646 
3647   // Storing region crossing non-NULL, is card already dirty?
3648   assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
3649   assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
3650   // Make sure not to use Z_R0 for any of these registers.
3651   Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
3652   Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
3653 
3654   // calculate address of card
3655   load_const_optimized(Rbase, (address)bs->byte_map_base);        // Card table base.
3656   z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
3657   add2reg_with_index(Rcard_addr, 0, Rcard_addr, Rbase);           // Explicit calculation needed for cli.
3658   Rbase = noreg; // end of lifetime
3659 
3660   // Filter young.
3661   assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
3662   z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
3663   z_bre(filtered);
3664 
3665   // Check the card value. If dirty, we're done.
3666   // This also avoids false sharing of the (already dirty) card.
3667   z_sync(); // Required to support concurrent cleaning.
3668   assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
3669   z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
3670   z_bre(filtered);
3671 
3672   // Storing a region crossing, non-NULL oop, card is clean.
3673   // Dirty card and log.
3674   z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
3675 
3676   Register Rcard_addr_x = Rcard_addr;
3677   Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
3678   Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
3679   const int qidx_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_index());
3680   const int qbuf_off    = in_bytes(JavaThread::dirty_card_queue_offset() + SATBMarkQueue::byte_offset_of_buf());
3681   if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
3682     Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
3683   }
3684   lgr_if_needed(Rcard_addr_x, Rcard_addr);
3685 
3686   load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
3687   z_bre(callRuntime); // Index == 0 then jump to runtime.
3688 
3689   z_lg(Rqueue_buf, qbuf_off, Z_thread);
3690 
3691   add2reg(Rqueue_index, -wordSize); // Decrement index.
3692   z_stg(Rqueue_index, qidx_off, Z_thread);
3693 
3694   z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
3695   z_bru(filtered);
3696 
3697   bind(callRuntime);
3698 
3699   // TODO: do we need a frame? Introduced to be on the safe side.
3700   bool needs_frame = true;
3701 
3702   // The VM call needs a frame to access (write) registers.
3703   if (needs_frame) {
3704     save_return_pc();
3705     push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
3706   }
3707 
3708   // Save the live input values.
3709   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr_x, Z_thread);
3710 
3711   if (needs_frame) {
3712     pop_frame();
3713     restore_return_pc();
3714   }
3715 
3716   bind(filtered);
3717 
3718   BLOCK_COMMENT("} g1_write_barrier_post");
3719 }
3720 #endif // INCLUDE_ALL_GCS
3721 
3722 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3723 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3724   BLOCK_COMMENT("set_last_Java_frame {");
3725 
3726   // Always set last_Java_pc and flags first because once last_Java_sp
3727   // is visible, has_last_Java_frame is true and users will look at the
3728   // rest of the fields. (Note: flags should always be zero before we
3729   // get here, so they don't need to be set.)
3730 
3731   // Verify that last_Java_pc was zeroed on return to Java.
3732   if (allow_relocation) {
3733     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3734                             Z_thread,
3735                             "last_Java_pc not zeroed before leaving Java",
3736                             0x200);
3737   } else {
3738     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3739                                    Z_thread,
3740                                    "last_Java_pc not zeroed before leaving Java",
3741                                    0x200);
3742   }
3743 
3744   // When returning from calling out from Java mode the frame anchor's
3745   // last_Java_pc will always be set to NULL. It is set here so that
3746   // if we are doing a call to native (not VM) that we capture the
3747   // known pc and don't have to rely on the native call having a
3748   // standard frame linkage where we can find the pc.
3749   if (last_Java_pc!=noreg) {
3750     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3751   }
3752 
3753   // This membar release is not required on z/Architecture, since the sequence of stores
3754   // is maintained. Nevertheless, we leave it in to document the required ordering.
3755   // The implementation of z_release() should be empty.
3756   // z_release();
3757 
3758   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3759   BLOCK_COMMENT("} set_last_Java_frame");
3760 }
3761 
3762 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3763   BLOCK_COMMENT("reset_last_Java_frame {");
3764 
3765   if (allow_relocation) {
3766     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3767                                Z_thread,
3768                                "SP was not set, still zero",
3769                                0x202);
3770   } else {
3771     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3772                                       Z_thread,
3773                                       "SP was not set, still zero",
3774                                       0x202);
3775   }
3776 
3777   // _last_Java_sp = 0
3778   // Clearing storage must be atomic here, so don't use clear_mem()!
3779   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3780 
3781   // _last_Java_pc = 0
3782   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3783 
3784   BLOCK_COMMENT("} reset_last_Java_frame");
3785   return;
3786 }
3787 
3788 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3789   assert_different_registers(sp, tmp1);
3790 
3791   // We cannot trust that code generated by the C++ compiler saves R14
3792   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3793   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3794   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3795   // it into the frame anchor.
3796   get_PC(tmp1);
3797   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3798 }
3799 
3800 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3801   z_release();
3802 
3803   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3804   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3805   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3806 }
3807 
3808 void MacroAssembler::get_vm_result(Register oop_result) {
3809   verify_thread();
3810 
3811   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3812   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3813 
3814   verify_oop(oop_result);
3815 }
3816 
3817 void MacroAssembler::get_vm_result_2(Register result) {
3818   verify_thread();
3819 
3820   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3821   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3822 }
3823 
3824 // We require that C code which does not return a value in vm_result will
3825 // leave it undisturbed.
3826 void MacroAssembler::set_vm_result(Register oop_result) {
3827   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3828 }
3829 
3830 // Explicit null checks (used for method handle code).
3831 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3832   if (!ImplicitNullChecks) {
3833     NearLabel ok;
3834 
3835     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3836 
3837     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3838     address exception_entry = Interpreter::throw_NullPointerException_entry();
3839     load_absolute_address(reg, exception_entry);
3840     z_br(reg);
3841 
3842     bind(ok);
3843   } else {
3844     if (needs_explicit_null_check((intptr_t)offset)) {
3845       // Provoke OS NULL exception if reg = NULL by
3846       // accessing M[reg] w/o changing any registers.
3847       z_lg(tmp, 0, reg);
3848     }
3849     // else
3850       // Nothing to do, (later) access of M[reg + offset]
3851       // will provoke OS NULL exception if reg = NULL.
3852   }
3853 }
3854 
3855 //-------------------------------------
3856 //  Compressed Klass Pointers
3857 //-------------------------------------
3858 
3859 // Klass oop manipulations if compressed.
3860 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3861   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3862   address  base    = Universe::narrow_klass_base();
3863   int      shift   = Universe::narrow_klass_shift();
3864   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3865 
3866   BLOCK_COMMENT("cKlass encoder {");
3867 
3868 #ifdef ASSERT
3869   Label ok;
3870   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3871   z_brc(Assembler::bcondAllZero, ok);
3872   // The plain disassembler does not recognize illtrap. It instead displays
3873   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3874   // the proper beginning of the next instruction.
3875   z_illtrap(0xee);
3876   z_illtrap(0xee);
3877   bind(ok);
3878 #endif
3879 
3880   if (base != NULL) {
3881     unsigned int base_h = ((unsigned long)base)>>32;
3882     unsigned int base_l = (unsigned int)((unsigned long)base);
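    // Three cases for subtracting the base: it fits entirely into the high word
    // (z_aih with the negated high half), entirely into the low word (z_agfi),
    // or has bits in both halves (full 64-bit subtract via Z_R0).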
3883     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3884       lgr_if_needed(dst, current);
3885       z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3886     } else if ((base_h == 0) && (base_l != 0)) {
3887       lgr_if_needed(dst, current);
3888       z_agfi(dst, -(int)base_l);
3889     } else {
3890       load_const(Z_R0, base);
3891       lgr_if_needed(dst, current);
3892       z_sgr(dst, Z_R0);
3893     }
3894     current = dst;
3895   }
3896   if (shift != 0) {
3897     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3898     z_srlg(dst, current, shift);
3899     current = dst;
3900   }
3901   lgr_if_needed(dst, current); // Move may be required (if neither base adjustment nor shift was applied).
3902 
3903   BLOCK_COMMENT("} cKlass encoder");
3904 }
3905 
3906 // This function calculates the size of the code generated by
3907 //   decode_klass_not_null(Register dst)
3908 // when (Universe::heap() != NULL). Hence, if the instructions
3909 // it generates change, then this method needs to be updated.
3910 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3911   address  base    = Universe::narrow_klass_base();
3912   int shift_size   = Universe::narrow_klass_shift() == 0 ? 0 : 6; /* sllg */
3913   int addbase_size = 0;
3914   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3915 
3916   if (base != NULL) {
3917     unsigned int base_h = ((unsigned long)base)>>32;
3918     unsigned int base_l = (unsigned int)((unsigned long)base);
3919     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3920       addbase_size += 6; /* aih */
3921     } else if ((base_h == 0) && (base_l != 0)) {
3922       addbase_size += 6; /* algfi */
3923     } else {
3924       addbase_size += load_const_size();
3925       addbase_size += 4; /* algr */
3926     }
3927   }
3928 #ifdef ASSERT
3929   addbase_size += 10;
3930   addbase_size += 2; // Extra sigill.
3931 #endif
3932   return addbase_size + shift_size;
3933 }
3934 
3935 // !!! If the instructions that get generated here change
3936 //     then function instr_size_for_decode_klass_not_null()
3937 //     needs to get updated.
3938 // This variant of decode_klass_not_null() must generate predictable code!
3939 // The code must only depend on globally known parameters.
3940 void MacroAssembler::decode_klass_not_null(Register dst) {
3941   address  base    = Universe::narrow_klass_base();
3942   int      shift   = Universe::narrow_klass_shift();
3943   int      beg_off = offset();
3944   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3945 
3946   BLOCK_COMMENT("cKlass decoder (const size) {");
3947 
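       // Decoding is the inverse of the encoder above: (narrow klass << shift) + base.
       // Continuing the illustrative sketch from the encoder: 0x248 << 3 = 0x1240,
       // plus base 0x0000080000000000 yields the original klass pointer again.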
3948   if (shift != 0) { // Shift required?
3949     z_sllg(dst, dst, shift);
3950   }
3951   if (base != NULL) {
3952     unsigned int base_h = ((unsigned long)base)>>32;
3953     unsigned int base_l = (unsigned int)((unsigned long)base);
3954     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3955       z_aih(dst, base_h);     // Base has no set bits in lower half.
3956     } else if ((base_h == 0) && (base_l != 0)) {
3957       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3958     } else {
3959       load_const(Z_R0, base); // Base has set bits everywhere.
3960       z_algr(dst, Z_R0);
3961     }
3962   }
3963 
3964 #ifdef ASSERT
3965   Label ok;
3966   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3967   z_brc(Assembler::bcondAllZero, ok);
3968   // The plain disassembler does not recognize illtrap. It instead displays
3969   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
3970   // the proper beginning of the next instruction.
3971   z_illtrap(0xd1);
3972   z_illtrap(0xd1);
3973   bind(ok);
3974 #endif
3975   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3976 
3977   BLOCK_COMMENT("} cKlass decoder (const size)");
3978 }
3979 
3980 // This variant of decode_klass_not_null() is for cases where
3981 //  1) the size of the generated instructions may vary
3982 //  2) the result is (potentially) stored in a register different from the source.
3983 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3984   address base  = Universe::narrow_klass_base();
3985   int     shift = Universe::narrow_klass_shift();
3986   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3987 
3988   BLOCK_COMMENT("cKlass decoder {");
3989 
3990   if (src == noreg) src = dst;
3991 
3992   if (shift != 0) { // Shift or at least move required?
3993     z_sllg(dst, src, shift);
3994   } else {
3995     lgr_if_needed(dst, src);
3996   }
3997 
3998   if (base != NULL) {
3999     unsigned int base_h = ((unsigned long)base)>>32;
4000     unsigned int base_l = (unsigned int)((unsigned long)base);
4001     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4002       z_aih(dst, base_h);     // Base has no set bits in lower half.
4003     } else if ((base_h == 0) && (base_l != 0)) {
4004       z_algfi(dst, base_l);   // Base has no set bits in upper half.
4005     } else {
4006       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
4007       z_algr(dst, Z_R0);
4008     }
4009   }
4010 
4011 #ifdef ASSERT
4012   Label ok;
4013   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
4014   z_brc(Assembler::bcondAllZero, ok);
4015   // The plain disassembler does not recognize illtrap. It instead displays
4016   // a 32-bit value. Issuing two illtraps ensures the disassembler finds
4017   // the proper beginning of the next instruction.
4018   z_illtrap(0xd2);
4019   z_illtrap(0xd2);
4020   bind(ok);
4021 #endif
4022   BLOCK_COMMENT("} cKlass decoder");
4023 }
4024 
4025 void MacroAssembler::load_klass(Register klass, Address mem) {
4026   if (UseCompressedClassPointers) {
4027     z_llgf(klass, mem);
4028     // Attention: no null check here!
4029     decode_klass_not_null(klass);
4030   } else {
4031     z_lg(klass, mem);
4032   }
4033 }
4034 
4035 void MacroAssembler::load_klass(Register klass, Register src_oop) {
4036   if (UseCompressedClassPointers) {
4037     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4038     // Attention: no null check here!
4039     decode_klass_not_null(klass);
4040   } else {
4041     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
4042   }
4043 }
4044 
4045 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
4046   assert_different_registers(Rheader, Rsrc_oop);
4047   load_klass(Rheader, Rsrc_oop);
4048   z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
4049 }
4050 
4051 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
4052   if (UseCompressedClassPointers) {
4053     assert_different_registers(dst_oop, klass, Z_R0);
4054     if (ck == noreg) ck = klass;
4055     encode_klass_not_null(ck, klass);
4056     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4057   } else {
4058     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
4059   }
4060 }
4061 
4062 void MacroAssembler::store_klass_gap(Register s, Register d) {
4063   if (UseCompressedClassPointers) {
4064     assert(s != d, "not enough registers");
4065     z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
4066   }
4067 }
4068 
4069 // Compare klass ptr in memory against klass ptr in register.
4070 //
4071 // Rop1            - klass in register, always uncompressed.
4072 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
4073 // Rbase           - Base address of cKlass in memory.
4074 // maybeNULL       - True if Rop1 possibly is a NULL.
4075 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
4076 
4077   BLOCK_COMMENT("compare klass ptr {");
4078 
4079   if (UseCompressedClassPointers) {
4080     const int shift = Universe::narrow_klass_shift();
4081     address   base  = Universe::narrow_klass_base();
4082 
4083     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
4084     assert_different_registers(Rop1, Z_R0);
4085     assert_different_registers(Rop1, Rbase, Z_R1);
4086 
4087     // First encode register oop and then compare with cOop in memory.
4088     // This sequence saves an unnecessary cOop load and decode.
4089     if (base == NULL) {
4090       if (shift == 0) {
4091         z_cl(Rop1, disp, Rbase);     // Unscaled
4092       } else {
4093         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
4094         z_cl(Z_R0, disp, Rbase);
4095       }
4096     } else {                         // HeapBased
4097 #ifdef ASSERT
4098       bool     used_R0 = true;
4099       bool     used_R1 = true;
4100 #endif
4101       Register current = Rop1;
4102       Label    done;
4103 
4104       if (maybeNULL) {       // NULL ptr must be preserved!
4105         z_ltgr(Z_R0, current);
4106         z_bre(done);
4107         current = Z_R0;
4108       }
4109 
4110       unsigned int base_h = ((unsigned long)base)>>32;
4111       unsigned int base_l = (unsigned int)((unsigned long)base);
4112       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
4113         lgr_if_needed(Z_R0, current);
4114         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
4115       } else if ((base_h == 0) && (base_l != 0)) {
4116         lgr_if_needed(Z_R0, current);
4117         z_agfi(Z_R0, -(int)base_l);
4118       } else {
4119         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4120         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
4121       }
4122 
4123       if (shift != 0) {
4124         z_srlg(Z_R0, Z_R0, shift);
4125       }
4126       bind(done);
4127       z_cl(Z_R0, disp, Rbase);
4128 #ifdef ASSERT
4129       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4130       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4131 #endif
4132     }
4133   } else {
4134     z_clg(Rop1, disp, Z_R0, Rbase);
4135   }
4136   BLOCK_COMMENT("} compare klass ptr");
4137 }
4138 
4139 //---------------------------
4140 //  Compressed oops
4141 //---------------------------
4142 
4143 void MacroAssembler::encode_heap_oop(Register oop) {
4144   oop_encoder(oop, oop, true /*maybe null*/);
4145 }
4146 
4147 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
4148   oop_encoder(oop, oop, false /*not null*/);
4149 }
4150 
4151 // Called with something derived from the oop base. e.g. oop_base>>3.
4152 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
4153   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
4154   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
4155   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
4156   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
4157   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
4158                                + (oop_base_lh == 0 ? 0:1)
4159                                + (oop_base_hl == 0 ? 0:1)
4160                                + (oop_base_hh == 0 ? 0:1);
4161 
4162   assert(oop_base != 0, "This is for HeapBased cOops only");
4163 
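       // Illustrative example (sketch values only): oop_base = 0x77fff000 has two
       // non-zero 16-bit parts (0x77ff, 0xf000). pow2_offset = 0x10000 - 0xf000 = 0x1000
       // rounds it up to 0x78000000, which has a single non-zero part, so -0x1000 is
       // returned; get_oop_base() then loads the rounded-up value and the caller adds
       // the returned offset back as a displacement.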
4164   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
4165     uint64_t pow2_offset = 0x10000 - oop_base_ll;
4166     if (pow2_offset < 0x8000) {  // This might not be necessary.
4167       uint64_t oop_base2 = oop_base + pow2_offset;
4168 
4169       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
4170       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
4171       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
4172       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
4173       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
4174                         (oop_base_lh == 0 ? 0:1) +
4175                         (oop_base_hl == 0 ? 0:1) +
4176                         (oop_base_hh == 0 ? 0:1);
4177       if (n_notzero_parts == 1) {
4178         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
4179         return -pow2_offset;
4180       }
4181     }
4182   }
4183   return 0;
4184 }
4185 
4186 // If base address is offset from a straight power of two by just a few pages,
4187 // return this offset to the caller for a possible later composite add.
4188 // TODO/FIX: will only work correctly for 4k pages.
4189 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
4190   int pow2_offset = get_oop_base_pow2_offset(oop_base);
4191 
4192   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
4193 
4194   return pow2_offset;
4195 }
4196 
4197 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
4198   int offset = get_oop_base(Rbase, oop_base);
4199   z_lcgr(Rbase, Rbase);
4200   return -offset;
4201 }
4202 
4203 // Compare compressed oop in memory against oop in register.
4204 // Rop1            - Oop in register.
4205 // disp            - Offset of cOop in memory.
4206 // Rbase           - Base address of cOop in memory.
4207 // maybeNULL       - True if Rop1 possibly is a NULL.
4209 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
4210   Register Rbase  = mem.baseOrR0();
4211   Register Rindex = mem.indexOrR0();
4212   int64_t  disp   = mem.disp();
4213 
4214   const int shift = Universe::narrow_oop_shift();
4215   address   base  = Universe::narrow_oop_base();
4216 
4217   assert(UseCompressedOops, "must be on to call this method");
4218   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
4219   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4220   assert_different_registers(Rop1, Z_R0);
4221   assert_different_registers(Rop1, Rbase, Z_R1);
4222   assert_different_registers(Rop1, Rindex, Z_R1);
4223 
4224   BLOCK_COMMENT("compare heap oop {");
4225 
4226   // First encode register oop and then compare with cOop in memory.
4227   // This sequence saves an unnecessary cOop load and decode.
4228   if (base == NULL) {
4229     if (shift == 0) {
4230       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4231     } else {
4232       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4233       z_cl(Z_R0, disp, Rindex, Rbase);
4234     }
4235   } else {                              // HeapBased
4236 #ifdef ASSERT
4237     bool  used_R0 = true;
4238     bool  used_R1 = true;
4239 #endif
4240     Label done;
4241     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4242 
4243     if (maybeNULL) {       // NULL ptr must be preserved!
4244       z_ltgr(Z_R0, Rop1);
4245       z_bre(done);
4246     }
4247 
4248     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4249     z_srlg(Z_R0, Z_R0, shift);
4250 
4251     bind(done);
4252     z_cl(Z_R0, disp, Rindex, Rbase);
4253 #ifdef ASSERT
4254     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4255     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4256 #endif
4257   }
4258   BLOCK_COMMENT("} compare heap oop");
4259 }
4260 
4261 // Load heap oop and decompress, if necessary.
4262 void  MacroAssembler::load_heap_oop(Register dest, const Address &a) {
4263   if (UseCompressedOops) {
4264     z_llgf(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4265     oop_decoder(dest, dest, true);
4266   } else {
4267     z_lg(dest, a.disp(), a.indexOrR0(), a.baseOrR0());
4268   }
4269 }
4270 
4271 // Load heap oop and decompress, if necessary.
4272 void MacroAssembler::load_heap_oop(Register dest, int64_t disp, Register base) {
4273   if (UseCompressedOops) {
4274     z_llgf(dest, disp, base);
4275     oop_decoder(dest, dest, true);
4276   } else {
4277     z_lg(dest, disp, base);
4278   }
4279 }
4280 
4281 // Load heap oop and decompress, if necessary.
4282 void MacroAssembler::load_heap_oop_not_null(Register dest, int64_t disp, Register base) {
4283   if (UseCompressedOops) {
4284     z_llgf(dest, disp, base);
4285     oop_decoder(dest, dest, false);
4286   } else {
4287     z_lg(dest, disp, base);
4288   }
4289 }
4290 
4291 // Compress, if necessary, and store oop to heap.
4292 void MacroAssembler::store_heap_oop(Register Roop, RegisterOrConstant offset, Register base) {
4293   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4294   if (UseCompressedOops) {
4295     assert_different_registers(Roop, offset.register_or_noreg(), base);
4296     encode_heap_oop(Roop);
4297     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4298   } else {
4299     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4300   }
4301 }
4302 
4303 // Compress, if necessary, and store oop to heap. Oop is guaranteed to be not NULL.
4304 void MacroAssembler::store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base) {
4305   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4306   if (UseCompressedOops) {
4307     assert_different_registers(Roop, offset.register_or_noreg(), base);
4308     encode_heap_oop_not_null(Roop);
4309     z_st(Roop, offset.constant_or_zero(), Ridx, base);
4310   } else {
4311     z_stg(Roop, offset.constant_or_zero(), Ridx, base);
4312   }
4313 }
4314 
4315 // Store NULL oop to heap.
4316 void MacroAssembler::store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base) {
4317   Register Ridx = offset.is_register() ? offset.register_or_noreg() : Z_R0;
4318   if (UseCompressedOops) {
4319     z_st(zero, offset.constant_or_zero(), Ridx, base);
4320   } else {
4321     z_stg(zero, offset.constant_or_zero(), Ridx, base);
4322   }
4323 }
4324 
4325 //-------------------------------------------------
4326 // Encode compressed oop. Generally usable encoder.
4327 //-------------------------------------------------
4328 // Rsrc - contains regular oop on entry. It remains unchanged.
4329 // Rdst - contains compressed oop on exit.
4330 // Rdst and Rsrc may indicate same register, in which case Rsrc is modified.
4331 //
4332 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4333 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4334 //
4335 // only32bitValid is set if later code uses only the lower 32 bits. In this
4336 // case we need not clear the upper 32 bits.
4337 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4338                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4339 
4340   const address oop_base  = Universe::narrow_oop_base();
4341   const int     oop_shift = Universe::narrow_oop_shift();
4342   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4343 
4344   assert(UseCompressedOops, "must be on to call this method");
4345   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4346   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4347 
4348   if (disjoint || (oop_base == NULL)) {
4349     BLOCK_COMMENT("cOop encoder zeroBase {");
4350     if (oop_shift == 0) {
4351       if (oop_base != NULL && !only32bitValid) {
4352         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4353       } else {
4354         lgr_if_needed(Rdst, Rsrc);
4355       }
4356     } else {
4357       z_srlg(Rdst, Rsrc, oop_shift);
4358       if (oop_base != NULL && !only32bitValid) {
4359         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4360       }
4361     }
4362     BLOCK_COMMENT("} cOop encoder zeroBase");
4363     return;
4364   }
4365 
4366   bool used_R0 = false;
4367   bool used_R1 = false;
4368 
4369   BLOCK_COMMENT("cOop encoder general {");
4370   assert_different_registers(Rdst, Z_R1);
4371   assert_different_registers(Rsrc, Rbase);
4372   if (maybeNULL) {
4373     Label done;
4374     // We reorder shifting and subtracting, so that we can compare
4375     // and shift in parallel:
4376     //
4377     // cycle 0:  potential LoadN, base = <const>
4378     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4379     // cycle 2:  if (cr) br,      dst = dst + base + offset
4380 
4381     // Get oop_base components.
4382     if (pow2_offset == -1) {
4383       if (Rdst == Rbase) {
4384         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4385           Rbase = Z_R0;
4386           used_R0 = true;
4387         } else {
4388           Rdst = Z_R1;
4389           used_R1 = true;
4390         }
4391       }
4392       if (Rbase == Z_R1) {
4393         used_R1 = true;
4394       }
4395       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4396     }
4397     assert_different_registers(Rdst, Rbase);
4398 
4399     // Check for NULL oop (must be left alone) and shift.
4400     if (oop_shift != 0) {  // Shift out alignment bits
4401       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4402         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4403       } else {
4404         z_srlg(Rdst, Rsrc, oop_shift);
4405         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4406         // This probably is faster, as it does not write a register. No!
4407         // z_cghi(Rsrc, 0);
4408       }
4409     } else {
4410       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4411     }
4412     z_bre(done);
4413 
4414     // Subtract oop_base components.
4415     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4416       z_algr(Rdst, Rbase);
4417       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4418     } else {
4419       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4420     }
4421     if (!only32bitValid) {
4422       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4423     }
4424     bind(done);
4425 
4426   } else {  // not null
4427     // Get oop_base components.
4428     if (pow2_offset == -1) {
4429       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4430     }
4431 
4432     // Subtract oop_base components and shift.
4433     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4434       // Don't use lay instruction.
4435       if (Rdst == Rsrc) {
4436         z_algr(Rdst, Rbase);
4437       } else {
4438         lgr_if_needed(Rdst, Rbase);
4439         z_algr(Rdst, Rsrc);
4440       }
4441       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4442     } else {
4443       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4444     }
4445     if (oop_shift != 0) {   // Shift out alignment bits.
4446       z_srlg(Rdst, Rdst, oop_shift);
4447     }
4448     if (!only32bitValid) {
4449       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4450     }
4451   }
4452 #ifdef ASSERT
4453   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4454   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4455 #endif
4456   BLOCK_COMMENT("} cOop encoder general");
4457 }
4458 
4459 //-------------------------------------------------
4460 // Decode compressed oop. Generally usable decoder.
4461 //-------------------------------------------------
4462 // Rsrc - contains compressed oop on entry.
4463 // Rdst - contains regular oop on exit.
4464 // Rdst and Rsrc may indicate same register.
4465 // Rdst must not be the same register as Rbase, if Rbase was preloaded (before call).
4466 // Rdst can be the same register as Rbase. Then, either Z_R0 or Z_R1 must be available as scratch.
4467 // Rbase - register to use for the base
4468 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4469 // For performance, it is good to
4470 //  - avoid Z_R0 for any of the argument registers.
4471 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4472 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
4473 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4474 
4475   const address oop_base  = Universe::narrow_oop_base();
4476   const int     oop_shift = Universe::narrow_oop_shift();
4477   const bool    disjoint  = Universe::narrow_oop_base_disjoint();
4478 
4479   assert(UseCompressedOops, "must be on to call this method");
4480   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4481   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4482          "cOop encoder detected bad shift");
4483 
4484   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4485 
4486   if (oop_base != NULL) {
4487     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4488     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4489     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4490     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4491       BLOCK_COMMENT("cOop decoder disjointBase {");
4492       // We do not need to load the base. Instead, we can install the upper bits
4493       // with an OR instead of an ADD.
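           // Sketch of why OR works (illustrative base value only): with a disjoint
           // base such as 0x0000080000000000, no bit of (narrow_oop << shift) overlaps
           // a bit of the base, so (narrow_oop << shift) | base == (narrow_oop << shift) + base.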
4494       Label done;
4495 
4496       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4497       if (maybeNULL) {  // NULL ptr must be preserved!
4498         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4499         z_bre(done);
4500       } else {
4501         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4502       }
4503       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4504         z_oihf(Rdst, oop_base_hf);
4505       } else if (oop_base_hl != 0) {
4506         z_oihl(Rdst, oop_base_hl);
4507       } else {
4508         assert(oop_base_hh != 0, "not heapbased mode");
4509         z_oihh(Rdst, oop_base_hh);
4510       }
4511       bind(done);
4512       BLOCK_COMMENT("} cOop decoder disjointBase");
4513     } else {
4514       BLOCK_COMMENT("cOop decoder general {");
4515       // There are three decode steps:
4516       //   scale oop offset (shift left)
4517       //   get base (in reg) and pow2_offset (constant)
4518       //   add base, pow2_offset, and oop offset
4519       // The following register overlap situations may exist:
4520       // Rdst == Rsrc,  Rbase any other
4521       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4522       //   Loading Rbase does not impact the scaled offset.
4523       // Rdst == Rbase, Rsrc  any other
4524       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4525       //   would destroy the scaled offset.
4526       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4527       //           use Rbase_tmp if base has to be loaded.
4528       // Rsrc == Rbase, Rdst  any other
4529       //   Only possible without preloaded Rbase.
4530       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4531       // Rsrc == Rbase, Rdst == Rbase
4532       //   Only possible without preloaded Rbase.
4533       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4534       //   Remedy: use Rbase_tmp.
4535       //
4536       Label    done;
4537       Register Rdst_tmp       = Rdst;
4538       Register Rbase_tmp      = Rbase;
4539       bool     used_R0        = false;
4540       bool     used_R1        = false;
4541       bool     base_preloaded = pow2_offset >= 0;
4542       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4543       assert(oop_shift != 0, "room for optimization");
4544 
4545       // Check if we need to use scratch registers.
4546       if (Rdst == Rbase) {
4547         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4548         if (Rdst != Rsrc) {
4549           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4550           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4551         } else {
4552           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4553         }
4554       }
4555       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4556 
4557       // Scale oop and check for NULL.
4558       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4559       if (maybeNULL) {  // NULL ptr must be preserved!
4560         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4561         z_bre(done);
4562       } else {
4563         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4564       }
4565 
4566       // Get oop_base components.
4567       if (!base_preloaded) {
4568         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4569       }
4570 
4571       // Add up all components.
4572       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4573         z_algr(Rdst_tmp, Rbase_tmp);
4574         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4575       } else {
4576         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4577       }
4578 
4579       bind(done);
4580       lgr_if_needed(Rdst, Rdst_tmp);
4581 #ifdef ASSERT
4582       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4583       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4584 #endif
4585       BLOCK_COMMENT("} cOop decoder general");
4586     }
4587   } else {
4588     BLOCK_COMMENT("cOop decoder zeroBase {");
4589     if (oop_shift == 0) {
4590       lgr_if_needed(Rdst, Rsrc);
4591     } else {
4592       z_sllg(Rdst, Rsrc, oop_shift);
4593     }
4594     BLOCK_COMMENT("} cOop decoder zeroBase");
4595   }
4596 }
4597 
4598 void MacroAssembler::load_mirror(Register mirror, Register method) {
4599   mem2reg_opt(mirror, Address(method, Method::const_offset()));
4600   mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
4601   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4602   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4603 }
4604 
4605 //---------------------------------------------------------------
4606 //---  Operations on arrays.
4607 //---------------------------------------------------------------
4608 
4609 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4610 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4611 // work registers anyway.
4612 // Actually, only r0, r1, and r5 are killed.
4613 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len) {
4614   // Src_addr is evenReg.
4615   // Src_len is odd_Reg.
4616 
4617   int      block_start = offset();
4618   Register tmp_reg  = src_len; // Holds target instr addr for EX.
4619   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4620   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4621 
4622   Label doXC, doMVCLE, done;
4623 
4624   BLOCK_COMMENT("Clear_Array {");
4625 
4626   // Check for zero len and convert to long.
4627   z_ltgfr(src_len, cnt_arg);      // Keep sign-extended (long) count in src_len; also sets CC for the zero check.
4628   z_bre(done);                    // Nothing to do if len == 0.
4629 
4630   // Prefetch data to be cleared.
4631   if (VM_Version::has_Prefetch()) {
4632     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4633     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4634   }
4635 
4636   z_sllg(dst_len, src_len, 3);    // #bytes to clear.
4637   z_cghi(src_len, 32);            // Check for len <= 256 bytes (<=32 DW).
4638   z_brnh(doXC);                   // If so, use executed XC to clear.
4639 
4640   // MVCLE: initialize long arrays (general case).
4641   bind(doMVCLE);
4642   z_lgr(dst_addr, base_pointer_arg);
4643   clear_reg(src_len, true, false); // Src len of MVCLE is zero.
4644 
4645   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4646   z_bru(done);
4647 
4648   // XC: initialize short arrays.
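       // XC with identical source and destination zeroes the operand (x ^ x == 0).
       // The template below is never executed in place; EX/EXRL executes it with the
       // length field supplied at run time from dst_len.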
4649   Label XC_template; // Instr template, never exec directly!
4650     bind(XC_template);
4651     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4652 
4653   bind(doXC);
4654     add2reg(dst_len, -1);             // Get #bytes-1 for EXECUTE.
4655     if (VM_Version::has_ExecuteExtensions()) {
4656       z_exrl(dst_len, XC_template);   // Execute XC with var. len.
4657     } else {
4658       z_larl(tmp_reg, XC_template);
4659       z_ex(dst_len,0,Z_R0,tmp_reg);   // Execute XC with var. len.
4660     }
4661     // z_bru(done);      // fallthru
4662 
4663   bind(done);
4664 
4665   BLOCK_COMMENT("} Clear_Array");
4666 
4667   int block_end = offset();
4668   return block_end - block_start;
4669 }
4670 
4671 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4672 // Emitter does not KILL any arguments nor work registers.
4673 // Emitter generates up to 16 XC instructions, depending on the array length.
4674 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4675   int  block_start    = offset();
4676   int  off;
4677   int  lineSize_Bytes = AllocatePrefetchStepSize;
4678   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4679   bool doPrefetch     = VM_Version::has_Prefetch();
4680   int  XC_maxlen      = 256;
4681   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4682 
4683   BLOCK_COMMENT("Clear_Array_Const {");
4684   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
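       // Illustrative example (sketch values only): cnt = 40 doublewords = 320 bytes
       // gives numXCInstr = 2; the loop below emits one full 256-byte XC and the
       // trailing check emits one more XC clearing the remaining 64 bytes.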
4685 
4686   // Do less prefetching for very short arrays.
4687   if (numXCInstr > 0) {
4688     // Prefetch only some cache lines, then begin clearing.
4689     if (doPrefetch) {
4690       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4691         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4692       } else {
4693         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4694         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4695           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4696         }
4697       }
4698     }
4699 
4700     for (off=0; off<(numXCInstr-1); off++) {
4701       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4702 
4703       // Prefetch some cache lines in advance.
4704       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4705         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4706       }
4707     }
4708     if (off*XC_maxlen < cnt*BytesPerWord) {
4709       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4710     }
4711   }
4712   BLOCK_COMMENT("} Clear_Array_Const");
4713 
4714   int block_end = offset();
4715   return block_end - block_start;
4716 }
4717 
4718 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4719 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4720 // work registers anyway.
4721 // Actually, only r0, r1, r4, and r5 (which are work registers) are killed.
4722 //
4723 // For very large arrays, exploit MVCLE H/W support.
4724 // MVCLE instruction automatically exploits H/W-optimized page mover.
4725 // - Bytes up to next page boundary are cleared with a series of XC to self.
4726 // - All full pages are cleared with the page mover H/W assist.
4727 // - Remaining bytes are again cleared by a series of XC to self.
4728 //
4729 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len) {
4730   // Src_addr is evenReg.
4731   // Src_len is odd_Reg.
4732 
4733   int      block_start = offset();
4734   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4735   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4736 
4737   BLOCK_COMMENT("Clear_Array_Const_Big {");
4738 
4739   // Get len to clear.
4740   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4741 
4742   // Prepare other args to MVCLE.
4743   z_lgr(dst_addr, base_pointer_arg);
4744   // Indicate unused result.
4745   (void) clear_reg(src_len, true, false);  // Src len of MVCLE is zero.
4746 
4747   // Clear.
4748   MacroAssembler::move_long_ext(dst_addr, src_addr, 0);
4749   BLOCK_COMMENT("} Clear_Array_Const_Big");
4750 
4751   int block_end = offset();
4752   return block_end - block_start;
4753 }
4754 
4755 // Allocator.
4756 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4757                                                            Register cnt_reg,
4758                                                            Register tmp1_reg, Register tmp2_reg) {
4759   // Tmp1 is oddReg.
4760   // Tmp2 is evenReg.
4761 
4762   int block_start = offset();
4763   Label doMVC, doMVCLE, done, MVC_template;
4764 
4765   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4766 
4767   // Check for zero len and convert to long.
4768   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend count to 64 bit; also sets CC for the zero check.
4769   z_bre(done);                    // Nothing to do if len == 0.
4770 
4771   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4772 
4773   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4774   z_brnh(doMVC);                  // If so, use executed MVC to clear.
4775 
4776   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4777   // Prep dest reg pair.
4778   z_lgr(Z_R0, dst_reg);           // dst addr
4779   // Dst len already in Z_R1.
4780   // Prep src reg pair.
4781   z_lgr(tmp2_reg, src_reg);       // src addr
4782   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4783 
4784   // Do the copy.
4785   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4786   z_bru(done);                         // All done.
4787 
4788   bind(MVC_template);             // Just some data (not more than 256 bytes).
4789   z_mvc(0, 0, dst_reg, 0, src_reg);
4790 
4791   bind(doMVC);
4792 
4793   if (VM_Version::has_ExecuteExtensions()) {
4794     add2reg(Z_R1, -1);
4795   } else {
4796     add2reg(tmp1_reg, -1, Z_R1);
4797     z_larl(Z_R1, MVC_template);
4798   }
4799 
4800   if (VM_Version::has_Prefetch()) {
4801     z_pfd(1,  0,Z_R0,src_reg);
4802     z_pfd(2,  0,Z_R0,dst_reg);
4803     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4804     //    z_pfd(2,256,Z_R0,dst_reg);
4805   }
4806 
4807   if (VM_Version::has_ExecuteExtensions()) {
4808     z_exrl(Z_R1, MVC_template);
4809   } else {
4810     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4811   }
4812 
4813   bind(done);
4814 
4815   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4816 
4817   int block_end = offset();
4818   return block_end - block_start;
4819 }
4820 
4821 //------------------------------------------------------
4822 //   Special String Intrinsics. Implementation
4823 //------------------------------------------------------
4824 
4825 // Intrinsics for CompactStrings
4826 
4827 // Compress char[] to byte[]. odd_reg contains cnt. Kills dst. Early clobber: result
4828 // The result is the number of characters copied before the first incompatible character was found.
4829 // If tmp2 is provided and the compression fails, the compression stops exactly at this point and the result is precise.
4830 //
4831 // Note: Does not behave exactly like package private StringUTF16 compress java implementation in case of failure:
4832 // - Different number of characters may have been written to dead array (if tmp2 not provided).
4833 // - Returns a number <cnt instead of 0. (Result gets compared with cnt.)
4834 unsigned int MacroAssembler::string_compress(Register result, Register src, Register dst, Register odd_reg,
4835                                              Register even_reg, Register tmp, Register tmp2) {
4836   int block_start = offset();
4837   Label Lloop1, Lloop2, Lslow, Ldone;
4838   const Register addr2 = dst, ind1 = result, mask = tmp;
4839   const bool precise = (tmp2 != noreg);
4840 
4841   BLOCK_COMMENT("string_compress {");
4842 
4843   z_sll(odd_reg, 1);       // Number of bytes to read. (Must be a positive simm32.)
4844   clear_reg(ind1);         // Index to read.
4845   z_llilf(mask, 0xFF00FF00);
4846   z_ahi(odd_reg, -16);     // Last possible index for fast loop.
4847   z_brl(Lslow);
4848 
4849   // ind1: index, even_reg: index increment, odd_reg: index limit
4850   z_iihf(mask, 0xFF00FF00);
4851   z_lhi(even_reg, 16);
4852 
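       // Fast loop below: 8 UTF-16 chars per iteration. The mask 0xFF00FF00 (in both
       // register halves) detects chars with a non-zero high byte, i.e. chars that do
       // not fit into a byte. STCMH/STCM with mask 0b0101 store only bytes 1 and 3 of
       // each register half, i.e. the low byte of every char (big-endian layout).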
4853   bind(Lloop1); // 8 Characters per iteration.
4854   z_lg(Z_R0, Address(src, ind1));
4855   z_lg(Z_R1, Address(src, ind1, 8));
4856   if (precise) {
4857     if (VM_Version::has_DistinctOpnds()) {
4858       z_ogrk(tmp2, Z_R0, Z_R1);
4859     } else {
4860       z_lgr(tmp2, Z_R0);
4861       z_ogr(tmp2, Z_R1);
4862     }
4863     z_ngr(tmp2, mask);
4864     z_brne(Lslow);         // Failed fast case, retry slowly.
4865   }
4866   z_stcmh(Z_R0, 5, 0, addr2);
4867   z_stcm(Z_R0, 5, 2, addr2);
4868   if (!precise) { z_ogr(Z_R0, Z_R1); }
4869   z_stcmh(Z_R1, 5, 4, addr2);
4870   z_stcm(Z_R1, 5, 6, addr2);
4871   if (!precise) {
4872     z_ngr(Z_R0, mask);
4873     z_brne(Ldone);         // Failed (more than needed was written).
4874   }
4875   z_aghi(addr2, 8);
4876   z_brxle(ind1, even_reg, Lloop1);
4877 
4878   bind(Lslow);
4879   // Compute index limit and skip if negative.
4880   z_ahi(odd_reg, 16-2);    // Last possible index for slow loop.
4881   z_lhi(even_reg, 2);
4882   z_cr(ind1, odd_reg);
4883   z_brh(Ldone);
4884 
4885   bind(Lloop2); // 1 Character per iteration.
4886   z_llh(Z_R0, Address(src, ind1));
4887   z_tmll(Z_R0, 0xFF00);
4888   z_brnaz(Ldone);          // Failed slow case: Return number of written characters.
4889   z_stc(Z_R0, Address(addr2));
4890   z_aghi(addr2, 1);
4891   z_brxle(ind1, even_reg, Lloop2);
4892 
4893   bind(Ldone);             // result = ind1 = 2*cnt
4894   z_srl(ind1, 1);
4895 
4896   BLOCK_COMMENT("} string_compress");
4897 
4898   return offset() - block_start;
4899 }
4900 
4901 // Inflate byte[] to char[].
4902 unsigned int MacroAssembler::string_inflate_trot(Register src, Register dst, Register cnt, Register tmp) {
4903   int block_start = offset();
4904 
4905   BLOCK_COMMENT("string_inflate {");
4906 
4907   Register stop_char = Z_R0;
4908   Register table     = Z_R1;
4909   Register src_addr  = tmp;
4910 
4911   assert_different_registers(Z_R0, Z_R1, tmp, src, dst, cnt);
4912   assert(dst->encoding()%2 == 0, "must be even reg");
4913   assert(cnt->encoding()%2 == 1, "must be odd reg");
4914   assert(cnt->encoding() - dst->encoding() == 1, "must be even/odd pair");
4915 
4916   StubRoutines::zarch::generate_load_trot_table_addr(this, table);  // kills Z_R0 (if ASSERT)
4917   clear_reg(stop_char);  // Stop character. Not used here, but initialized to have a defined value.
4918   lgr_if_needed(src_addr, src);
4919   z_llgfr(cnt, cnt);     // # src characters, must be a positive simm32.
4920 
4921   translate_ot(dst, src_addr, /* mask = */ 0x0001);
4922 
4923   BLOCK_COMMENT("} string_inflate");
4924 
4925   return offset() - block_start;
4926 }
4927 
4928 // Inflate byte[] to char[]. odd_reg contains cnt. Kills src.
4929 unsigned int MacroAssembler::string_inflate(Register src, Register dst, Register odd_reg,
4930                                             Register even_reg, Register tmp) {
4931   int block_start = offset();
4932 
4933   BLOCK_COMMENT("string_inflate {");
4934 
4935   Label Lloop1, Lloop2, Lslow, Ldone;
4936   const Register addr1 = src, ind2 = tmp;
4937 
4938   z_sll(odd_reg, 1);       // Number of bytes to write. (Must be a positive simm32.)
4939   clear_reg(ind2);         // Index to write.
4940   z_ahi(odd_reg, -16);     // Last possible index for fast loop.
4941   z_brl(Lslow);
4942 
4943   // ind2: index, even_reg: index increment, odd_reg: index limit
4944   clear_reg(Z_R0);
4945   clear_reg(Z_R1);
4946   z_lhi(even_reg, 16);
4947 
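       // Z_R0/Z_R1 were cleared above, so ICMH/ICM with mask 0b0101 insert each source
       // byte into the low byte of a 16-bit slot whose high byte stays zero; this
       // performs the Latin-1 to UTF-16 inflation.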
4948   bind(Lloop1); // 8 Characters per iteration.
4949   z_icmh(Z_R0, 5, 0, addr1);
4950   z_icmh(Z_R1, 5, 4, addr1);
4951   z_icm(Z_R0, 5, 2, addr1);
4952   z_icm(Z_R1, 5, 6, addr1);
4953   z_aghi(addr1, 8);
4954   z_stg(Z_R0, Address(dst, ind2));
4955   z_stg(Z_R1, Address(dst, ind2, 8));
4956   z_brxle(ind2, even_reg, Lloop1);
4957 
4958   bind(Lslow);
4959   // Compute index limit and skip if negative.
4960   z_ahi(odd_reg, 16-2);    // Last possible index for slow loop.
4961   z_lhi(even_reg, 2);
4962   z_cr(ind2, odd_reg);
4963   z_brh(Ldone);
4964 
4965   bind(Lloop2); // 1 Character per iteration.
4966   z_llc(Z_R0, Address(addr1));
4967   z_sth(Z_R0, Address(dst, ind2));
4968   z_aghi(addr1, 1);
4969   z_brxle(ind2, even_reg, Lloop2);
4970 
4971   bind(Ldone);
4972 
4973   BLOCK_COMMENT("} string_inflate");
4974 
4975   return offset() - block_start;
4976 }
4977 
4978 // Kills src.
4979 unsigned int MacroAssembler::has_negatives(Register result, Register src, Register cnt,
4980                                            Register odd_reg, Register even_reg, Register tmp) {
4981   int block_start = offset();
4982   Label Lloop1, Lloop2, Lslow, Lnotfound, Ldone;
4983   const Register addr = src, mask = tmp;
4984 
4985   BLOCK_COMMENT("has_negatives {");
4986 
4987   z_llgfr(Z_R1, cnt);      // Number of bytes to read. (Must be a positive simm32.)
4988   z_llilf(mask, 0x80808080);
4989   z_lhi(result, 1);        // Assume true.
4990   // Last possible addr for fast loop.
4991   z_lay(odd_reg, -16, Z_R1, src);
4992   z_chi(cnt, 16);
4993   z_brl(Lslow);
4994 
4995   // ind1: index, even_reg: index increment, odd_reg: index limit
4996   z_iihf(mask, 0x80808080);
4997   z_lghi(even_reg, 16);
4998 
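       // Fast loop below: OR two doublewords (16 bytes) together and AND with
       // 0x8080...80. A non-zero result means at least one byte has its most
       // significant bit set, i.e. a negative (signed) byte was found.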
4999   bind(Lloop1); // 16 bytes per iteration.
5000   z_lg(Z_R0, Address(addr));
5001   z_lg(Z_R1, Address(addr, 8));
5002   z_ogr(Z_R0, Z_R1);
5003   z_ngr(Z_R0, mask);
5004   z_brne(Ldone);           // If found return 1.
5005   z_brxlg(addr, even_reg, Lloop1);
5006 
5007   bind(Lslow);
5008   z_aghi(odd_reg, 16-1);   // Last possible addr for slow loop.
5009   z_lghi(even_reg, 1);
5010   z_cgr(addr, odd_reg);
5011   z_brh(Lnotfound);
5012 
5013   bind(Lloop2); // 1 byte per iteration.
5014   z_cli(Address(addr), 0x80);
5015   z_brnl(Ldone);           // If found return 1.
5016   z_brxlg(addr, even_reg, Lloop2);
5017 
5018   bind(Lnotfound);
5019   z_lhi(result, 0);
5020 
5021   bind(Ldone);
5022 
5023   BLOCK_COMMENT("} has_negatives");
5024 
5025   return offset() - block_start;
5026 }
5027 
5028 // kill: cnt1, cnt2, odd_reg, even_reg; early clobber: result
5029 unsigned int MacroAssembler::string_compare(Register str1, Register str2,
5030                                             Register cnt1, Register cnt2,
5031                                             Register odd_reg, Register even_reg, Register result, int ae) {
5032   int block_start = offset();
5033 
5034   assert_different_registers(str1, cnt1, cnt2, odd_reg, even_reg, result);
5035   assert_different_registers(str2, cnt1, cnt2, odd_reg, even_reg, result);
5036 
5037   // If strings are equal up to min length, return the length difference.
5038   const Register diff = result, // Pre-set result with length difference.
5039                  min  = cnt1,   // min number of bytes
5040                  tmp  = cnt2;
5041 
5042   // Note: Making use of the fact that compareTo(a, b) == -compareTo(b, a),
5043   // we interchange str1 and str2 in the UL case and negate the result.
5044   // This way, str1 is always latin1 encoded, except for the UU case.
5045   // In addition, we need zero (or sign, which is zero) extension when using a 64 bit register.
5046   const bool used_as_LU = (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL);
5047 
5048   BLOCK_COMMENT("string_compare {");
5049 
5050   if (used_as_LU) {
5051     z_srl(cnt2, 1);
5052   }
5053 
5054   // See if the lengths are different, and calculate min in cnt1.
5055   // Save diff in case we need it for a tie-breaker.
5056 
5057   // diff = cnt1 - cnt2
5058   if (VM_Version::has_DistinctOpnds()) {
5059     z_srk(diff, cnt1, cnt2);
5060   } else {
5061     z_lr(diff, cnt1);
5062     z_sr(diff, cnt2);
5063   }
5064   if (str1 != str2) {
5065     if (VM_Version::has_LoadStoreConditional()) {
5066       z_locr(min, cnt2, Assembler::bcondHigh);
5067     } else {
5068       Label Lskip;
5069       z_brl(Lskip);    // min ok if cnt1 < cnt2
5070       z_lr(min, cnt2); // min = cnt2
5071       bind(Lskip);
5072     }
5073   }
5074 
5075   if (ae == StrIntrinsicNode::UU) {
5076     z_sra(diff, 1);
5077   }
5078   if (str1 != str2) {
5079     Label Ldone;
5080     if (used_as_LU) {
5081       // Loop which searches the first difference character by character.
5082       Label Lloop;
5083       const Register ind1 = Z_R1,
5084                      ind2 = min;
5085       int stride1 = 1, stride2 = 2; // See comment above.
5086 
5087       // ind1: index, even_reg: index increment, odd_reg: index limit
5088       z_llilf(ind1, (unsigned int)(-stride1));
5089       z_lhi(even_reg, stride1);
5090       add2reg(odd_reg, -stride1, min);
5091       clear_reg(ind2); // kills min
5092 
5093       bind(Lloop);
5094       z_brxh(ind1, even_reg, Ldone);
5095       z_llc(tmp, Address(str1, ind1));
5096       z_llh(Z_R0, Address(str2, ind2));
5097       z_ahi(ind2, stride2);
5098       z_sr(tmp, Z_R0);
5099       z_bre(Lloop);
5100 
5101       z_lr(result, tmp);
5102 
5103     } else {
5104       // Use clcle in fast loop (only for same encoding).
5105       z_lgr(Z_R0, str1);
5106       z_lgr(even_reg, str2);
5107       z_llgfr(Z_R1, min);
5108       z_llgfr(odd_reg, min);
5109 
5110       if (ae == StrIntrinsicNode::LL) {
5111         compare_long_ext(Z_R0, even_reg, 0);
5112       } else {
5113         compare_long_uni(Z_R0, even_reg, 0);
5114       }
5115       z_bre(Ldone);
5116       z_lgr(Z_R1, Z_R0);
5117       if (ae == StrIntrinsicNode::LL) {
5118         z_llc(Z_R0, Address(even_reg));
5119         z_llc(result, Address(Z_R1));
5120       } else {
5121         z_llh(Z_R0, Address(even_reg));
5122         z_llh(result, Address(Z_R1));
5123       }
5124       z_sr(result, Z_R0);
5125     }
5126 
5127     // Otherwise, return the difference between the first mismatched chars.
5128     bind(Ldone);
5129   }
5130 
5131   if (ae == StrIntrinsicNode::UL) {
5132     z_lcr(result, result); // Negate result (see note above).
5133   }
5134 
5135   BLOCK_COMMENT("} string_compare");
5136 
5137   return offset() - block_start;
5138 }
5139 
5140 unsigned int MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
5141                                           Register odd_reg, Register even_reg, Register result, bool is_byte) {
5142   int block_start = offset();
5143 
5144   BLOCK_COMMENT("array_equals {");
5145 
5146   assert_different_registers(ary1, limit, odd_reg, even_reg);
5147   assert_different_registers(ary2, limit, odd_reg, even_reg);
5148 
5149   Label Ldone, Ldone_true, Ldone_false, Lclcle, CLC_template;
5150   int base_offset = 0;
5151 
5152   if (ary1 != ary2) {
5153     if (is_array_equ) {
5154       base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
5155 
5156       // Return true if the same array.
5157       compareU64_and_branch(ary1, ary2, Assembler::bcondEqual, Ldone_true);
5158 
5159       // Return false if one of them is NULL.
5160       compareU64_and_branch(ary1, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5161       compareU64_and_branch(ary2, (intptr_t)0, Assembler::bcondEqual, Ldone_false);
5162 
5163       // Load the lengths of arrays.
5164       z_llgf(odd_reg, Address(ary1, arrayOopDesc::length_offset_in_bytes()));
5165 
5166       // Return false if the two arrays are not equal length.
5167       z_c(odd_reg, Address(ary2, arrayOopDesc::length_offset_in_bytes()));
5168       z_brne(Ldone_false);
5169 
5170       // string len in bytes (right operand)
5171       if (!is_byte) {
5172         z_chi(odd_reg, 128);
5173         z_sll(odd_reg, 1); // preserves flags
5174         z_brh(Lclcle);
5175       } else {
5176         compareU32_and_branch(odd_reg, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5177       }
5178     } else {
5179       z_llgfr(odd_reg, limit); // Need to zero-extend prior to using the value.
5180       compareU32_and_branch(limit, (intptr_t)256, Assembler::bcondHigh, Lclcle);
5181     }
5182 
5183 
5184     // Use clc instruction for up to 256 bytes.
5185     {
5186       Register str1_reg = ary1,
5187           str2_reg = ary2;
5188       if (is_array_equ) {
5189         str1_reg = Z_R1;
5190         str2_reg = even_reg;
5191         add2reg(str1_reg, base_offset, ary1); // string addr (left operand)
5192         add2reg(str2_reg, base_offset, ary2); // string addr (right operand)
5193       }
5194       z_ahi(odd_reg, -1); // Clc uses decremented limit. Also compare result to 0.
5195       z_brl(Ldone_true);
5196       // Note: We could jump to the template if equal.
5197 
5198       assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5199       z_exrl(odd_reg, CLC_template);
5200       z_bre(Ldone_true);
5201       // fall through
5202 
5203       bind(Ldone_false);
5204       clear_reg(result);
5205       z_bru(Ldone);
5206 
5207       bind(CLC_template);
5208       z_clc(0, 0, str1_reg, 0, str2_reg);
5209     }
5210 
5211     // Use clcle instruction.
5212     {
5213       bind(Lclcle);
5214       add2reg(even_reg, base_offset, ary2); // string addr (right operand)
5215       add2reg(Z_R0, base_offset, ary1);     // string addr (left operand)
5216 
5217       z_lgr(Z_R1, odd_reg); // string len in bytes (left operand)
5218       if (is_byte) {
5219         compare_long_ext(Z_R0, even_reg, 0);
5220       } else {
5221         compare_long_uni(Z_R0, even_reg, 0);
5222       }
5223       z_lghi(result, 0); // Preserve flags.
5224       z_brne(Ldone);
5225     }
5226   }
5227   // fall through
5228 
5229   bind(Ldone_true);
5230   z_lghi(result, 1); // All characters are equal.
5231   bind(Ldone);
5232 
5233   BLOCK_COMMENT("} array_equals");
5234 
5235   return offset() - block_start;
5236 }
5237 
5238 // kill: haycnt, needlecnt, odd_reg, even_reg; early clobber: result
5239 unsigned int MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
5240                                             Register needle, Register needlecnt, int needlecntval,
5241                                             Register odd_reg, Register even_reg, int ae) {
5242   int block_start = offset();
5243 
5244   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
5245   assert(ae != StrIntrinsicNode::LU, "Invalid encoding");
5246   const int h_csize = (ae == StrIntrinsicNode::LL) ? 1 : 2;
5247   const int n_csize = (ae == StrIntrinsicNode::UU) ? 2 : 1;
5248   Label L_needle1, L_Found, L_NotFound;
5249 
5250   BLOCK_COMMENT("string_indexof {");
5251 
5252   if (needle == haystack) {
5253     z_lhi(result, 0);
5254   } else {
5255 
5256   // Load first character of needle (R0 used by search_string instructions).
5257   if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); }
5258 
5259   // Compute last haystack addr to use if no match gets found.
5260   if (needlecnt != noreg) { // variable needlecnt
5261     z_ahi(needlecnt, -1); // Remaining characters after first one.
5262     z_sr(haycnt, needlecnt); // Compute index succeeding last element to compare.
5263     if (n_csize == 2) { z_sll(needlecnt, 1); } // In bytes.
5264   } else { // constant needlecnt
5265     assert((needlecntval & 0x7fff) == needlecntval, "must be positive simm16 immediate");
5266     // Compute index succeeding last element to compare.
5267     if (needlecntval != 1) { z_ahi(haycnt, 1 - needlecntval); }
5268   }
5269 
5270   z_llgfr(haycnt, haycnt); // Clear high half.
5271   z_lgr(result, haystack); // Final result will be computed from needle start pointer.
5272   if (h_csize == 2) { z_sll(haycnt, 1); } // Scale to number of bytes.
5273   z_agr(haycnt, haystack); // Point to address succeeding last element (haystack+scale*(haycnt-needlecnt+1)).
5274 
5275   if (h_csize != n_csize) {
5276     assert(ae == StrIntrinsicNode::UL, "Invalid encoding");
5277 
5278     if (needlecnt != noreg || needlecntval != 1) {
5279       if (needlecnt != noreg) {
5280         compare32_and_branch(needlecnt, (intptr_t)0, Assembler::bcondEqual, L_needle1);
5281       }
5282 
5283       // Main Loop: UL version (now we have at least 2 characters).
5284       Label L_OuterLoop, L_InnerLoop, L_Skip;
5285       bind(L_OuterLoop); // Search for 1st 2 characters.
5286       z_lgr(Z_R1, haycnt);
5287       MacroAssembler::search_string_uni(Z_R1, result);
5288       z_brc(Assembler::bcondNotFound, L_NotFound);
5289       z_lgr(result, Z_R1);
5290 
5291       z_lghi(Z_R1, n_csize);
5292       z_lghi(even_reg, h_csize);
5293       bind(L_InnerLoop);
5294       z_llgc(odd_reg, Address(needle, Z_R1));
5295       z_ch(odd_reg, Address(result, even_reg));
5296       z_brne(L_Skip);
5297       if (needlecnt != noreg) { z_cr(Z_R1, needlecnt); } else { z_chi(Z_R1, needlecntval - 1); }
5298       z_brnl(L_Found);
5299       z_aghi(Z_R1, n_csize);
5300       z_aghi(even_reg, h_csize);
5301       z_bru(L_InnerLoop);
5302 
5303       bind(L_Skip);
5304       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5305       z_bru(L_OuterLoop);
5306     }
5307 
5308   } else {
5309     const intptr_t needle_bytes = (n_csize == 2) ? ((needlecntval - 1) << 1) : (needlecntval - 1);
5310     Label L_clcle;
5311 
5312     if (needlecnt != noreg || (needlecntval != 1 && needle_bytes <= 256)) {
5313       if (needlecnt != noreg) {
5314         compare32_and_branch(needlecnt, 256, Assembler::bcondHigh, L_clcle);
5315         z_ahi(needlecnt, -1); // remaining bytes -1 (for CLC)
5316         z_brl(L_needle1);
5317       }
5318 
5319       // Main Loop: clc version (now we have at least 2 characters).
5320       Label L_OuterLoop, CLC_template;
5321       bind(L_OuterLoop); // Search for 1st 2 characters.
5322       z_lgr(Z_R1, haycnt);
5323       if (h_csize == 1) {
5324         MacroAssembler::search_string(Z_R1, result);
5325       } else {
5326         MacroAssembler::search_string_uni(Z_R1, result);
5327       }
5328       z_brc(Assembler::bcondNotFound, L_NotFound);
5329       z_lgr(result, Z_R1);
5330 
5331       if (needlecnt != noreg) {
5332         assert(VM_Version::has_ExecuteExtensions(), "unsupported hardware");
5333         z_exrl(needlecnt, CLC_template);
5334       } else {
        z_clc(h_csize, needle_bytes - 1, Z_R1, n_csize, needle);
5336       }
5337       z_bre(L_Found);
5338       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5339       z_bru(L_OuterLoop);
5340 
5341       if (needlecnt != noreg) {
5342         bind(CLC_template);
5343         z_clc(h_csize, 0, Z_R1, n_csize, needle);
5344       }
5345     }
5346 
5347     if (needlecnt != noreg || needle_bytes > 256) {
5348       bind(L_clcle);
5349 
5350       // Main Loop: clcle version (now we have at least 256 bytes).
5351       Label L_OuterLoop, CLC_template;
5352       bind(L_OuterLoop); // Search for 1st 2 characters.
5353       z_lgr(Z_R1, haycnt);
5354       if (h_csize == 1) {
5355         MacroAssembler::search_string(Z_R1, result);
5356       } else {
5357         MacroAssembler::search_string_uni(Z_R1, result);
5358       }
5359       z_brc(Assembler::bcondNotFound, L_NotFound);
5360 
5361       add2reg(Z_R0, n_csize, needle);
5362       add2reg(even_reg, h_csize, Z_R1);
5363       z_lgr(result, Z_R1);
5364       if (needlecnt != noreg) {
5365         z_llgfr(Z_R1, needlecnt); // needle len in bytes (left operand)
5366         z_llgfr(odd_reg, needlecnt);
5367       } else {
5368         load_const_optimized(Z_R1, needle_bytes);
5369         if (Immediate::is_simm16(needle_bytes)) { z_lghi(odd_reg, needle_bytes); } else { z_lgr(odd_reg, Z_R1); }
5370       }
5371       if (h_csize == 1) {
5372         compare_long_ext(Z_R0, even_reg, 0);
5373       } else {
5374         compare_long_uni(Z_R0, even_reg, 0);
5375       }
5376       z_bre(L_Found);
5377 
5378       if (n_csize == 2) { z_llgh(Z_R0, Address(needle)); } else { z_llgc(Z_R0, Address(needle)); } // Reload.
5379       z_aghi(result, h_csize); // This is the new address we want to use for comparing.
5380       z_bru(L_OuterLoop);
5381     }
5382   }
5383 
5384   if (needlecnt != noreg || needlecntval == 1) {
5385     bind(L_needle1);
5386 
5387     // Single needle character version.
5388     if (h_csize == 1) {
5389       MacroAssembler::search_string(haycnt, result);
5390     } else {
5391       MacroAssembler::search_string_uni(haycnt, result);
5392     }
5393     z_lgr(result, haycnt);
5394     z_brc(Assembler::bcondFound, L_Found);
5395   }
5396 
5397   bind(L_NotFound);
5398   add2reg(result, -1, haystack); // Return -1.
5399 
5400   bind(L_Found); // Return index (or -1 in fallthrough case).
5401   z_sgr(result, haystack);
5402   if (h_csize == 2) { z_srag(result, result, exact_log2(sizeof(jchar))); }
5403   }
5404   BLOCK_COMMENT("} string_indexof");
5405 
5406   return offset() - block_start;
5407 }
5408 
5409 // early clobber: result
5410 unsigned int MacroAssembler::string_indexof_char(Register result, Register haystack, Register haycnt,
5411                                                  Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte) {
5412   int block_start = offset();
5413 
5414   BLOCK_COMMENT("string_indexof_char {");
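  // Logical effect, sketched in Java-like pseudo code (the emitted code uses SRST/SRSTU):
  //
  //   for (int i = 0; i < haycnt; i++) {
  //     if (haystack[i] == needleChar) return i;   // needleChar, or the first char read from the needle register.
  //   }
  //   return -1;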
5415 
5416   if (needle == haystack) {
5417     z_lhi(result, 0);
5418   } else {
5419 
5420   Label Ldone;
5421 
5422   z_llgfr(odd_reg, haycnt);  // Preset loop ctr/searchrange end.
5423   if (needle == noreg) {
5424     load_const_optimized(Z_R0, (unsigned long)needleChar);
5425   } else {
5426     if (is_byte) {
5427       z_llgcr(Z_R0, needle); // First (and only) needle char.
5428     } else {
5429       z_llghr(Z_R0, needle); // First (and only) needle char.
5430     }
5431   }
5432 
5433   if (!is_byte) {
5434     z_agr(odd_reg, odd_reg); // Calc #bytes to be processed with SRSTU.
5435   }
5436 
5437   z_lgr(even_reg, haystack); // haystack addr
5438   z_agr(odd_reg, haystack);  // First char after range end.
5439   z_lghi(result, -1);
5440 
5441   if (is_byte) {
5442     MacroAssembler::search_string(odd_reg, even_reg);
5443   } else {
5444     MacroAssembler::search_string_uni(odd_reg, even_reg);
5445   }
5446   z_brc(Assembler::bcondNotFound, Ldone);
5447   if (is_byte) {
5448     if (VM_Version::has_DistinctOpnds()) {
5449       z_sgrk(result, odd_reg, haystack);
5450     } else {
5451       z_sgr(odd_reg, haystack);
5452       z_lgr(result, odd_reg);
5453     }
5454   } else {
5455     z_slgr(odd_reg, haystack);
5456     z_srlg(result, odd_reg, exact_log2(sizeof(jchar)));
5457   }
5458 
5459   bind(Ldone);
5460   }
5461   BLOCK_COMMENT("} string_indexof_char");
5462 
5463   return offset() - block_start;
5464 }
5465 
5466 
5467 //-------------------------------------------------
5468 //   Constants (scalar and oop) in constant pool
5469 //-------------------------------------------------
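
// Typical usage of the TOC helpers below, shown as a hedged sketch (argument defaults and
// exact signatures are declared in the header; consult it before relying on this):
//
//   AddressLiteral al((address)some_value);
//   if (!load_const_from_toc(dst, al)) {        // Create a CP entry and emit a pc-relative load.
//     load_const_optimized(dst, al.value());    // Fallback: no CP entry could be created.
//   }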
5470 
5471 // Add a non-relocated constant to the CP.
5472 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
5473   long    value  = val.value();
5474   address tocPos = long_constant(value);
5475 
5476   if (tocPos != NULL) {
5477     int tocOffset = (int)(tocPos - code()->consts()->start());
5478     return tocOffset;
5479   }
  // long_constant() returned NULL, so no constant pool entry has been created.
  // In that case, we return a "fatal" offset, just in case subsequently
  // generated access code gets executed.
5483   return -1;
5484 }
5485 
5486 // Returns the TOC offset where the address is stored.
5487 // Add a relocated constant to the CP.
5488 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
5489   // Use RelocationHolder::none for the constant pool entry.
5490   // Otherwise we will end up with a failing NativeCall::verify(x),
5491   // where x is the address of the constant pool entry.
5492   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
5493 
5494   if (tocPos != NULL) {
5495     int              tocOffset = (int)(tocPos - code()->consts()->start());
5496     RelocationHolder rsp = oop.rspec();
5497     Relocation      *rel = rsp.reloc();
5498 
5499     // Store toc_offset in relocation, used by call_far_patchable.
5500     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
5501       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
5502     }
5503     // Relocate at the load's pc.
5504     relocate(rsp);
5505 
5506     return tocOffset;
5507   }
  // Address_constant returned NULL, so no constant pool entry has been created.
  // In that case, we return a "fatal" offset, just in case subsequently
  // generated access code gets executed.
5511   return -1;
5512 }
5513 
5514 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5515   int     tocOffset = store_const_in_toc(a);
5516   if (tocOffset == -1) return false;
5517   address tocPos    = tocOffset + code()->consts()->start();
5518   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5519 
5520   load_long_pcrelative(dst, tocPos);
5521   return true;
5522 }
5523 
5524 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
5525   int     tocOffset = store_oop_in_toc(a);
5526   if (tocOffset == -1) return false;
5527   address tocPos    = tocOffset + code()->consts()->start();
5528   assert((address)code()->consts()->start() != NULL, "Please add CP address");
5529 
5530   load_addr_pcrelative(dst, tocPos);
5531   return true;
5532 }
5533 
5534 // If the instruction sequence at the given pc is a load_const_from_toc
5535 // sequence, return the value currently stored at the referenced position
5536 // in the TOC.
5537 intptr_t MacroAssembler::get_const_from_toc(address pc) {
5538 
5539   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5540 
5541   long    offset  = get_load_const_from_toc_offset(pc);
5542   address dataLoc = NULL;
5543   if (is_load_const_from_toc_pcrelative(pc)) {
5544     dataLoc = pc + offset;
5545   } else {
5546     CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
5547     assert(cb && cb->is_nmethod(), "sanity");
5548     nmethod* nm = (nmethod*)cb;
5549     dataLoc = nm->ctable_begin() + offset;
5550   }
5551   return *(intptr_t *)dataLoc;
5552 }
5553 
5554 // If the instruction sequence at the given pc is a load_const_from_toc
5555 // sequence, copy the passed-in new_data value into the referenced
5556 // position in the TOC.
5557 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
5558   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
5559 
5560   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
5561   address dataLoc = NULL;
5562   if (is_load_const_from_toc_pcrelative(pc)) {
5563     dataLoc = pc+offset;
5564   } else {
5565     nmethod* nm = CodeCache::find_nmethod(pc);
5566     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
5567     dataLoc = nm->ctable_begin() + offset;
5568   }
5569   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
5570     *(unsigned long *)dataLoc = new_data;
5571   }
5572 }
5573 
5574 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
5575 // site. Verify by calling is_load_const_from_toc() before!!
5576 // Offset is +/- 2**32 -> use long.
5577 long MacroAssembler::get_load_const_from_toc_offset(address a) {
5578   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
5579   //  expected code sequence:
5580   //    z_lgrl(t, simm32);    len = 6
5581   unsigned long inst;
5582   unsigned int  len = get_instruction(a, &inst);
5583   return get_pcrel_offset(inst);
5584 }
5585 
5586 //**********************************************************************************
5587 //  inspection of generated instruction sequences for a particular pattern
5588 //**********************************************************************************
5589 
5590 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
5591 #ifdef ASSERT
5592   unsigned long inst;
5593   unsigned int  len = get_instruction(a+2, &inst);
5594   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
5595     const int range = 128;
5596     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
5597     VM_Version::z_SIGSEGV();
5598   }
5599 #endif
5600   // expected code sequence:
5601   //   z_lgrl(t, relAddr32);    len = 6
5602   //TODO: verify accessed data is in CP, if possible.
5603   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
5604 }
5605 
5606 bool MacroAssembler::is_load_const_from_toc_call(address a) {
5607   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
5608 }
5609 
5610 bool MacroAssembler::is_load_const_call(address a) {
5611   return is_load_const(a) && is_call_byregister(a + load_const_size());
5612 }
5613 
5614 //-------------------------------------------------
//   Emitters for some really CISC instructions
5616 //-------------------------------------------------
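
// All emitters below share the same pattern: the underlying instructions are interruptible
// and may end prematurely with CC==3 ("not completed"). In that case they simply have to be
// re-issued until they signal completion. Sketched as pseudo code:
//
//   Label retry;
//   bind(retry);
//   <interruptible instruction>                // e.g. MVCLE, CLCLE, SRST, KM, ...
//   z_brc(Assembler::bcondOverflow, retry);    // CC==3 -> iterate.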
5617 
5618 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
5619   assert(dst->encoding()%2==0, "must be an even/odd register pair");
5620   assert(src->encoding()%2==0, "must be an even/odd register pair");
5621   assert(pad<256, "must be a padding BYTE");
5622 
5623   Label retry;
5624   bind(retry);
5625   Assembler::z_mvcle(dst, src, pad);
5626   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5627 }
5628 
5629 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
5630   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
5631   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
5632   assert(pad<256, "must be a padding BYTE");
5633 
5634   Label retry;
5635   bind(retry);
5636   Assembler::z_clcle(left, right, pad, Z_R0);
5637   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5638 }
5639 
5640 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
5641   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
5642   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
5643   assert(pad<=0xfff, "must be a padding HALFWORD");
5644   assert(VM_Version::has_ETF2(), "instruction must be available");
5645 
5646   Label retry;
5647   bind(retry);
5648   Assembler::z_clclu(left, right, pad, Z_R0);
5649   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5650 }
5651 
5652 void MacroAssembler::search_string(Register end, Register start) {
5653   assert(end->encoding() != 0, "end address must not be in R0");
5654   assert(start->encoding() != 0, "start address must not be in R0");
5655 
5656   Label retry;
5657   bind(retry);
5658   Assembler::z_srst(end, start);
5659   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5660 }
5661 
5662 void MacroAssembler::search_string_uni(Register end, Register start) {
5663   assert(end->encoding() != 0, "end address must not be in R0");
5664   assert(start->encoding() != 0, "start address must not be in R0");
5665   assert(VM_Version::has_ETF3(), "instruction must be available");
5666 
5667   Label retry;
5668   bind(retry);
5669   Assembler::z_srstu(end, start);
5670   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5671 }
5672 
5673 void MacroAssembler::kmac(Register srcBuff) {
5674   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5675   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
5676 
5677   Label retry;
5678   bind(retry);
5679   Assembler::z_kmac(Z_R0, srcBuff);
5680   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5681 }
5682 
5683 void MacroAssembler::kimd(Register srcBuff) {
5684   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5685   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
5686 
5687   Label retry;
5688   bind(retry);
5689   Assembler::z_kimd(Z_R0, srcBuff);
5690   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5691 }
5692 
5693 void MacroAssembler::klmd(Register srcBuff) {
5694   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5695   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
5696 
5697   Label retry;
5698   bind(retry);
5699   Assembler::z_klmd(Z_R0, srcBuff);
5700   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5701 }
5702 
5703 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
5704   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
5705   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
5706   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5707   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
5708   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
5709 
5710   Label retry;
5711   bind(retry);
5712   Assembler::z_km(dstBuff, srcBuff);
5713   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5714 }
5715 
5716 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
5717   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
5718   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
5719   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
5720   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
5721   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
5722 
5723   Label retry;
5724   bind(retry);
5725   Assembler::z_kmc(dstBuff, srcBuff);
5726   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5727 }
5728 
5729 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
5730   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
5731 
5732   Label retry;
5733   bind(retry);
5734   Assembler::z_cksm(crcBuff, srcBuff);
5735   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5736 }
5737 
5738 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
5739   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5740   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5741 
5742   Label retry;
5743   bind(retry);
5744   Assembler::z_troo(r1, r2, m3);
5745   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5746 }
5747 
5748 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
5749   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5750   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5751 
5752   Label retry;
5753   bind(retry);
5754   Assembler::z_trot(r1, r2, m3);
5755   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5756 }
5757 
5758 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
5759   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5760   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5761 
5762   Label retry;
5763   bind(retry);
5764   Assembler::z_trto(r1, r2, m3);
5765   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5766 }
5767 
5768 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
5769   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
5770   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
5771 
5772   Label retry;
5773   bind(retry);
5774   Assembler::z_trtt(r1, r2, m3);
5775   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
5776 }
5777 
5778 void MacroAssembler::generate_safepoint_check(Label& slow_path, Register scratch, bool may_relocate) {
5779   if (scratch == noreg) scratch = Z_R1;
5780   address Astate = SafepointSynchronize::address_of_state();
5781   BLOCK_COMMENT("safepoint check:");
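  // Logical effect (a sketch, not the emitted instruction sequence):
  //
  //   if (SafepointSynchronize::_state != SafepointSynchronize::_not_synchronized) goto slow_path;
  //
  // The CLI below compares only the last byte of the (big-endian) state word, which is
  // sufficient as long as the state values fit into one byte.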
5782 
5783   if (may_relocate) {
5784     ptrdiff_t total_distance = Astate - this->pc();
5785     if (RelAddr::is_in_range_of_RelAddr32(total_distance)) {
5786       RelocationHolder rspec = external_word_Relocation::spec(Astate);
5787       (this)->relocate(rspec, relocInfo::pcrel_addr_format);
5788       load_absolute_address(scratch, Astate);
5789     } else {
5790       load_const_optimized(scratch, Astate);
5791     }
5792   } else {
5793     load_absolute_address(scratch, Astate);
5794   }
5795   z_cli(/*SafepointSynchronize::sz_state()*/4-1, scratch, SafepointSynchronize::_not_synchronized);
5796   z_brne(slow_path);
5797 }
5798 
5799 
5800 void MacroAssembler::generate_type_profiling(const Register Rdata,
5801                                              const Register Rreceiver_klass,
5802                                              const Register Rwanted_receiver_klass,
5803                                              const Register Rmatching_row,
5804                                              bool is_virtual_call) {
5805   const int row_size = in_bytes(ReceiverTypeData::receiver_offset(1)) -
5806                        in_bytes(ReceiverTypeData::receiver_offset(0));
5807   const int num_rows = ReceiverTypeData::row_limit();
5808   NearLabel found_free_row;
5809   NearLabel do_increment;
5810   NearLabel found_no_slot;
5811 
5812   BLOCK_COMMENT("type profiling {");
5813 
5814   // search for:
5815   //    a) The type given in Rwanted_receiver_klass.
5816   //    b) The *first* empty row.
5817 
5818   // First search for a) only, just running over b) with no regard.
5819   // This is possible because
5820   //    wanted_receiver_class == receiver_class  &&  wanted_receiver_class == 0
5821   // is never true (receiver_class can't be zero).
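  //
  // The overall update, sketched in pseudo code (rows = receiver rows in Rdata):
  //
  //   for (row : rows) if (row.receiver == receiver_klass) { row.count++; goto done; }                    // pass a)
  //   for (row : rows) if (row.receiver == 0) { row.receiver = receiver_klass; row.count++; goto done; }  // pass b)
  //   if (is_virtual_call) total_count++;   // Neither a match nor a free row: polymorphic case.
  //   done: ;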
5822   for (int row_num = 0; row_num < num_rows; row_num++) {
    // Row_offset should be a well-behaved positive number. The generated code relies
    // on that to keep the generated code size constant. Add2reg can handle any row_offset
    // value, but the generated code size would then vary.
5826     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
5827     assert(Displacement::is_shortDisp(row_offset), "Limitation of generated code");
5828 
5829     // Is Rwanted_receiver_klass in this row?
5830     if (VM_Version::has_CompareBranch()) {
5831       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
5832       // Rmatching_row = Rdata + row_offset;
5833       add2reg(Rmatching_row, row_offset, Rdata);
5834       // if (*row_recv == (intptr_t) receiver_klass) goto fill_existing_slot;
5835       compare64_and_branch(Rwanted_receiver_klass, Rreceiver_klass, Assembler::bcondEqual, do_increment);
5836     } else {
5837       add2reg(Rmatching_row, row_offset, Rdata);
5838       z_cg(Rreceiver_klass, row_offset, Z_R0, Rdata);
5839       z_bre(do_increment);
5840     }
5841   }
5842 
5843   // Now that we did not find a match, let's search for b).
5844 
  // We could save the first calculation of Rmatching_row if we would search for a) in reverse order.
5846   // We would then end up here with Rmatching_row containing the value for row_num == 0.
5847   // We would not see much benefit, if any at all, because the CPU can schedule
5848   // two instructions together with a branch anyway.
5849   for (int row_num = 0; row_num < num_rows; row_num++) {
5850     int row_offset = in_bytes(ReceiverTypeData::receiver_offset(row_num));
5851 
5852     // Has this row a zero receiver_klass, i.e. is it empty?
5853     if (VM_Version::has_CompareBranch()) {
5854       z_lg(Rwanted_receiver_klass, row_offset, Z_R0, Rdata);
5855       // Rmatching_row = Rdata + row_offset
5856       add2reg(Rmatching_row, row_offset, Rdata);
5857       // if (*row_recv == (intptr_t) 0) goto found_free_row
5858       compare64_and_branch(Rwanted_receiver_klass, (intptr_t)0, Assembler::bcondEqual, found_free_row);
5859     } else {
5860       add2reg(Rmatching_row, row_offset, Rdata);
5861       load_and_test_long(Rwanted_receiver_klass, Address(Rdata, row_offset));
5862       z_bre(found_free_row);  // zero -> Found a free row.
5863     }
5864   }
5865 
5866   // No match, no empty row found.
5867   // Increment total counter to indicate polymorphic case.
5868   if (is_virtual_call) {
5869     add2mem_64(Address(Rdata, CounterData::count_offset()), 1, Rmatching_row);
5870   }
5871   z_bru(found_no_slot);
5872 
5873   // Here we found an empty row, but we have not found Rwanted_receiver_klass.
5874   // Rmatching_row holds the address to the first empty row.
5875   bind(found_free_row);
5876   // Store receiver_klass into empty slot.
5877   z_stg(Rreceiver_klass, 0, Z_R0, Rmatching_row);
5878 
5879   // Increment the counter of Rmatching_row.
5880   bind(do_increment);
5881   ByteSize counter_offset = ReceiverTypeData::receiver_count_offset(0) - ReceiverTypeData::receiver_offset(0);
5882   add2mem_64(Address(Rmatching_row, counter_offset), 1, Rdata);
5883 
5884   bind(found_no_slot);
5885 
5886   BLOCK_COMMENT("} type profiling");
5887 }
5888 
5889 //---------------------------------------
5890 // Helpers for Intrinsic Emitters
5891 //---------------------------------------
5892 
5893 /**
 * uint32_t crc, val;
 * crc = timesXtoThe32[val & 0xFF] ^ (crc >> 8);
5896  */
5897 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
5898   assert_different_registers(crc, table, tmp);
5899   assert_different_registers(val, table);
5900   if (crc == val) {      // Must rotate first to use the unmodified value.
5901     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
5902     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
5903   } else {
5904     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
5905     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into byte 6..7 of tmp, clear the rest.
5906   }
5907   z_x(crc, Address(table, tmp, 0));
5908 }
5909 
5910 /**
5911  * uint32_t crc;
 * crc = timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
5913  */
5914 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
5915   fold_byte_crc32(crc, crc, table, tmp);
5916 }
5917 
5918 /**
5919  * Emits code to update CRC-32 with a byte value according to constants in table.
5920  *
5921  * @param [in,out]crc Register containing the crc.
5922  * @param [in]val     Register containing the byte to fold into the CRC.
5923  * @param [in]table   Register containing the table of crc constants.
5924  *
5925  * uint32_t crc;
5926  * val = crc_table[(val ^ crc) & 0xFF];
5927  * crc = val ^ (crc >> 8);
5928  */
5929 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
5930   z_xr(val, crc);
5931   fold_byte_crc32(crc, val, table, val);
5932 }
5933 
5934 
5935 /**
5936  * @param crc   register containing existing CRC (32-bit)
5937  * @param buf   register pointing to input byte buffer (byte*)
5938  * @param len   register containing number of bytes
5939  * @param table register pointing to CRC table
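 *
 * Functionally equivalent C sketch (one byte per iteration, zlib-style):
 *
 *   while (len-- > 0) {
 *     crc = table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
 *   }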
5940  */
5941 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
5942   assert_different_registers(crc, buf, len, table, data);
5943 
5944   Label L_mainLoop, L_done;
5945   const int mainLoop_stepping = 1;
5946 
5947   // Process all bytes in a single-byte loop.
5948   z_ltr(len, len);
5949   z_brnh(L_done);
5950 
5951   bind(L_mainLoop);
    z_llgc(data, Address(buf, (intptr_t)0)); // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
5953     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
5954     update_byte_crc32(crc, data, table);
5955     z_brct(len, L_mainLoop);                // Iterate.
5956 
5957   bind(L_done);
5958 }
5959 
5960 /**
5961  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
5962  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
5963  *
5964  */
5965 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
5966                                         Register t0,  Register t1,  Register t2,    Register t3) {
5967   // This is what we implement (the DOBIG4 part):
5968   //
5969   // #define DOBIG4 c ^= *++buf4; \
5970   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
5971   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
5972   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
5973   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
5974   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
5975   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
5976   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
5977   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
5978 
5979   // XOR crc with next four bytes of buffer.
5980   lgr_if_needed(t0, crc);
5981   z_x(t0, Address(buf, bufDisp));
5982   if (bufInc != 0) {
5983     add2reg(buf, bufInc);
5984   }
5985 
5986   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
5987   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
5988   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
5989   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
5990   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
5991 
5992   // XOR indexed table values to calculate updated crc.
5993   z_ly(t2, Address(table, t2, (intptr_t)ix1));
5994   z_ly(t0, Address(table, t0, (intptr_t)ix3));
5995   z_xy(t2, Address(table, t3, (intptr_t)ix0));
5996   z_xy(t0, Address(table, t1, (intptr_t)ix2));
5997   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
5998   lgr_if_needed(crc, t0);
5999 }
6000 
6001 /**
6002  * @param crc   register containing existing CRC (32-bit)
6003  * @param buf   register pointing to input byte buffer (byte*)
6004  * @param len   register containing number of bytes
6005  * @param table register pointing to CRC table
6006  *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6008  */
6009 void MacroAssembler::kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
6010                                         Register t0,  Register t1,  Register t2,  Register t3,
6011                                         bool invertCRC) {
6012   assert_different_registers(crc, buf, len, table);
6013 
6014   Label L_mainLoop, L_tail;
6015   Register  data = t0;
6016   Register  ctr  = Z_R0;
6017   const int mainLoop_stepping = 8;
6018   const int tailLoop_stepping = 1;
6019   const int log_stepping      = exact_log2(mainLoop_stepping);
6020 
6021   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6022   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6023   // The situation itself is detected and handled correctly by the conditional branches
6024   // following aghi(len, -stepping) and aghi(len, +stepping).
6025 
6026   if (invertCRC) {
6027     not_(crc, noreg, false);           // 1s complement of crc
6028   }
6029 
6030 #if 0
6031   {
6032     // Pre-mainLoop alignment did not show any positive effect on performance.
6033     // We leave the code in for reference. Maybe the vector instructions in z13 depend on alignment.
6034 
6035     z_cghi(len, mainLoop_stepping);    // Alignment is useless for short data streams.
6036     z_brnh(L_tail);
6037 
6038     // Align buf to word (4-byte) boundary.
6039     z_lcr(ctr, buf);
6040     rotate_then_insert(ctr, ctr, 62, 63, 0, true); // TODO: should set cc
6041     z_sgfr(len, ctr);                  // Remaining len after alignment.
6042 
6043     update_byteLoop_crc32(crc, buf, ctr, table, data);
6044   }
6045 #endif
6046 
6047   // Check for short (<mainLoop_stepping bytes) buffer.
6048   z_srag(ctr, len, log_stepping);
6049   z_brnh(L_tail);
6050 
6051   z_lrvr(crc, crc);          // Revert byte order because we are dealing with big-endian data.
6052   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6053 
6054   BIND(L_mainLoop);
6055     update_1word_crc32(crc, buf, table, 0, 0, crc, t1, t2, t3);
6056     update_1word_crc32(crc, buf, table, 4, mainLoop_stepping, crc, t1, t2, t3);
6057     z_brct(ctr, L_mainLoop); // Iterate.
6058 
6059   z_lrvr(crc, crc);          // Revert byte order back to original.
6060 
6061   // Process last few (<8) bytes of buffer.
6062   BIND(L_tail);
6063   update_byteLoop_crc32(crc, buf, len, table, data);
6064 
6065   if (invertCRC) {
6066     not_(crc, noreg, false);           // 1s complement of crc
6067   }
6068 }
6069 
6070 /**
6071  * @param crc   register containing existing CRC (32-bit)
6072  * @param buf   register pointing to input byte buffer (byte*)
6073  * @param len   register containing number of bytes
6074  * @param table register pointing to CRC table
6075  *
 * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
6077  */
6078 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
6079                                         Register t0,  Register t1,  Register t2,  Register t3,
6080                                         bool invertCRC) {
6081   assert_different_registers(crc, buf, len, table);
6082 
6083   Label L_mainLoop, L_tail;
6084   Register  data = t0;
6085   Register  ctr  = Z_R0;
6086   const int mainLoop_stepping = 4;
6087   const int log_stepping      = exact_log2(mainLoop_stepping);
6088 
6089   // Don't test for len <= 0 here. This pathological case should not occur anyway.
6090   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
6091   // The situation itself is detected and handled correctly by the conditional branches
6092   // following aghi(len, -stepping) and aghi(len, +stepping).
6093 
6094   if (invertCRC) {
6095     not_(crc, noreg, false);           // 1s complement of crc
6096   }
6097 
6098   // Check for short (<4 bytes) buffer.
6099   z_srag(ctr, len, log_stepping);
6100   z_brnh(L_tail);
6101 
6102   z_lrvr(crc, crc);          // Revert byte order because we are dealing with big-endian data.
6103   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
6104 
6105   BIND(L_mainLoop);
6106     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
6107     z_brct(ctr, L_mainLoop); // Iterate.
6108 
6109   z_lrvr(crc, crc);          // Revert byte order back to original.
6110 
  // Process last few (<4) bytes of buffer.
6112   BIND(L_tail);
6113   update_byteLoop_crc32(crc, buf, len, table, data);
6114 
6115   if (invertCRC) {
6116     not_(crc, noreg, false);           // 1s complement of crc
6117   }
6118 }
6119 
6120 /**
6121  * @param crc   register containing existing CRC (32-bit)
6122  * @param buf   register pointing to input byte buffer (byte*)
6123  * @param len   register containing number of bytes
6124  * @param table register pointing to CRC table
6125  */
6126 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
6127                                         Register t0,  Register t1,  Register t2,  Register t3,
6128                                         bool invertCRC) {
6129   assert_different_registers(crc, buf, len, table);
6130   Register data = t0;
6131 
6132   if (invertCRC) {
6133     not_(crc, noreg, false);           // 1s complement of crc
6134   }
6135 
6136   update_byteLoop_crc32(crc, buf, len, table, data);
6137 
6138   if (invertCRC) {
6139     not_(crc, noreg, false);           // 1s complement of crc
6140   }
6141 }
6142 
6143 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
6144                                              bool invertCRC) {
6145   assert_different_registers(crc, buf, len, table, tmp);
6146 
6147   if (invertCRC) {
6148     not_(crc, noreg, false);           // 1s complement of crc
6149   }
6150 
6151   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
6152   update_byte_crc32(crc, tmp, table);
6153 
6154   if (invertCRC) {
6155     not_(crc, noreg, false);           // 1s complement of crc
6156   }
6157 }
6158 
6159 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
6160                                                 bool invertCRC) {
6161   assert_different_registers(crc, val, table);
6162 
6163   if (invertCRC) {
6164     not_(crc, noreg, false);           // 1s complement of crc
6165   }
6166 
6167   update_byte_crc32(crc, val, table);
6168 
6169   if (invertCRC) {
6170     not_(crc, noreg, false);           // 1s complement of crc
6171   }
6172 }
6173 
6174 //
6175 // Code for BigInteger::multiplyToLen() intrinsic.
6176 //
6177 
// dest_lo += src1 + src2
// dest_hi += carry of (dest_lo + src1) + carry of (dest_lo + src1 + src2)
// Z_R7 is destroyed!
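// Worked example (an illustrative sketch): dest_hi:dest_lo = 0x1:0xFFFFFFFFFFFFFFFF, src1 = 1, src2 = 2
//   dest_lo + src1 = 0x0 with carry  -> dest_hi becomes 0x2
//   dest_lo + src2 = 0x2, no carry   -> dest_hi stays 0x2
// Result: dest_hi:dest_lo = 0x2:0x0000000000000002.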
6181 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
6182                                      Register src1, Register src2) {
6183   clear_reg(Z_R7);
6184   z_algr(dest_lo, src1);
6185   z_alcgr(dest_hi, Z_R7);
6186   z_algr(dest_lo, src2);
6187   z_alcgr(dest_hi, Z_R7);
6188 }
6189 
6190 // Multiply 64 bit by 64 bit first loop.
6191 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
6192                                            Register x_xstart,
6193                                            Register y, Register y_idx,
6194                                            Register z,
6195                                            Register carry,
6196                                            Register product,
6197                                            Register idx, Register kdx) {
6198   // jlong carry, x[], y[], z[];
6199   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6200   //   huge_128 product = y[idx] * x[xstart] + carry;
6201   //   z[kdx] = (jlong)product;
6202   //   carry  = (jlong)(product >>> 64);
6203   // }
6204   // z[xstart] = carry;
6205 
6206   Label L_first_loop, L_first_loop_exit;
6207   Label L_one_x, L_one_y, L_multiply;
6208 
6209   z_aghi(xstart, -1);
6210   z_brl(L_one_x);   // Special case: length of x is 1.
6211 
6212   // Load next two integers of x.
6213   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6214   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6215 
6216 
6217   bind(L_first_loop);
6218 
6219   z_aghi(idx, -1);
6220   z_brl(L_first_loop_exit);
6221   z_aghi(idx, -1);
6222   z_brl(L_one_y);
6223 
6224   // Load next two integers of y.
6225   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
6226   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
6227 
6228 
6229   bind(L_multiply);
6230 
6231   Register multiplicand = product->successor();
6232   Register product_low = multiplicand;
6233 
6234   lgr_if_needed(multiplicand, x_xstart);
6235   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
6236   clear_reg(Z_R7);
6237   z_algr(product_low, carry); // Add carry to result.
6238   z_alcgr(product, Z_R7);     // Add carry of the last addition.
6239   add2reg(kdx, -2);
6240 
6241   // Store result.
6242   z_sllg(Z_R7, kdx, LogBytesPerInt);
6243   reg2mem_opt(product_low, Address(z, Z_R7, 0));
6244   lgr_if_needed(carry, product);
6245   z_bru(L_first_loop);
6246 
6247 
6248   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
6249 
6250   clear_reg(y_idx);
6251   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
6252   z_bru(L_multiply);
6253 
6254 
6255   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
6256 
6257   clear_reg(x_xstart);
6258   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6259   z_bru(L_first_loop);
6260 
6261   bind(L_first_loop_exit);
6262 }
6263 
6264 // Multiply 64 bit by 64 bit and add 128 bit.
6265 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
6266                                             Register z,
6267                                             Register yz_idx, Register idx,
6268                                             Register carry, Register product,
6269                                             int offset) {
6270   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
6271   // z[kdx] = (jlong)product;
6272 
6273   Register multiplicand = product->successor();
6274   Register product_low = multiplicand;
6275 
6276   z_sllg(Z_R7, idx, LogBytesPerInt);
6277   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
6278 
6279   lgr_if_needed(multiplicand, x_xstart);
6280   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6281   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
6282 
6283   add2_with_carry(product, product_low, carry, yz_idx);
6284 
6285   z_sllg(Z_R7, idx, LogBytesPerInt);
6286   reg2mem_opt(product_low, Address(z, Z_R7, offset));
6287 
6288 }
6289 
6290 // Multiply 128 bit by 128 bit. Unrolled inner loop.
6291 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
6292                                              Register y, Register z,
6293                                              Register yz_idx, Register idx,
6294                                              Register jdx,
6295                                              Register carry, Register product,
6296                                              Register carry2) {
6297   // jlong carry, x[], y[], z[];
6298   // int kdx = ystart+1;
6299   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
6300   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
6301   //   z[kdx+idx+1] = (jlong)product;
6302   //   jlong carry2 = (jlong)(product >>> 64);
6303   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
6304   //   z[kdx+idx] = (jlong)product;
6305   //   carry = (jlong)(product >>> 64);
6306   // }
6307   // idx += 2;
6308   // if (idx > 0) {
6309   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
6310   //   z[kdx+idx] = (jlong)product;
6311   //   carry = (jlong)(product >>> 64);
6312   // }
6313 
6314   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
6315 
6316   // scale the index
6317   lgr_if_needed(jdx, idx);
6318   and_imm(jdx, 0xfffffffffffffffcL);
6319   rshift(jdx, 2);
6320 
6321 
6322   bind(L_third_loop);
6323 
6324   z_aghi(jdx, -1);
6325   z_brl(L_third_loop_exit);
6326   add2reg(idx, -4);
6327 
6328   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
6329   lgr_if_needed(carry2, product);
6330 
6331   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
6332   lgr_if_needed(carry, product);
6333   z_bru(L_third_loop);
6334 
6335 
6336   bind(L_third_loop_exit);  // Handle any left-over operand parts.
6337 
6338   and_imm(idx, 0x3);
6339   z_brz(L_post_third_loop_done);
6340 
6341   Label L_check_1;
6342 
6343   z_aghi(idx, -2);
6344   z_brl(L_check_1);
6345 
6346   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
6347   lgr_if_needed(carry, product);
6348 
6349 
6350   bind(L_check_1);
6351 
6352   add2reg(idx, 0x2);
6353   and_imm(idx, 0x1);
6354   z_aghi(idx, -1);
6355   z_brl(L_post_third_loop_done);
6356 
6357   Register   multiplicand = product->successor();
6358   Register   product_low = multiplicand;
6359 
6360   z_sllg(Z_R7, idx, LogBytesPerInt);
6361   clear_reg(yz_idx);
6362   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
6363   lgr_if_needed(multiplicand, x_xstart);
6364   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
6365   clear_reg(yz_idx);
6366   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
6367 
6368   add2_with_carry(product, product_low, yz_idx, carry);
6369 
6370   z_sllg(Z_R7, idx, LogBytesPerInt);
6371   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
6372   rshift(product_low, 32);
6373 
6374   lshift(product, 32);
6375   z_ogr(product_low, product);
6376   lgr_if_needed(carry, product_low);
6377 
6378   bind(L_post_third_loop_done);
6379 }
6380 
6381 void MacroAssembler::multiply_to_len(Register x, Register xlen,
6382                                      Register y, Register ylen,
6383                                      Register z,
6384                                      Register tmp1, Register tmp2,
6385                                      Register tmp3, Register tmp4,
6386                                      Register tmp5) {
6387   ShortBranchVerifier sbv(this);
6388 
6389   assert_different_registers(x, xlen, y, ylen, z,
6390                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
6391   assert_different_registers(x, xlen, y, ylen, z,
6392                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
6393 
6394   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6395 
  // In OpenJDK, the zlen argument is passed as a 32-bit value in a stack slot.
6397   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
6398 
6399   const Register idx = tmp1;
6400   const Register kdx = tmp2;
6401   const Register xstart = tmp3;
6402 
6403   const Register y_idx = tmp4;
6404   const Register carry = tmp5;
6405   const Register product  = Z_R0_scratch;
6406   const Register x_xstart = Z_R8;
6407 
6408   // First Loop.
6409   //
6410   //   final static long LONG_MASK = 0xffffffffL;
6411   //   int xstart = xlen - 1;
6412   //   int ystart = ylen - 1;
6413   //   long carry = 0;
  //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
6415   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
6416   //     z[kdx] = (int)product;
6417   //     carry = product >>> 32;
6418   //   }
6419   //   z[xstart] = (int)carry;
6420   //
6421 
6422   lgr_if_needed(idx, ylen);  // idx = ylen
6423   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
6424   clear_reg(carry);          // carry = 0
6425 
6426   Label L_done;
6427 
6428   lgr_if_needed(xstart, xlen);
6429   z_aghi(xstart, -1);
6430   z_brl(L_done);
6431 
6432   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
6433 
6434   NearLabel L_second_loop;
6435   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
6436 
6437   NearLabel L_carry;
6438   z_aghi(kdx, -1);
6439   z_brz(L_carry);
6440 
6441   // Store lower 32 bits of carry.
6442   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6443   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6444   rshift(carry, 32);
6445   z_aghi(kdx, -1);
6446 
6447 
6448   bind(L_carry);
6449 
6450   // Store upper 32 bits of carry.
6451   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
6452   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6453 
6454   // Second and third (nested) loops.
6455   //
6456   // for (int i = xstart-1; i >= 0; i--) { // Second loop
6457   //   carry = 0;
6458   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
6459   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
6460   //                    (z[k] & LONG_MASK) + carry;
6461   //     z[k] = (int)product;
6462   //     carry = product >>> 32;
6463   //   }
6464   //   z[i] = (int)carry;
6465   // }
6466   //
6467   // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
6468 
6469   const Register jdx = tmp1;
6470 
6471   bind(L_second_loop);
6472 
6473   clear_reg(carry);           // carry = 0;
6474   lgr_if_needed(jdx, ylen);   // j = ystart+1
6475 
6476   z_aghi(xstart, -1);         // i = xstart-1;
6477   z_brl(L_done);
6478 
6479   // Use free slots in the current stackframe instead of push/pop.
6480   Address zsave(Z_SP, _z_abi(carg_1));
6481   reg2mem_opt(z, zsave);
6482 
6483 
6484   Label L_last_x;
6485 
6486   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6487   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
6488   z_aghi(xstart, -1);                           // i = xstart-1;
6489   z_brl(L_last_x);
6490 
6491   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
6492   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
6493 
6494 
6495   Label L_third_loop_prologue;
6496 
6497   bind(L_third_loop_prologue);
6498 
6499   Address xsave(Z_SP, _z_abi(carg_2));
6500   Address xlensave(Z_SP, _z_abi(carg_3));
6501   Address ylensave(Z_SP, _z_abi(carg_4));
6502 
6503   reg2mem_opt(x, xsave);
6504   reg2mem_opt(xstart, xlensave);
6505   reg2mem_opt(ylen, ylensave);
6506 
6507 
6508   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
6509 
6510   mem2reg_opt(z, zsave);
6511   mem2reg_opt(x, xsave);
6512   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
6513   mem2reg_opt(ylen, ylensave);
6514 
6515   add2reg(tmp3, 1, xlen);
6516   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6517   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6518   z_aghi(tmp3, -1);
6519   z_brl(L_done);
6520 
6521   rshift(carry, 32);
6522   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
6523   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
6524   z_bru(L_second_loop);
6525 
6526   // Next infrequent code is moved outside loops.
6527   bind(L_last_x);
6528 
6529   clear_reg(x_xstart);
6530   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
6531   z_bru(L_third_loop_prologue);
6532 
6533   bind(L_done);
6534 
6535   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
6536 }
6537 
6538 #ifndef PRODUCT
6539 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
6540 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
6541   Label ok;
6542   if (check_equal) {
6543     z_bre(ok);
6544   } else {
6545     z_brne(ok);
6546   }
6547   stop(msg, id);
6548   bind(ok);
6549 }
6550 
6551 // Assert if CC indicates "low".
6552 void MacroAssembler::asm_assert_low(const char *msg, int id) {
6553   Label ok;
6554   z_brnl(ok);
6555   stop(msg, id);
6556   bind(ok);
6557 }
6558 
6559 // Assert if CC indicates "high".
6560 void MacroAssembler::asm_assert_high(const char *msg, int id) {
6561   Label ok;
6562   z_brnh(ok);
6563   stop(msg, id);
6564   bind(ok);
6565 }
6566 
6567 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
6568 // generate non-relocatable code.
6569 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
6570   Label ok;
6571   if (check_equal) { z_bre(ok); }
6572   else             { z_brne(ok); }
6573   stop_static(msg, id);
6574   bind(ok);
6575 }
6576 
6577 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
6578                                           Register mem_base, const char* msg, int id) {
6579   switch (size) {
6580     case 4:
6581       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
6582       break;
6583     case 8:
6584       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
6585       break;
6586     default:
6587       ShouldNotReachHere();
6588   }
6589   if (allow_relocation) { asm_assert(check_equal, msg, id); }
6590   else                  { asm_assert_static(check_equal, msg, id); }
6591 }
6592 
6593 // Check the condition
6594 //   expected_size == FP - SP
6595 // after transformation:
6596 //   expected_size - FP + SP == 0
6597 // Destroys Register expected_size if no tmp register is passed.
6598 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
  if (tmp == noreg) {
    tmp = expected_size;
  } else if (tmp != expected_size) {
    z_lgr(tmp, expected_size);
  }
  z_algr(tmp, Z_SP);
  z_slg(tmp, 0, Z_R0, Z_SP);
  asm_assert_eq(msg, id);
}
6610 #endif // !PRODUCT
6611 
6612 void MacroAssembler::verify_thread() {
6613   if (VerifyThread) {
6614     unimplemented("", 117);
6615   }
6616 }
6617 
6618 // Plausibility check for oops.
6619 void MacroAssembler::verify_oop(Register oop, const char* msg) {
6620   if (!VerifyOops) return;
6621 
6622   BLOCK_COMMENT("verify_oop {");
6623   Register tmp = Z_R0;
  unsigned int nbytes_save = 6 * 8;   // Space for Z_R0..Z_R5 (saved below).
6625   address entry = StubRoutines::verify_oop_subroutine_entry_address();
6626   save_return_pc();
6627   push_frame_abi160(nbytes_save);
6628   z_stmg(Z_R0, Z_R5, 160, Z_SP);
6629 
6630   z_lgr(Z_ARG2, oop);
6631   load_const(Z_ARG1, (address) msg);
6632   load_const(Z_R1, entry);
6633   z_lg(Z_R1, 0, Z_R1);
6634   call_c(Z_R1);
6635 
6636   z_lmg(Z_R0, Z_R5, 160, Z_SP);
6637   pop_frame();
6638 
6639   restore_return_pc();
6640   BLOCK_COMMENT("} verify_oop ");
6641 }
6642 
6643 const char* MacroAssembler::stop_types[] = {
6644   "stop",
6645   "untested",
6646   "unimplemented",
6647   "shouldnotreachhere"
6648 };
6649 
6650 static void stop_on_request(const char* tp, const char* msg) {
6651   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
6652   guarantee(false, "Z assembly code requires stop: %s", msg);
6653 }
6654 
6655 void MacroAssembler::stop(int type, const char* msg, int id) {
6656   BLOCK_COMMENT(err_msg("stop: %s {", msg));
6657 
6658   // Setup arguments.
6659   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
6660   load_const(Z_ARG2, (void*) msg);
6661   get_PC(Z_R14); // Following code pushes a frame without entering a new function. Use current pc as return address.
6662   save_return_pc();    // Saves return pc Z_R14.
6663   push_frame_abi160(0);
6664   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
  // The plain disassembler does not recognize illtrap. It instead displays
  // a 32-bit value. Issuing two illtraps ensures the disassembler finds
  // the proper beginning of the next instruction.
6668   z_illtrap(); // Illegal instruction.
6669   z_illtrap(); // Illegal instruction.
6670 
6671   BLOCK_COMMENT(" } stop");
6672 }
6673 
6674 // Special version of stop() for code size reduction.
6675 // Reuses the previously generated call sequence, if any.
6676 // Generates the call sequence on its own, if necessary.
6677 // Note: This code will work only in non-relocatable code!
6678 //       The relative address of the data elements (arg1, arg2) must not change.
//       The reentry point must not move relative to its users. This prerequisite
6680 //       should be given for "hand-written" code, if all chain calls are in the same code blob.
6681 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
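// Intended usage, shown as a hedged sketch (the stop_* type constants are declared in the header):
//
//   address reentry = NULL;
//   reentry = stop_chain(reentry, stop_stop, "check 1 failed", 0, false);  // First call emits the full sequence.
//   reentry = stop_chain(reentry, stop_stop, "check 2 failed", 0, false);  // Later calls just branch back to it.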
6682 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
6683   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
6684 
6685   // Setup arguments.
6686   if (allow_relocation) {
6687     // Relocatable version (for comparison purposes). Remove after some time.
6688     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
6689     load_const(Z_ARG2, (void*) msg);
6690   } else {
6691     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
6692     load_absolute_address(Z_ARG2, (address)msg);
6693   }
6694   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
6695     BLOCK_COMMENT("branch to reentry point:");
6696     z_brc(bcondAlways, reentry);
6697   } else {
6698     BLOCK_COMMENT("reentry point:");
6699     reentry = pc();      // Re-entry point for subsequent stop calls.
6700     save_return_pc();    // Saves return pc Z_R14.
6701     push_frame_abi160(0);
6702     if (allow_relocation) {
6703       reentry = NULL;    // Prevent reentry if code relocation is allowed.
6704       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6705     } else {
6706       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
6707     }
6708     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
6709   }
6710   BLOCK_COMMENT(" } stop_chain");
6711 
6712   return reentry;
6713 }
6714 
6715 // Special version of stop() for code size reduction.
6716 // Assumes constant relative addresses for data and runtime call.
6717 void MacroAssembler::stop_static(int type, const char* msg, int id) {
6718   stop_chain(NULL, type, msg, id, false);
6719 }
6720 
6721 void MacroAssembler::stop_subroutine() {
6722   unimplemented("stop_subroutine", 710);
6723 }
6724 
// Prints msg to stdout from within generated code.
6726 void MacroAssembler::warn(const char* msg) {
6727   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
6728   load_absolute_address(Z_R1, (address) warning);
6729   load_absolute_address(Z_ARG1, (address) msg);
6730   (void) call(Z_R1);
6731   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
6732 }
6733 
6734 #ifndef PRODUCT
6735 
6736 // Write pattern 0x0101010101010101 in region [low-before, high+after].
6737 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
6738   if (!ZapEmptyStackFields) return;
6739   BLOCK_COMMENT("zap memory region {");
6740   load_const_optimized(val, 0x0101010101010101);
6741   int size = before + after;
6742   if (low == high && size < 5 && size > 0) {
6743     int offset = -before*BytesPerWord;
6744     for (int i = 0; i < size; ++i) {
6745       z_stg(val, Address(low, offset));
6746       offset +=(1*BytesPerWord);
6747     }
6748   } else {
6749     add2reg(addr, -before*BytesPerWord, low);
6750     if (after) {
6751 #ifdef ASSERT
6752       jlong check = after * BytesPerWord;
6753       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
6754 #endif
6755       add2reg(high, after * BytesPerWord);
6756     }
6757     NearLabel loop;
6758     bind(loop);
6759     z_stg(val, Address(addr));
6760     add2reg(addr, 8);
6761     compare64_and_branch(addr, high, bcondNotHigh, loop);
6762     if (after) {
6763       add2reg(high, -after * BytesPerWord);
6764     }
6765   }
6766   BLOCK_COMMENT("} zap memory region");
6767 }
6768 #endif // !PRODUCT
6769 
6770 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
6771   _masm = masm;
6772   _masm->load_absolute_address(_rscratch, (address)flag_addr);
6773   _masm->load_and_test_int(_rscratch, Address(_rscratch));
6774   if (value) {
6775     _masm->z_brne(_label); // Skip if true, i.e. != 0.
6776   } else {
6777     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
6778   }
6779 }
6780 
6781 SkipIfEqual::~SkipIfEqual() {
6782   _masm->bind(_label);
6783 }
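
// Typical SkipIfEqual usage, shown as a hedged sketch (SomeBoolFlag is a placeholder name):
//
//   {
//     SkipIfEqual skip(this, &SomeBoolFlag, false, Z_R1_scratch);
//     // Code emitted here is executed at run time only if SomeBoolFlag is true.
//   } // The destructor binds the skip target here.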