/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2017 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
  if (Assembler::is_simm(si31, 16)) {
    ld(d, si31, a);
    if (emit_filler_nop) nop();
  } else {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
    addis(d, a, hi);
    ld(d, lo, d);
  }
}
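
// Illustrative example (the value is hypothetical, not from a caller): for
// si31 = 0x12348765 the offset does not fit into 16 bits, so the
// two-instruction form is emitted. Assuming the usual hi/lo split, where the
// high half-word is rounded up by one when the low half-word is negative as
// a simm16, this yields:
//    addis d, a, 0x1235
//    ld    d, -0x789b(d)    // 0x12350000 - 0x789b == 0x12348765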

void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
  assert_different_registers(d, a);
  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
}

void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                      size_t size_in_bytes, bool is_signed) {
  switch (size_in_bytes) {
  case  8:              ld(dst, offs, base);                         break;
  case  4:  is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
  case  2:  is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
  case  1:  lbz(dst, offs, base); if (is_signed) extsb(dst, dst);    break; // lba doesn't exist :(
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
                                       size_t size_in_bytes) {
  switch (size_in_bytes) {
  case  8:  std(dst, offs, base); break;
  case  4:  stw(dst, offs, base); break;
  case  2:  sth(dst, offs, base); break;
  case  1:  stb(dst, offs, base); break;
  default:  ShouldNotReachHere();
  }
}

void MacroAssembler::align(int modulus, int max, int rem) {
  int padding = (rem + modulus - (offset() % modulus)) % modulus;
  if (padding > max) return;
  for (int c = (padding >> 2); c > 0; --c) { nop(); }
}
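
// Worked example for the padding computation above (hypothetical offsets):
// with modulus = 16, rem = 0 and the code buffer at offset() == 20,
// padding = (0 + 16 - 20 % 16) % 16 = 12, so three nops are emitted to
// reach the next 16-byte boundary.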

// Issue instructions that calculate the given address from the global TOC.
void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
                                                       bool add_relocation, bool emit_dummy_addr) {
  int offset = -1;
  if (emit_dummy_addr) {
    offset = -128; // dummy address
  } else if (addr != (address)(intptr_t)-1) {
    offset = MacroAssembler::offset_to_global_toc(addr);
  }

  if (hi16) {
    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
  }
  if (lo16) {
    if (add_relocation) {
      // Relocate at the addi to avoid confusion with a load from the method's TOC.
      relocate(internal_word_Relocation::spec(addr));
    }
    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
  }
}
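
// With both hi16 and lo16 set, the sequence emitted above is, schematically:
//    addis dst, R29, offset_hi16    // R29 holds the global TOC
//    addi  dst, dst, offset_lo16
// This is exactly the shape that patch_calculate_address_from_global_toc_at()
// below searches for and re-patches.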

int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
  const int offset = MacroAssembler::offset_to_global_toc(addr);

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
}

address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the addi,
  // and the addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // Stop, found the addis which writes dst.
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");

  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
  // -1 is a special case
  if (offset == -1) {
    return (address)(intptr_t)-1;
  } else {
    return global_toc() + offset;
  }
}
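
// Worked example for the read-back above (hypothetical immediates): for
// hi16 = 0x1235 and lo16 = -0x789b, offset = (0x1235 << 16) + (-0x789b)
// = 0x12348765; the signed addition undoes the rounding that
// largeoffset_si16_si16_hi() applied when the sequence was emitted.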

#ifdef _LP64
// Patch compressed oops or klass constants.
// Assembler sequence is
// 1) compressed oops:
//    lis  rx = const.hi
//    ori  rx = rx | const.lo
// 2) compressed klass:
//    lis  rx = const.hi
//    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
//    ori  rx = rx | const.lo
// The optional clrldi is skipped over when patching.
int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;
  while (inst1_addr >= bound) {
    inst1 = *(int *)inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  int xc = (data >> 16) & 0xffff;
  int xd = (data >>  0) & 0xffff;

  set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
  set_imm((int *)inst2_addr,        (xd)); // unsigned int
  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
}

// Get compressed oop or klass constant.
narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
  assert(UseCompressedOops, "Should only patch compressed oops");

  const address inst2_addr = a;
  const int inst2 = *(int *)inst2_addr;

  // The relocation points to the second instruction, the ori,
  // and the ori reads and writes the same register dst.
  const int dst = inv_rta_field(inst2);
  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  bool inst1_found = false;

  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
    inst1_addr -= BytesPerInstWord;
  }
  assert(inst1_found, "inst is not lis");

  uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
  uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);

  return (int) (xl | xh);
}
#endif // _LP64

void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
  int toc_offset = 0;
  // Use RelocationHolder::none for the constant pool entry, otherwise
  // we will end up with a failing NativeCall::verify(x) where x is
  // the address of the constant pool entry.
  // FIXME: We should insert relocation information for oops at the constant
  // pool entries instead of inserting it at the loads; patching of a constant
  // pool entry should be less expensive.
  address oop_address = address_constant((address)a.value(), RelocationHolder::none);
  // Relocate at the pc of the load.
  relocate(a.rspec());
  toc_offset = (int)(oop_address - code()->consts()->start());
  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
}

bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  // The relocation points to the ld or the addis.
  return (is_ld(inst1)) ||
         (is_addis(inst1) && inv_ra_field(inst1) != 0);
}

int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");

  const address inst1_addr = a;
  const int inst1 = *(int *)inst1_addr;

  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else if (is_addis(inst1)) {
    const int dst = inv_rt_field(inst1);

    // Now, find the succeeding ld which reads and writes to dst.
    address inst2_addr = inst1_addr + BytesPerInstWord;
    int inst2 = 0;
    while (true) {
      inst2 = *(int *) inst2_addr;
      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
        // Stop, found the ld which reads and writes dst.
        break;
      }
      inst2_addr += BytesPerInstWord;
    }
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
  ShouldNotReachHere();
  return 0;
}

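// The two `load_const' shapes decoded below are, schematically (instruction
// indices as used by the get_imm()/set_imm() calls; the slots not touched
// here are inferred and listed for orientation only):
// 1) without a temp register (second instruction is an ori):
//      0: lis  d, x3         // bits 48..63
//      1: ori  d, d, x2      // bits 32..47
//      2: shift d left by 32
//      3: oris d, d, x1      // bits 16..31
//      4: ori  d, d, x0      // bits  0..15
// 2) with a temp register (second instruction is a lis):
//      0: lis  d, x3         // bits 48..63
//      1: lis  tmp, x1       // bits 16..31
//      2: ori  d, d, x2      // bits 32..47
//      3: ori  tmp, tmp, x0  // bits  0..15
//      4: merge d and tmp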
// Get the constant from a `load_const' sequence.
long MacroAssembler::get_const(address a) {
  assert(is_load_const_at(a), "not a load of a constant");
  const int *p = (const int*) a;
  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
  if (is_ori(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
  } else if (is_lis(*(p+1))) {
    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
  } else {
    ShouldNotReachHere();
    return (long) 0;
  }
  return (long) x;
}

// Patch the 64-bit constant of a `load_const' sequence. This is a low-level
// procedure: it neither flushes the instruction cache nor is it MT-safe.
void MacroAssembler::patch_const(address a, long x) {
  assert(is_load_const_at(a), "not a load of a constant");
  int *p = (int*) a;
  if (is_ori(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(1 + p, (x >> 32) & 0xffff);
    set_imm(3 + p, (x >> 16) & 0xffff);
    set_imm(4 + p, x & 0xffff);
  } else if (is_lis(*(p+1))) {
    set_imm(0 + p, (x >> 48) & 0xffff);
    set_imm(2 + p, (x >> 32) & 0xffff);
    set_imm(1 + p, (x >> 16) & 0xffff);
    set_imm(3 + p, x & 0xffff);
  } else {
    ShouldNotReachHere();
  }
}
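
// Note on usage (a sketch, not a concrete call site): since patch_const()
// neither flushes the instruction cache nor synchronizes with other threads,
// a caller patching live code is expected to follow up along the lines of
//   ICache::ppc64_flush_icache_bytes(a, 5 * BytesPerInstWord);
// (five instructions, matching the imm slots 0..4 patched above).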

AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->allocate_oop_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp, int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0) {
    return RegisterOrConstant(value + offset);
  }

  // Load indirectly to solve generation ordering problem.
  // static address, no relocation
  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)

  if (offset != 0) {
    addi(tmp, tmp, offset);
  }

  return RegisterOrConstant(tmp);
}

#ifndef PRODUCT
void MacroAssembler::pd_print_patched_instruction(address branch) {
  Unimplemented(); // TODO: PPC port
}
#endif // ndef PRODUCT

// Conditional far branch for destinations encodable in 24+2 bits.
void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {

  // If requested by flag optimize, relocate the bc_far as a
  // runtime_call and prepare for optimizing it when the code gets
  // relocated.
  if (optimize == bc_far_optimize_on_relocate) {
    relocate(relocInfo::runtime_call_type);
  }

  // variant 2:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //

  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                opposite_bcond(inv_boint_bcond(boint)));

  // We emit two branches.
  // First, a conditional branch which jumps around the far branch.
  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
  const address bc_pc        = pc();
  bc(opposite_boint, biint, not_taken_pc);

  const int bc_instr = *(int*)bc_pc;
  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
         "postcondition");
  assert(biint == inv_bi_field(bc_instr), "postcondition");

  // Second, an unconditional far branch which jumps to dest.
  // Note: target(dest) remembers the current pc (see CodeSection::target)
  //       and returns the current pc if the label is not bound yet; when
  //       the label gets bound, the unconditional far branch will be patched.
  const address target_pc = target(dest);
  const address b_pc  = pc();
  b(target_pc);

  assert(not_taken_pc == pc(),                 "postcondition");
  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
}

bool MacroAssembler::is_bc_far_at(address instruction_addr) {
  return is_bc_far_variant1_at(instruction_addr) ||
         is_bc_far_variant2_at(instruction_addr) ||
         is_bc_far_variant3_at(instruction_addr);
}

address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
  if (is_bc_far_variant1_at(instruction_addr)) {
    const address instruction_1_addr = instruction_addr;
    const int instruction_1 = *(int*)instruction_1_addr;
    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
  } else if (is_bc_far_variant2_at(instruction_addr)) {
    const address instruction_2_addr = instruction_addr + 4;
    return bxx_destination(instruction_2_addr);
  } else if (is_bc_far_variant3_at(instruction_addr)) {
    return instruction_addr + 8;
  }
  // variant 4 ???
  ShouldNotReachHere();
  return NULL;
}

void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {

  if (is_bc_far_variant3_at(instruction_addr)) {
    // variant 3, far cond branch to the next instruction, already patched to nops:
    //
    //    nop
    //    endgroup
    //  SKIP/DEST:
    //
    return;
  }

  // first, extract boint and biint from the current branch
  int boint = 0;
  int biint = 0;

  ResourceMark rm;
  const int code_size = 2 * BytesPerInstWord;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
    masm.nop();
    masm.endgroup();
  } else {
    if (is_bc_far_variant1_at(instruction_addr)) {
      // variant 1, the 1st instruction contains the destination address:
      //
      //    bcxx  DEST
      //    endgroup
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = inv_bo_field(instruction_1);
      biint = inv_bi_field(instruction_1);
    } else if (is_bc_far_variant2_at(instruction_addr)) {
      // variant 2, the 2nd instruction contains the destination address:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int instruction_1 = *(int*)(instruction_addr);
      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
                                 opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
      biint = inv_bi_field(instruction_1);
    } else {
      // variant 4???
      ShouldNotReachHere();
    }

    // second, set the new branch destination and optimize the code
    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
      // variant 1:
      //
      //    bcxx  DEST
      //    endgroup
      //
      masm.bc(boint, biint, dest);
      masm.endgroup();
    } else {
      // variant 2:
      //
      //    b!cxx SKIP
      //    bxx   DEST
      //  SKIP:
      //
      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
                                                    opposite_bcond(inv_boint_bcond(boint)));
      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
      masm.bc(opposite_boint, biint, not_taken_pc);
      masm.b(dest);
    }
  }
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}
// Emit a NOT MT-safe patchable 64-bit absolute call/jump.
void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
  // get current pc
  uint64_t start_pc = (uint64_t) pc();

  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first

  // relocate here
  if (rt != relocInfo::none) {
    relocate(rt);
  }

  if (ReoptimizeCallSequences &&
      (( link && is_within_range_of_b(dest, pc_of_bl)) ||
       (!link && is_within_range_of_b(dest, pc_of_b)))) {
    // variant 2:
    // Emit an optimized, pc-relative call/jump.

    if (link) {
      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();

      // do the call
      assert(pc() == pc_of_bl, "just checking");
      bl(dest, relocInfo::none);
    } else {
      // do the jump
      assert(pc() == pc_of_b, "just checking");
      b(dest, relocInfo::none);

      // some padding
      nop();
      nop();
      nop();
      nop();
      nop();
      nop();
    }

    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
           "can't identify emitted call");
  } else {
    // variant 1:
    mr(R0, R11);  // spill R11 -> R0.

    // Load the destination address into CTR,
    // calculate destination relative to global toc.
    calculate_address_from_global_toc(R11, dest, true, true, false);

    mtctr(R11);
    mr(R11, R0);  // spill R11 <- R0.
    nop();

    // do the call/jump
    if (link) {
      bctrl();
    } else {
      bctr();
    }
    // Assert that we can identify the emitted call/jump.
    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
           "can't identify emitted call");
  }

  // Assert that we can identify the emitted call/jump.
  assert(is_bxx64_patchable_at((address)start_pc, link),
         "can't identify emitted call");
  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
         "wrong encoding of dest address");
}
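
// The two layouts emitted above are both 7 instructions long (schematic):
//
// variant 2 (pc-relative), link case:
//    0..5: nop
//    6:    bl dest
// (without link, the b comes first and the six nops follow)
//
// variant 1b (destination computed relative to the global TOC in R29):
//    0: mr    R0, R11        // spill R11
//    1: addis R11, R29, dest_hi16
//    2: addi  R11, R11, dest_lo16
//    3: mtctr R11
//    4: mr    R11, R0        // restore R11
//    5: nop
//    6: bctrl                // bctr without link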

// Identify a bxx64_patchable instruction.
bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
      || is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Does the call64_patchable instruction use a pc-relative encoding of
// the call destination?
bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
  // variant 2 is pc-relative
  return is_bxx64_patchable_variant2_at(instruction_addr, link);
}

// Identify variant 1.
bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
    && is_mtctr(instr[5]) // mtctr
    && is_load_const_at(instruction_addr);
}

// Identify variant 1b: load destination relative to global toc.
bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
    && is_mtctr(instr[3]) // mtctr
    && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
}

// Identify variant 2.
bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
  unsigned int* instr = (unsigned int*) instruction_addr;
  if (link) {
    return is_bl (instr[6])  // bl dest is last
      && is_nop(instr[0])  // nop
      && is_nop(instr[1])  // nop
      && is_nop(instr[2])  // nop
      && is_nop(instr[3])  // nop
      && is_nop(instr[4])  // nop
      && is_nop(instr[5]); // nop
  } else {
    return is_b  (instr[0])  // b  dest is first
      && is_nop(instr[1])  // nop
      && is_nop(instr[2])  // nop
      && is_nop(instr[3])  // nop
      && is_nop(instr[4])  // nop
      && is_nop(instr[5])  // nop
      && is_nop(instr[6]); // nop
  }
}

// Set dest address of a bxx64_patchable instruction.
void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
  ResourceMark rm;
  int code_size = MacroAssembler::bxx64_patchable_size;
  CodeBuffer buf(instruction_addr, code_size);
  MacroAssembler masm(&buf);
  masm.bxx64_patchable(dest, relocInfo::none, link);
  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
}

// Get dest address of a bxx64_patchable instruction.
address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
    return (address) (unsigned long) get_const(instruction_addr);
  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
    unsigned int* instr = (unsigned int*) instruction_addr;
    if (link) {
      const int instr_idx = 6; // bl is last
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    } else {
      const int instr_idx = 0; // b is first
      int branchoffset = branch_destination(instr[instr_idx], 0);
      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
    }
  // Load dest relative to global toc.
  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
                                                               instruction_addr);
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

// Uses ordering which corresponds to ABI:
//    _savegpr0_14:  std  r14,-144(r1)
//    _savegpr0_15:  std  r15,-136(r1)
//    _savegpr0_16:  std  r16,-128(r1)
void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
  std(R14, offset, dst);   offset += 8;
  std(R15, offset, dst);   offset += 8;
  std(R16, offset, dst);   offset += 8;
  std(R17, offset, dst);   offset += 8;
  std(R18, offset, dst);   offset += 8;
  std(R19, offset, dst);   offset += 8;
  std(R20, offset, dst);   offset += 8;
  std(R21, offset, dst);   offset += 8;
  std(R22, offset, dst);   offset += 8;
  std(R23, offset, dst);   offset += 8;
  std(R24, offset, dst);   offset += 8;
  std(R25, offset, dst);   offset += 8;
  std(R26, offset, dst);   offset += 8;
  std(R27, offset, dst);   offset += 8;
  std(R28, offset, dst);   offset += 8;
  std(R29, offset, dst);   offset += 8;
  std(R30, offset, dst);   offset += 8;
  std(R31, offset, dst);   offset += 8;

  stfd(F14, offset, dst);   offset += 8;
  stfd(F15, offset, dst);   offset += 8;
  stfd(F16, offset, dst);   offset += 8;
  stfd(F17, offset, dst);   offset += 8;
  stfd(F18, offset, dst);   offset += 8;
  stfd(F19, offset, dst);   offset += 8;
  stfd(F20, offset, dst);   offset += 8;
  stfd(F21, offset, dst);   offset += 8;
  stfd(F22, offset, dst);   offset += 8;
  stfd(F23, offset, dst);   offset += 8;
  stfd(F24, offset, dst);   offset += 8;
  stfd(F25, offset, dst);   offset += 8;
  stfd(F26, offset, dst);   offset += 8;
  stfd(F27, offset, dst);   offset += 8;
  stfd(F28, offset, dst);   offset += 8;
  stfd(F29, offset, dst);   offset += 8;
  stfd(F30, offset, dst);   offset += 8;
  stfd(F31, offset, dst);
}

// Uses ordering which corresponds to ABI:
//    _restgpr0_14:  ld   r14,-144(r1)
//    _restgpr0_15:  ld   r15,-136(r1)
//    _restgpr0_16:  ld   r16,-128(r1)
void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
  ld(R14, offset, src);   offset += 8;
  ld(R15, offset, src);   offset += 8;
  ld(R16, offset, src);   offset += 8;
  ld(R17, offset, src);   offset += 8;
  ld(R18, offset, src);   offset += 8;
  ld(R19, offset, src);   offset += 8;
  ld(R20, offset, src);   offset += 8;
  ld(R21, offset, src);   offset += 8;
  ld(R22, offset, src);   offset += 8;
  ld(R23, offset, src);   offset += 8;
  ld(R24, offset, src);   offset += 8;
  ld(R25, offset, src);   offset += 8;
  ld(R26, offset, src);   offset += 8;
  ld(R27, offset, src);   offset += 8;
  ld(R28, offset, src);   offset += 8;
  ld(R29, offset, src);   offset += 8;
  ld(R30, offset, src);   offset += 8;
  ld(R31, offset, src);   offset += 8;

  // FP registers
  lfd(F14, offset, src);   offset += 8;
  lfd(F15, offset, src);   offset += 8;
  lfd(F16, offset, src);   offset += 8;
  lfd(F17, offset, src);   offset += 8;
  lfd(F18, offset, src);   offset += 8;
  lfd(F19, offset, src);   offset += 8;
  lfd(F20, offset, src);   offset += 8;
  lfd(F21, offset, src);   offset += 8;
  lfd(F22, offset, src);   offset += 8;
  lfd(F23, offset, src);   offset += 8;
  lfd(F24, offset, src);   offset += 8;
  lfd(F25, offset, src);   offset += 8;
  lfd(F26, offset, src);   offset += 8;
  lfd(F27, offset, src);   offset += 8;
  lfd(F28, offset, src);   offset += 8;
  lfd(F29, offset, src);   offset += 8;
  lfd(F30, offset, src);   offset += 8;
  lfd(F31, offset, src);
}

// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
  std(R2,  offset, dst);   offset += 8;
  std(R3,  offset, dst);   offset += 8;
  std(R4,  offset, dst);   offset += 8;
  std(R5,  offset, dst);   offset += 8;
  std(R6,  offset, dst);   offset += 8;
  std(R7,  offset, dst);   offset += 8;
  std(R8,  offset, dst);   offset += 8;
  std(R9,  offset, dst);   offset += 8;
  std(R10, offset, dst);   offset += 8;
  std(R11, offset, dst);   offset += 8;
  std(R12, offset, dst);
}

// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
  ld(R2,  offset, src);   offset += 8;
  ld(R3,  offset, src);   offset += 8;
  ld(R4,  offset, src);   offset += 8;
  ld(R5,  offset, src);   offset += 8;
  ld(R6,  offset, src);   offset += 8;
  ld(R7,  offset, src);   offset += 8;
  ld(R8,  offset, src);   offset += 8;
  ld(R9,  offset, src);   offset += 8;
  ld(R10, offset, src);   offset += 8;
  ld(R11, offset, src);   offset += 8;
  ld(R12, offset, src);
}

void MacroAssembler::save_LR_CR(Register tmp) {
  mfcr(tmp);
  std(tmp, _abi(cr), R1_SP);
  mflr(tmp);
  std(tmp, _abi(lr), R1_SP);
  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
}

void MacroAssembler::restore_LR_CR(Register tmp) {
  assert(tmp != R1_SP, "must be distinct");
  ld(tmp, _abi(lr), R1_SP);
  mtlr(tmp);
  ld(tmp, _abi(cr), R1_SP);
  mtcr(tmp);
}

address MacroAssembler::get_PC_trash_LR(Register result) {
  Label L;
  bl(L);
  bind(L);
  address lr_pc = pc();
  mflr(result);
  return lr_pc;
}

void MacroAssembler::resize_frame(Register offset, Register tmp) {
#ifdef ASSERT
  assert_different_registers(offset, tmp, R1_SP);
  andi_(tmp, offset, frame::alignment_in_bytes-1);
  asm_assert_eq("resize_frame: unaligned", 0x204);
#endif

  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdux(tmp, R1_SP, offset);
}

void MacroAssembler::resize_frame(int offset, Register tmp) {
  assert(is_simm(offset, 16), "too big an offset");
  assert_different_registers(tmp, R1_SP);
  assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned");
  // tmp <- *(SP)
  ld(tmp, _abi(callers_sp), R1_SP);
  // addr <- SP + offset;
  // *(addr) <- tmp;
  // SP <- addr
  stdu(tmp, offset, R1_SP);
}

void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
  // (addr == tmp1) || (addr == tmp2) is allowed here!
  assert(tmp1 != tmp2, "must be distinct");

  // compute offset w.r.t. current stack pointer
  // tmp_1 <- addr - SP (!)
  subf(tmp1, R1_SP, addr);

  // atomically update SP keeping back link.
  resize_frame(tmp1/* offset */, tmp2/* tmp */);
}

void MacroAssembler::push_frame(Register bytes, Register tmp) {
#ifdef ASSERT
  assert(bytes != R0, "r0 not allowed here");
  andi_(R0, bytes, frame::alignment_in_bytes-1);
  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
#endif
  neg(tmp, bytes);
  stdux(R1_SP, R1_SP, tmp);
}

// Push a frame of size `bytes'.
void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
  long offset = align_addr(bytes, frame::alignment_in_bytes);
  if (is_simm(-offset, 16)) {
    stdu(R1_SP, -offset, R1_SP);
  } else {
    load_const(tmp, -offset);
    stdux(R1_SP, R1_SP, tmp);
  }
}
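
// Illustrative example (hypothetical size, assuming the usual 16-byte frame
// alignment): push_frame(112, tmp) emits the single instruction
//    stdu R1_SP, -112(R1_SP)
// which allocates the frame and stores the back link atomically, since 112
// is already aligned and -112 fits into a simm16.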

// Push a frame of size `bytes' plus abi_reg_args on top.
void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size, tmp);
}

// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
                                                      Register tmp) {
  push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}

// Pop current C frame.
void MacroAssembler::pop_frame() {
  ld(R1_SP, _abi(callers_sp), R1_SP);
}

#if defined(ABI_ELFv2)
address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
  // TODO(asmundak): make sure the caller uses R12 as function descriptor
  // most of the time.
  if (R12 != r_function_entry) {
    mr(R12, r_function_entry);
  }
  mtctr(R12);
  // Do a call or a branch.
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address MacroAssembler::call_c(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/true);
}

// For tail calls: only branch, don't link, so callee returns to caller of this function.
address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
  return branch_to(r_function_entry, /*and_link=*/false);
}

address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
  load_const(R12, function_entry, R0);
  return branch_to(R12, /*and_link=*/true);
}

#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
  // We emit standard ptrgl glue code here.
  assert((function_descriptor != R0), "function_descriptor cannot be R0");

  // retrieve necessary entries from the function descriptor
  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
  mtctr(R0);

  if (load_toc_of_callee) {
    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
  }
  if (load_env_of_callee) {
    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
  } else if (load_toc_of_callee) {
    li(R11, 0);
  }

  // do a call or a branch
  if (and_link) {
    bctrl();
  } else {
    bctr();
  }
  _last_calls_return_pc = pc();

  return _last_calls_return_pc;
}

// Call a C function via a function descriptor and use full C calling
// conventions.
// We don't use the TOC in generated code, so there is no need to save
// and restore its value.
address MacroAssembler::call_c(Register fd) {
  return branch_to(fd, /*and_link=*/true,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c_and_return_to_caller(Register fd) {
  return branch_to(fd, /*and_link=*/false,
                       /*save toc=*/false,
                       /*restore toc=*/false,
                       /*load toc=*/true,
                       /*load env=*/true);
}

address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
  if (rt != relocInfo::none) {
    // this call needs to be relocatable
    if (!ReoptimizeCallSequences
        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
        || fd == NULL   // support code-size estimation
        || !fd->is_friend_function()
        || fd->entry() == NULL) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);

      bool has_env = (fd != NULL && fd->env() != NULL);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/has_env);
    } else {
      // It's a friend function. Load the entry point and don't care about
      // toc and env. Use an optimizable call instruction, but ensure the
      // same code-size as in the case of a non-friend function.
      nop();
      nop();
      nop();
      bl64_patchable(fd->entry(), rt);
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  } else {
    // This call does not need to be relocatable, do more aggressive
    // optimizations.
    if (!ReoptimizeCallSequences
      || !fd->is_friend_function()) {
      // It's not a friend function as defined by class FunctionDescriptor,
      // so do a full call-c here.
      load_const(R11, (address)fd, R0);
      return branch_to(R11, /*and_link=*/true,
                            /*save toc=*/false,
                            /*restore toc=*/false,
                            /*load toc=*/true,
                            /*load env=*/true);
    } else {
      // It's a friend function: load the entry point and don't care about
      // toc and env.
      address dest = fd->entry();
      if (is_within_range_of_b(dest, pc())) {
        bl(dest);
      } else {
        bl64_patchable(dest, rt);
      }
      _last_calls_return_pc = pc();
      return _last_calls_return_pc;
    }
  }
}

// Call a C function.  All constants needed reside in TOC.
//
// Read the address to call from the TOC.
// Read env from TOC, if fd specifies an env.
// Read new TOC from TOC.
address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
                                         relocInfo::relocType rt, Register toc) {
  if (!ReoptimizeCallSequences
    || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
    || !fd->is_friend_function()) {
    // It's not a friend function as defined by class FunctionDescriptor,
    // so do a full call-c here.
    assert(fd->entry() != NULL, "function must be linked");

    AddressLiteral fd_entry(fd->entry());
    load_const_from_method_toc(R11, fd_entry, toc);
    mtctr(R11);
    if (fd->env() == NULL) {
      li(R11, 0);
      nop();
    } else {
      AddressLiteral fd_env(fd->env());
      load_const_from_method_toc(R11, fd_env, toc);
    }
    AddressLiteral fd_toc(fd->toc());
    load_toc_from_toc(R2_TOC, fd_toc, toc);
    // R2_TOC is killed.
    bctrl();
    _last_calls_return_pc = pc();
  } else {
    // It's a friend function, load the entry point and don't care about
    // toc and env. Use an optimizable call instruction, but ensure the
    // same code-size as in the case of a non-friend function.
    nop();
    bl64_patchable(fd->entry(), rt);
    _last_calls_return_pc = pc();
  }
  return _last_calls_return_pc;
}
#endif // ABI_ELFv2

void MacroAssembler::call_VM_base(Register oop_result,
                                  Register last_java_sp,
                                  address  entry_point,
                                  bool     check_exceptions) {
  BLOCK_COMMENT("call_VM {");
  // Determine last_java_sp register.
  if (!last_java_sp->is_valid()) {
    last_java_sp = R1_SP;
  }
  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);

  // ARG1 must hold thread address.
  mr(R3_ARG1, R16_thread);
#if defined(ABI_ELFv2)
  address return_pc = call_c(entry_point, relocInfo::none);
#else
  address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
#endif

  reset_last_Java_frame();

  // Check for pending exceptions.
  if (check_exceptions) {
    // Exception checking is not supported here; callers must pass check_exceptions == false.
    ShouldNotReachHere();
  }

  // Get oop result if there is one and reset the value in the thread.
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }

  _last_calls_return_pc = return_pc;
  BLOCK_COMMENT("} call_VM");
}

void MacroAssembler::call_VM_leaf_base(address entry_point) {
  BLOCK_COMMENT("call_VM_leaf {");
#if defined(ABI_ELFv2)
  call_c(entry_point, relocInfo::none);
#else
  call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
#endif
  BLOCK_COMMENT("} call_VM_leaf");
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  call_VM_base(oop_result, noreg, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
                             bool check_exceptions) {
  // R3_ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  mr_if_needed(R6_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

void MacroAssembler::call_VM_leaf(address entry_point) {
  call_VM_leaf_base(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  mr_if_needed(R3_ARG1, arg_1);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  call_VM_leaf(entry_point);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mr_if_needed(R3_ARG1, arg_1);
  assert(arg_2 != R3_ARG1, "smashed argument");
  mr_if_needed(R4_ARG2, arg_2);
  assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_3);
  call_VM_leaf(entry_point);
}

// Check whether instruction is a read access to the polling page
// which was emitted by load_from_polling_page(..).
bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
                                               address* polling_address_ptr) {
  if (!is_ld(instruction))
    return false; // It's not a ld. Fail.

  int rt = inv_rt_field(instruction);
  int ra = inv_ra_field(instruction);
  int ds = inv_ds_field(instruction);
  if (!(ds == 0 && ra != 0 && rt == 0)) {
    return false; // It's not a ld(r0, X, ra). Fail.
  }

  if (!ucontext) {
    // Set polling address.
    if (polling_address_ptr != NULL) {
      *polling_address_ptr = NULL;
    }
    return true; // No ucontext given. Can't check value of ra. Assume true.
  }

#ifdef LINUX
  // Ucontext given. Check that register ra contains the address of
  // the safepoint polling page.
  ucontext_t* uc = (ucontext_t*) ucontext;
  // Set polling address.
  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
  if (polling_address_ptr != NULL) {
    *polling_address_ptr = addr;
  }
  return os::is_poll_address(addr);
#else
  // Not on Linux, ucontext must be NULL.
  ShouldNotReachHere();
  return false;
#endif
}

bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;

  if (is_stwx(instruction) || is_stwux(instruction)) {
    int ra = inv_ra_field(instruction);
    int rb = inv_rb_field(instruction);

    // look up content of ra and rb in ucontext
    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return os::is_memory_serialize_page(thread, ra_val + rb_val);
  } else if (is_stw(instruction) || is_stwu(instruction)) {
    int ra = inv_ra_field(instruction);
    int d1 = inv_d1_field(instruction);

    // look up content of ra in ucontext
    address ra_val = (address)uc->uc_mcontext.regs->gpr[ra];
    return os::is_memory_serialize_page(thread, ra_val + d1);
  } else {
    return false;
  }
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return false;
#endif
}

void MacroAssembler::bang_stack_with_offset(int offset) {
  // When increasing the stack, the old stack pointer will be written
  // to the new top of stack according to the PPC64 ABI.
  // Therefore, stack banging is not necessary when increasing
  // the stack by <= os::vm_page_size() bytes.
  // When increasing the stack by a larger amount, this method is
  // called repeatedly to bang the intermediate pages.

  // Stack grows down, caller passes positive offset.
  assert(offset > 0, "must bang with positive offset");

  long stdoffset = -offset;

  if (is_simm(stdoffset, 16)) {
    // Signed 16 bit offset, a simple std is ok.
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0, (int)(signed short)stdoffset, R1_SP);
    } else {
      std(R0, (int)(signed short)stdoffset, R1_SP);
    }
  } else if (is_simm(stdoffset, 31)) {
    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);

    Register tmp = R11;
    addis(tmp, R1_SP, hi);
    if (UseLoadInstructionsForStackBangingPPC64) {
      ld(R0,  lo, tmp);
    } else {
      std(R0, lo, tmp);
    }
  } else {
    ShouldNotReachHere();
  }
}
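
// Illustrative example (hypothetical offset): banging at offset 0x14000,
// which does not fit into a simm16, emits (with
// UseLoadInstructionsForStackBangingPPC64 off):
//    addis R11, R1_SP, -1      // hi16: -1 << 16 == -0x10000
//    std   R0, -0x4000(R11)    // lo16: -0x10000 - 0x4000 == -0x14000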

// If instruction is a stack bang of the form
//    std    R0,    x(Ry),       (see bang_stack_with_offset())
//    stdu   R1_SP, x(R1_SP),    (see push_frame(), resize_frame())
// or stdux  R1_SP, Rx, R1_SP    (see push_frame(), resize_frame())
// return the banged address. Otherwise, return 0.
address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
#ifdef LINUX
  ucontext_t* uc = (ucontext_t*) ucontext;
  int rs = inv_rs_field(instruction);
  int ra = inv_ra_field(instruction);
  if (   (is_ld(instruction)   && rs == 0 &&  UseLoadInstructionsForStackBangingPPC64)
      || (is_std(instruction)  && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
      || (is_stdu(instruction) && rs == 1)) {
    int ds = inv_ds_field(instruction);
    // return banged address
    return ds + (address)uc->uc_mcontext.regs->gpr[ra];
  } else if (is_stdux(instruction) && rs == 1) {
    int rb = inv_rb_field(instruction);
    address sp = (address)uc->uc_mcontext.regs->gpr[1];
    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
                                  : sp + rb_val; // banged address
  }
  return NULL; // not a stack bang
#else
  // workaround not needed on !LINUX :-)
  ShouldNotCallThis();
  return NULL;
#endif
}

// CmpxchgX sets condition register to cmpX(current, compare).
void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
                              Register compare_value, Register exchange_value,
                              Register addr_base, int semantics, bool cmpxchgx_hint,
                              Register int_flag_success, bool contention_hint) {
  Label retry;
  Label failed;
  Label done;

  // Save one branch if result is returned via register and
  // result register is different from the other ones.
  bool use_result_reg    = (int_flag_success != noreg);
  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
                            int_flag_success != exchange_value && int_flag_success != addr_base);

  // release/fence semantics
  if (semantics & MemBarRel) {
    release();
  }

  if (use_result_reg && preset_result_reg) {
    li(int_flag_success, 0); // preset (assume cas failed)
  }

  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
  if (contention_hint) { // Don't try to reserve if cmp fails.
    lwz(dest_current_value, 0, addr_base);
    cmpw(flag, dest_current_value, compare_value);
    bne(flag, failed);
  }

  // atomic emulation loop
  bind(retry);

  lwarx(dest_current_value, addr_base, cmpxchgx_hint);
  cmpw(flag, dest_current_value, compare_value);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(flag, failed);
  } else {
    bne(                  flag, failed);
  }
  // branch to done  => (flag == ne), (dest_current_value != compare_value)
  // fall through    => (flag == eq), (dest_current_value == compare_value)

  stwcx_(exchange_value, addr_base);
  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
  } else {
    bne(                  CCR0, retry); // StXcx_ sets CCR0.
  }
  // fall through    => (flag == eq), (dest_current_value == compare_value), (swapped)

  // Result in register (must do this at the end because int_flag_success can be the
  // same register as one above).
  if (use_result_reg) {
    li(int_flag_success, 1);
  }

  if (semantics & MemBarFenceAfter) {
    fence();
  } else if (semantics & MemBarAcq) {
    isync();
  }

  if (use_result_reg && !preset_result_reg) {
    b(done);
  }

  bind(failed);
  if (use_result_reg && !preset_result_reg) {
    li(int_flag_success, 0);
  }

  bind(done);
  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
}
1430 
// Performs atomic compare exchange:
//   if (compare_value == *addr_base) {
//     *addr_base = exchange_value;
//     int_flag_success = 1;
//   } else {
//     int_flag_success = 0;
//   }
1437 //
1438 // ConditionRegister flag       = cmp(compare_value, *addr_base)
1439 // Register dest_current_value  = *addr_base
1440 // Register compare_value       Used to compare with value in memory
1441 // Register exchange_value      Written to memory if compare_value == *addr_base
1442 // Register addr_base           The memory location to compareXChange
1443 // Register int_flag_success    Set to 1 if exchange_value was written to *addr_base
1444 //
// To avoid the costly compare exchange, the value can be tested beforehand
// (see contention_hint). Several special cases exist to avoid generating
// unnecessary code.
1447 //
1448 void MacroAssembler::cmpxchgd(ConditionRegister flag,
1449                               Register dest_current_value, Register compare_value, Register exchange_value,
1450                               Register addr_base, int semantics, bool cmpxchgx_hint,
1451                               Register int_flag_success, Label* failed_ext, bool contention_hint) {
1452   Label retry;
1453   Label failed_int;
1454   Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
1455   Label done;
1456 
1457   // Save one branch if result is returned via register and result register is different from the other ones.
  bool use_result_reg    = (int_flag_success != noreg);
  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
                            int_flag_success != exchange_value && int_flag_success != addr_base);
1461   assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
1462 
1463   // release/fence semantics
1464   if (semantics & MemBarRel) {
1465     release();
1466   }
1467 
1468   if (use_result_reg && preset_result_reg) {
1469     li(int_flag_success, 0); // preset (assume cas failed)
1470   }
1471 
1472   // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
1473   if (contention_hint) { // Don't try to reserve if cmp fails.
1474     ld(dest_current_value, 0, addr_base);
1475     cmpd(flag, dest_current_value, compare_value);
1476     bne(flag, failed);
1477   }
1478 
1479   // atomic emulation loop
1480   bind(retry);
1481 
1482   ldarx(dest_current_value, addr_base, cmpxchgx_hint);
1483   cmpd(flag, dest_current_value, compare_value);
1484   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
1485     bne_predict_not_taken(flag, failed);
1486   } else {
1487     bne(                  flag, failed);
1488   }
1489 
1490   stdcx_(exchange_value, addr_base);
1491   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
1492     bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
1493   } else {
1494     bne(                  CCR0, retry); // stXcx_ sets CCR0
1495   }
1496 
1497   // result in register (must do this at the end because int_flag_success can be the same register as one above)
1498   if (use_result_reg) {
1499     li(int_flag_success, 1);
1500   }
1501 
  // POWER6 doesn't need isync in CAS; emit it anyway to be on the safe side.
1504   if (semantics & MemBarFenceAfter) {
1505     fence();
1506   } else if (semantics & MemBarAcq) {
1507     isync();
1508   }
1509 
1510   if (use_result_reg && !preset_result_reg) {
1511     b(done);
1512   }
1513 
1514   bind(failed_int);
1515   if (use_result_reg && !preset_result_reg) {
1516     li(int_flag_success, 0);
1517   }
1518 
1519   bind(done);
1520   // (flag == ne) => (dest_current_value != compare_value), (!swapped)
1521   // (flag == eq) => (dest_current_value == compare_value), ( swapped)
1522 }
1523 
1524 // Look up the method for a megamorphic invokeinterface call.
1525 // The target method is determined by <intf_klass, itable_index>.
1526 // The receiver klass is in recv_klass.
1527 // On success, the result will be in method_result, and execution falls through.
1528 // On failure, execution transfers to the given label.
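// Illustrative sketch of the klass layout assumed by the scan (field names
// abbreviated; see the loop comment inside the method body):
//   [ vtable: vtable_length entries of vtableEntry ...
//     itable: itableOffsetEntry { interface klass; offset } ... (NULL-terminated)
//             itableMethodEntry { method } ... ]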
1529 void MacroAssembler::lookup_interface_method(Register recv_klass,
1530                                              Register intf_klass,
1531                                              RegisterOrConstant itable_index,
1532                                              Register method_result,
1533                                              Register scan_temp,
1534                                              Register temp2,
1535                                              Label& L_no_such_interface,
1536                                              bool return_method) {
1537   assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
1538 
1539   // Compute start of first itableOffsetEntry (which is at the end of the vtable).
1540   int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
1541   int itentry_off = itableMethodEntry::method_offset_in_bytes();
1542   int logMEsize   = exact_log2(itableMethodEntry::size() * wordSize);
1543   int scan_step   = itableOffsetEntry::size() * wordSize;
1544   int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
1545 
1546   lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
1547   // %%% We should store the aligned, prescaled offset in the klassoop.
1548   // Then the next several instructions would fold away.
1549 
1550   sldi(scan_temp, scan_temp, log_vte_size);
1551   addi(scan_temp, scan_temp, vtable_base);
1552   add(scan_temp, recv_klass, scan_temp);
1553 
1554   // Adjust recv_klass by scaled itable_index, so we can free itable_index.
1555   if (return_method) {
1556     if (itable_index.is_register()) {
1557       Register itable_offset = itable_index.as_register();
1558       sldi(method_result, itable_offset, logMEsize);
1559       if (itentry_off) { addi(method_result, method_result, itentry_off); }
1560       add(method_result, method_result, recv_klass);
1561     } else {
1562       long itable_offset = (long)itable_index.as_constant();
      load_const_optimized(temp2, (itable_offset << logMEsize) + itentry_off); // static address, no relocation
1565       add(method_result, temp2, recv_klass);
1566     }
1567   }
1568 
1569   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
1570   //   if (scan->interface() == intf) {
1571   //     result = (klass + scan->offset() + itable_index);
1572   //   }
1573   // }
1574   Label search, found_method;
1575 
1576   for (int peel = 1; peel >= 0; peel--) {
1577     // %%%% Could load both offset and interface in one ldx, if they were
1578     // in the opposite order. This would save a load.
1579     ld(temp2, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
1580 
1581     // Check that this entry is non-null. A null entry means that
1582     // the receiver class doesn't implement the interface, and wasn't the
1583     // same as when the caller was compiled.
1584     cmpd(CCR0, temp2, intf_klass);
1585 
1586     if (peel) {
1587       beq(CCR0, found_method);
1588     } else {
1589       bne(CCR0, search);
1590       // (invert the test to fall through to found_method...)
1591     }
1592 
1593     if (!peel) break;
1594 
1595     bind(search);
1596 
1597     cmpdi(CCR0, temp2, 0);
1598     beq(CCR0, L_no_such_interface);
1599     addi(scan_temp, scan_temp, scan_step);
1600   }
1601 
1602   bind(found_method);
1603 
1604   // Got a hit.
1605   if (return_method) {
1606     int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
1607     lwz(scan_temp, ito_offset, scan_temp);
1608     ldx(method_result, scan_temp, method_result);
1609   }
1610 }
1611 
1612 // virtual method calling
1613 void MacroAssembler::lookup_virtual_method(Register recv_klass,
1614                                            RegisterOrConstant vtable_index,
1615                                            Register method_result) {
1616 
1617   assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
1618 
1619   const int base = InstanceKlass::vtable_start_offset() * wordSize;
1620   assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
1621 
1622   if (vtable_index.is_register()) {
1623     sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
1624     add(recv_klass, vtable_index.as_register(), recv_klass);
1625   } else {
1626     addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
1627   }
1628   ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
1629 }
1630 
1631 /////////////////////////////////////////// subtype checking ////////////////////////////////////////////
1632 
1633 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
1634                                                    Register super_klass,
1635                                                    Register temp1_reg,
1636                                                    Register temp2_reg,
1637                                                    Label& L_success,
1638                                                    Label& L_failure) {
1639 
1640   const Register check_cache_offset = temp1_reg;
1641   const Register cached_super       = temp2_reg;
1642 
1643   assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
1644 
1645   int sco_offset = in_bytes(Klass::super_check_offset_offset());
1646   int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
1647 
1648   // If the pointers are equal, we are done (e.g., String[] elements).
1649   // This self-check enables sharing of secondary supertype arrays among
1650   // non-primary types such as array-of-interface. Otherwise, each such
1651   // type would need its own customized SSA.
1652   // We move this check to the front of the fast path because many
1653   // type checks are in fact trivially successful in this manner,
1654   // so we get a nicely predicted branch right at the start of the check.
1655   cmpd(CCR0, sub_klass, super_klass);
1656   beq(CCR0, L_success);
1657 
1658   // Check the supertype display:
1659   lwz(check_cache_offset, sco_offset, super_klass);
1660   // The loaded value is the offset from KlassOopDesc.
1661 
1662   ldx(cached_super, check_cache_offset, sub_klass);
1663   cmpd(CCR0, cached_super, super_klass);
1664   beq(CCR0, L_success);
1665 
1666   // This check has worked decisively for primary supers.
1667   // Secondary supers are sought in the super_cache ('super_cache_addr').
1668   // (Secondary supers are interfaces and very deeply nested subtypes.)
  // The same check above works because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
1672   // Note that the cache is updated below if it does not help us find
1673   // what we need immediately.
1674   // So if it was a primary super, we can just fail immediately.
1675   // Otherwise, it's the slow path for us (no success at this point).
1676 
1677   cmpwi(CCR0, check_cache_offset, sc_offset);
1678   bne(CCR0, L_failure);
1679   // bind(slow_path); // fallthru
1680 }
1681 
1682 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1683                                                    Register super_klass,
1684                                                    Register temp1_reg,
1685                                                    Register temp2_reg,
1686                                                    Label* L_success,
1687                                                    Register result_reg) {
1688   const Register array_ptr = temp1_reg; // current value from cache array
1689   const Register temp      = temp2_reg;
1690 
1691   assert_different_registers(sub_klass, super_klass, array_ptr, temp);
1692 
1693   int source_offset = in_bytes(Klass::secondary_supers_offset());
1694   int target_offset = in_bytes(Klass::secondary_super_cache_offset());
1695 
1696   int length_offset = Array<Klass*>::length_offset_in_bytes();
1697   int base_offset   = Array<Klass*>::base_offset_in_bytes();
1698 
1699   Label hit, loop, failure, fallthru;
1700 
1701   ld(array_ptr, source_offset, sub_klass);
1702 
1703   //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
1704   lwz(temp, length_offset, array_ptr);
1705   cmpwi(CCR0, temp, 0);
  beq(CCR0, result_reg != noreg ? failure : fallthru); // length 0
1707 
1708   mtctr(temp); // load ctr
1709 
1710   bind(loop);
  // The table holds uncompressed Klass* entries (no longer compressed oops).
1712   ld(temp, base_offset, array_ptr);
1713   cmpd(CCR0, temp, super_klass);
1714   beq(CCR0, hit);
1715   addi(array_ptr, array_ptr, BytesPerWord);
1716   bdnz(loop);
1717 
1718   bind(failure);
  if (result_reg != noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
1720   b(fallthru);
1721 
1722   bind(hit);
1723   std(super_klass, target_offset, sub_klass); // save result to cache
1724   if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
1725   if (L_success != NULL) b(*L_success);
1726 
1727   bind(fallthru);
1728 }
1729 
1730 // Try fast path, then go to slow one if not successful
1731 void MacroAssembler::check_klass_subtype(Register sub_klass,
1732                          Register super_klass,
1733                          Register temp1_reg,
1734                          Register temp2_reg,
1735                          Label& L_success) {
1736   Label L_failure;
1737   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
1738   check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
1739   bind(L_failure); // Fallthru if not successful.
1740 }
1741 
1742 void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
1743                                               Register temp_reg,
1744                                               Label& wrong_method_type) {
1745   assert_different_registers(mtype_reg, mh_reg, temp_reg);
1746   // Compare method type against that of the receiver.
1747   load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
1748   cmpd(CCR0, temp_reg, mtype_reg);
1749   bne(CCR0, wrong_method_type);
1750 }
1751 
1752 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
1753                                                    Register temp_reg,
1754                                                    int extra_slot_offset) {
1755   // cf. TemplateTable::prepare_invoke(), if (load_receiver).
1756   int stackElementSize = Interpreter::stackElementSize;
1757   int offset = extra_slot_offset * stackElementSize;
1758   if (arg_slot.is_constant()) {
1759     offset += arg_slot.as_constant() * stackElementSize;
1760     return offset;
1761   } else {
1762     assert(temp_reg != noreg, "must specify");
1763     sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
1764     if (offset != 0)
1765       addi(temp_reg, temp_reg, offset);
1766     return temp_reg;
1767   }
1768 }
1769 
1770 void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
1771                                           Register mark_reg, Register temp_reg,
1772                                           Register temp2_reg, Label& done, Label* slow_case) {
1773   assert(UseBiasedLocking, "why call this otherwise?");
1774 
1775 #ifdef ASSERT
1776   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
1777 #endif
1778 
1779   Label cas_label;
1780 
1781   // Branch to done if fast path fails and no slow_case provided.
1782   Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
1783 
1784   // Biased locking
1785   // See whether the lock is currently biased toward our thread and
1786   // whether the epoch is still valid
1787   // Note that the runtime guarantees sufficient alignment of JavaThread
1788   // pointers to allow age to be placed into low bits
1789   assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
1790          "biased locking makes assumptions about bit layout");
1791 
1792   if (PrintBiasedLockingStatistics) {
1793     load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
1794     lwz(temp2_reg, 0, temp_reg);
1795     addi(temp2_reg, temp2_reg, 1);
1796     stw(temp2_reg, 0, temp_reg);
1797   }
1798 
1799   andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
1800   cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
1801   bne(cr_reg, cas_label);
1802 
1803   load_klass(temp_reg, obj_reg);
1804 
1805   load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
1806   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
1807   orr(temp_reg, R16_thread, temp_reg);
1808   xorr(temp_reg, mark_reg, temp_reg);
1809   andr(temp_reg, temp_reg, temp2_reg);
1810   cmpdi(cr_reg, temp_reg, 0);
1811   if (PrintBiasedLockingStatistics) {
1812     Label l;
1813     bne(cr_reg, l);
1814     load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
1815     lwz(temp2_reg, 0, mark_reg);
1816     addi(temp2_reg, temp2_reg, 1);
1817     stw(temp2_reg, 0, mark_reg);
1818     // restore mark_reg
1819     ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
1820     bind(l);
1821   }
1822   beq(cr_reg, done);
1823 
1824   Label try_revoke_bias;
1825   Label try_rebias;
1826 
1827   // At this point we know that the header has the bias pattern and
1828   // that we are not the bias owner in the current epoch. We need to
1829   // figure out more details about the state of the header in order to
1830   // know what operations can be legally performed on the object's
1831   // header.
1832 
1833   // If the low three bits in the xor result aren't clear, that means
1834   // the prototype header is no longer biased and we have to revoke
1835   // the bias on this object.
1836   andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
1837   cmpwi(cr_reg, temp2_reg, 0);
1838   bne(cr_reg, try_revoke_bias);
1839 
1840   // Biasing is still enabled for this data type. See whether the
1841   // epoch of the current bias is still valid, meaning that the epoch
1842   // bits of the mark word are equal to the epoch bits of the
1843   // prototype header. (Note that the prototype header's epoch bits
1844   // only change at a safepoint.) If not, attempt to rebias the object
1845   // toward the current thread. Note that we must be absolutely sure
1846   // that the current epoch is invalid in order to do this because
1847   // otherwise the manipulations it performs on the mark word are
1848   // illegal.
1849 
1850   int shift_amount = 64 - markOopDesc::epoch_shift;
1851   // rotate epoch bits to right (little) end and set other bits to 0
1852   // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
1853   rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
1854   // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
1855   bne(CCR0, try_rebias);
1856 
1857   // The epoch of the current bias is still valid but we know nothing
1858   // about the owner; it might be set or it might be clear. Try to
1859   // acquire the bias of the object using an atomic operation. If this
1860   // fails we will go in to the runtime to revoke the object's bias.
1861   // Note that we first construct the presumed unbiased header so we
1862   // don't accidentally blow away another thread's valid bias.
1863   andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
1864                                 markOopDesc::age_mask_in_place |
1865                                 markOopDesc::epoch_mask_in_place));
1866   orr(temp_reg, R16_thread, mark_reg);
1867 
1868   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
1869 
1870   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
1871   fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
1872   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
1873            /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
1874            /*where=*/obj_reg,
1875            MacroAssembler::MemBarAcq,
1876            MacroAssembler::cmpxchgx_hint_acquire_lock(),
1877            noreg, slow_case_int); // bail out if failed
1878 
1879   // If the biasing toward our thread failed, this means that
1880   // another thread succeeded in biasing it toward itself and we
1881   // need to revoke that bias. The revocation will occur in the
1882   // interpreter runtime in the slow case.
1883   if (PrintBiasedLockingStatistics) {
1884     load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
1885     lwz(temp2_reg, 0, temp_reg);
1886     addi(temp2_reg, temp2_reg, 1);
1887     stw(temp2_reg, 0, temp_reg);
1888   }
1889   b(done);
1890 
1891   bind(try_rebias);
1892   // At this point we know the epoch has expired, meaning that the
1893   // current "bias owner", if any, is actually invalid. Under these
1894   // circumstances _only_, we are allowed to use the current header's
1895   // value as the comparison value when doing the cas to acquire the
1896   // bias in the current epoch. In other words, we allow transfer of
1897   // the bias from one thread to another directly in this situation.
1898   andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
1899   orr(temp_reg, R16_thread, temp_reg);
1900   load_klass(temp2_reg, obj_reg);
1901   ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
1902   orr(temp_reg, temp_reg, temp2_reg);
1903 
1904   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
1905 
1906   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
1907   fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
1908   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
1909                  /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
1910                  /*where=*/obj_reg,
1911                  MacroAssembler::MemBarAcq,
1912                  MacroAssembler::cmpxchgx_hint_acquire_lock(),
1913                  noreg, slow_case_int); // bail out if failed
1914 
1915   // If the biasing toward our thread failed, this means that
1916   // another thread succeeded in biasing it toward itself and we
1917   // need to revoke that bias. The revocation will occur in the
1918   // interpreter runtime in the slow case.
1919   if (PrintBiasedLockingStatistics) {
1920     load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
1921     lwz(temp2_reg, 0, temp_reg);
1922     addi(temp2_reg, temp2_reg, 1);
1923     stw(temp2_reg, 0, temp_reg);
1924   }
1925   b(done);
1926 
1927   bind(try_revoke_bias);
1928   // The prototype mark in the klass doesn't have the bias bit set any
1929   // more, indicating that objects of this data type are not supposed
1930   // to be biased any more. We are going to try to reset the mark of
1931   // this object to the prototype value and fall through to the
1932   // CAS-based locking scheme. Note that if our CAS fails, it means
1933   // that another thread raced us for the privilege of revoking the
1934   // bias of this particular object, so it's okay to continue in the
1935   // normal locking code.
1936   load_klass(temp_reg, obj_reg);
1937   ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
1938   andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
1939   orr(temp_reg, temp_reg, temp2_reg);
1940 
1941   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
1942 
1943   // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
1944   fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
1945   cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
1946                  /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
1947                  /*where=*/obj_reg,
1948                  MacroAssembler::MemBarAcq,
1949                  MacroAssembler::cmpxchgx_hint_acquire_lock());
1950 
1951   // reload markOop in mark_reg before continuing with lightweight locking
1952   ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
1953 
1954   // Fall through to the normal CAS-based lock, because no matter what
1955   // the result of the above CAS, some thread must have succeeded in
1956   // removing the bias bit from the object's header.
1957   if (PrintBiasedLockingStatistics) {
1958     Label l;
1959     bne(cr_reg, l);
1960     load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
1961     lwz(temp2_reg, 0, temp_reg);
1962     addi(temp2_reg, temp2_reg, 1);
1963     stw(temp2_reg, 0, temp_reg);
1964     bind(l);
1965   }
1966 
1967   bind(cas_label);
1968 }
1969 
1970 void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
1971   // Check for biased locking unlock case, which is a no-op
1972   // Note: we do not have to check the thread ID for two reasons.
1973   // First, the interpreter checks for IllegalMonitorStateException at
1974   // a higher level. Second, if the bias was revoked while we held the
1975   // lock, the object could not be rebiased toward another thread, so
1976   // the bias bit would be clear.
1977 
1978   ld(temp_reg, 0, mark_addr);
1979   andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
1980 
1981   cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
1982   beq(cr_reg, done);
1983 }
1984 
1985 // "The box" is the space on the stack where we copy the object mark.
1986 void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
1987                                                Register temp, Register displaced_header, Register current_header) {
1988   assert_different_registers(oop, box, temp, displaced_header, current_header);
1989   assert(flag != CCR0, "bad condition register");
1990   Label cont;
1991   Label object_has_monitor;
1992   Label cas_failed;
1993 
1994   // Load markOop from object into displaced_header.
1995   ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
1996 
1998   // Always do locking in runtime.
1999   if (EmitSync & 0x01) {
2000     cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
2001     return;
2002   }
2003 
2004   if (UseBiasedLocking) {
2005     biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
2006   }
2007 
2008   // Handle existing monitor.
2009   if ((EmitSync & 0x02) == 0) {
2010     // The object has an existing monitor iff (mark & monitor_value) != 0.
2011     andi_(temp, displaced_header, markOopDesc::monitor_value);
2012     bne(CCR0, object_has_monitor);
2013   }
2014 
2015   // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
2016   ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
2017 
2020   // Initialize the box. (Must happen before we update the object mark!)
2021   std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
2022 
  // Must fence, otherwise preceding store(s) may float below the cmpxchg.
  // Compare the object markOop with displaced_header; if equal, exchange box with the object markOop.
2025   // CmpxchgX sets cr_reg to cmpX(current, displaced).
2026   membar(Assembler::StoreStore);
2027   cmpxchgd(/*flag=*/flag,
2028            /*current_value=*/current_header,
2029            /*compare_value=*/displaced_header,
2030            /*exchange_value=*/box,
2031            /*where=*/oop,
2032            MacroAssembler::MemBarAcq,
2033            MacroAssembler::cmpxchgx_hint_acquire_lock(),
2034            noreg,
2035            &cas_failed);
2036   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
2037 
2038   // If the compare-and-exchange succeeded, then we found an unlocked
2039   // object and we have now locked it.
2040   b(cont);
2041 
2042   bind(cas_failed);
2043   // We did not see an unlocked object so try the fast recursive case.
2044 
2045   // Check if the owner is self by comparing the value in the markOop of object
2046   // (current_header) with the stack pointer.
2047   sub(current_header, current_header, R1_SP);
2048   load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
2049                                         markOopDesc::lock_mask_in_place));
2050 
2051   and_(R0/*==0?*/, current_header, temp);
  // If the condition is true we succeeded, and hence we can store 0 as the
  // displaced header in the box, which indicates that it is a recursive lock.
2054   mcrf(flag,CCR0);
2055   std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
2056 
2057   // Handle existing monitor.
2058   if ((EmitSync & 0x02) == 0) {
2059     b(cont);
2060 
2061     bind(object_has_monitor);
2062     // The object's monitor m is unlocked iff m->owner == NULL,
2063     // otherwise m->owner may contain a thread or a stack address.
2064     //
2065     // Try to CAS m->owner from NULL to current thread.
2066     addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
2067     li(displaced_header, 0);
2068     // CmpxchgX sets flag to cmpX(current, displaced).
2069     cmpxchgd(/*flag=*/flag,
2070              /*current_value=*/current_header,
2071              /*compare_value=*/displaced_header,
2072              /*exchange_value=*/R16_thread,
2073              /*where=*/temp,
2074              MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
2075              MacroAssembler::cmpxchgx_hint_acquire_lock());
2076 
2077     // Store a non-null value into the box.
2078     std(box, BasicLock::displaced_header_offset_in_bytes(), box);
2079 
2080 #   ifdef ASSERT
2081     bne(flag, cont);
2082     // We have acquired the monitor, check some invariants.
2083     addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
2084     // Invariant 1: _recursions should be 0.
2085     //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
2086     asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
2087                             "monitor->_recursions should be 0", -1);
2088     // Invariant 2: OwnerIsThread shouldn't be 0.
2089     //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
2090     //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
2091     //                           "monitor->OwnerIsThread shouldn't be 0", -1);
2092 #   endif
2093   }
2094 
2095   bind(cont);
2096   // flag == EQ indicates success
2097   // flag == NE indicates failure
2098 }
2099 
2100 void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
2101                                                  Register temp, Register displaced_header, Register current_header) {
2102   assert_different_registers(oop, box, temp, displaced_header, current_header);
2103   assert(flag != CCR0, "bad condition register");
2104   Label cont;
2105   Label object_has_monitor;
2106 
2107   // Always do locking in runtime.
2108   if (EmitSync & 0x01) {
2109     cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
2110     return;
2111   }
2112 
2113   if (UseBiasedLocking) {
2114     biased_locking_exit(flag, oop, current_header, cont);
2115   }
2116 
2117   // Find the lock address and load the displaced header from the stack.
2118   ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
2119 
2120   // If the displaced header is 0, we have a recursive unlock.
2121   cmpdi(flag, displaced_header, 0);
2122   beq(flag, cont);
2123 
2124   // Handle existing monitor.
2125   if ((EmitSync & 0x02) == 0) {
2126     // The object has an existing monitor iff (mark & monitor_value) != 0.
2127     ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
2128     andi(temp, current_header, markOopDesc::monitor_value);
2129     cmpdi(flag, temp, 0);
2130     bne(flag, object_has_monitor);
2131   }
2132 
  // Check if it is still a lightweight lock; this is true if we see
  // the stack address of the basicLock in the markOop of the object.
2136   // Cmpxchg sets flag to cmpd(current_header, box).
2137   cmpxchgd(/*flag=*/flag,
2138            /*current_value=*/current_header,
2139            /*compare_value=*/box,
2140            /*exchange_value=*/displaced_header,
2141            /*where=*/oop,
2142            MacroAssembler::MemBarRel,
2143            MacroAssembler::cmpxchgx_hint_release_lock(),
2144            noreg,
2145            &cont);
2146 
2147   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
2148 
2149   // Handle existing monitor.
2150   if ((EmitSync & 0x02) == 0) {
2151     b(cont);
2152 
2153     bind(object_has_monitor);
2154     addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
2155     ld(temp,             ObjectMonitor::owner_offset_in_bytes(), current_header);
2156     ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
2157     xorr(temp, R16_thread, temp);      // Will be 0 if we are the owner.
2158     orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
2159     cmpdi(flag, temp, 0);
2160     bne(flag, cont);
2161 
2162     ld(temp,             ObjectMonitor::EntryList_offset_in_bytes(), current_header);
2163     ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
2164     orr(temp, temp, displaced_header); // Will be 0 if both are 0.
2165     cmpdi(flag, temp, 0);
2166     bne(flag, cont);
2167     release();
2168     std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
2169   }
2170 
2171   bind(cont);
2172   // flag == EQ indicates success
2173   // flag == NE indicates failure
2174 }
2175 
// Write the serialization page so the VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific offset to
// write to within the page. This minimizes bus traffic due to cache line
// collisions.
2180 void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
2181   srdi(tmp2, thread, os::get_serialize_page_shift_count());
2182 
2183   int mask = os::vm_page_size() - sizeof(int);
2184   if (Assembler::is_simm(mask, 16)) {
2185     andi(tmp2, tmp2, mask);
2186   } else {
2187     lis(tmp1, (int)((signed short) (mask >> 16)));
2188     ori(tmp1, tmp1, mask & 0x0000ffff);
2189     andr(tmp2, tmp2, tmp1);
2190   }
2191 
2192   load_const(tmp1, (long) os::get_memory_serialize_page());
2193   release();
2194   stwx(R0, tmp1, tmp2);
2195 }
2196 
2197 
2198 // GC barrier helper macros
2199 
2200 // Write the card table byte if needed.
2201 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
2202   CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
2203   assert(bs->kind() == BarrierSet::CardTableModRef ||
2204          bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
2205 #ifdef ASSERT
2206   cmpdi(CCR0, Rnew_val, 0);
2207   asm_assert_ne("null oop not allowed", 0x321);
2208 #endif
2209   card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
2210 }
2211 
2212 // Write the card table byte.
2213 void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
2214   assert_different_registers(Robj, Rtmp, R0);
2215   load_const_optimized(Rtmp, (address)byte_map_base, R0);
2216   srdi(Robj, Robj, CardTableModRefBS::card_shift);
2217   li(R0, 0); // dirty
2218   if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
2219   stbx(R0, Rtmp, Robj);
2220 }
2221 
2222 #if INCLUDE_ALL_GCS
2223 // General G1 pre-barrier generator.
2224 // Goal: record the previous value if it is not null.
2225 void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
2226                                           Register Rtmp1, Register Rtmp2, bool needs_frame) {
2227   Label runtime, filtered;
2228 
2229   // Is marking active?
2230   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
2231     lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
2232   } else {
2233     guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
2234     lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
2235   }
2236   cmpdi(CCR0, Rtmp1, 0);
2237   beq(CCR0, filtered);
2238 
2239   // Do we need to load the previous value?
2240   if (Robj != noreg) {
2241     // Load the previous value...
2242     if (UseCompressedOops) {
2243       lwz(Rpre_val, offset, Robj);
2244     } else {
2245       ld(Rpre_val, offset, Robj);
2246     }
2247     // Previous value has been loaded into Rpre_val.
2248   }
2249   assert(Rpre_val != noreg, "must have a real register");
2250 
2251   // Is the previous value null?
2252   cmpdi(CCR0, Rpre_val, 0);
2253   beq(CCR0, filtered);
2254 
2255   if (Robj != noreg && UseCompressedOops) {
2256     decode_heap_oop_not_null(Rpre_val);
2257   }
2258 
  // Not filtered, so the previous value must be enqueued: record it in the
  // thread-local SATB buffer if possible, otherwise call into the runtime.
2263 
2264   // Can we store original value in the thread's buffer?
2265   // Is index == 0?
2266   // (The index field is typed as size_t.)
2267   const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
2268 
2269   ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2270   cmpdi(CCR0, Rindex, 0);
2271   beq(CCR0, runtime); // If index == 0, goto runtime.
2272   ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
2273 
2274   addi(Rindex, Rindex, -wordSize); // Decrement index.
2275   std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2276 
2277   // Record the previous value.
2278   stdx(Rpre_val, Rbuffer, Rindex);
2279   b(filtered);
2280 
2281   bind(runtime);
2282 
  // The VM call needs a frame (to save LR/CR and satisfy the C ABI).
2284   if (needs_frame) {
2285     save_LR_CR(Rtmp1);
2286     push_frame_reg_args(0, Rtmp2);
2287   }
2288 
2289   if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
2290   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
2291   if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
2292 
2293   if (needs_frame) {
2294     pop_frame();
2295     restore_LR_CR(Rtmp1);
2296   }
2297 
2298   bind(filtered);
2299 }
2300 
2301 // General G1 post-barrier generator
2302 // Store cross-region card.
2303 void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
2304   Label runtime, filtered_int;
2305   Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
2306   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
2307 
2308   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
2309   assert(bs->kind() == BarrierSet::G1SATBCT ||
2310          bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
2311 
2312   // Does store cross heap regions?
2313   if (G1RSBarrierRegionFilter) {
2314     xorr(Rtmp1, Rstore_addr, Rnew_val);
2315     srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
2316     beq(CCR0, filtered);
2317   }
2318 
2319   // Crosses regions, storing NULL?
2320 #ifdef ASSERT
2321   cmpdi(CCR0, Rnew_val, 0);
2322   asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
2323   //beq(CCR0, filtered);
2324 #endif
2325 
2326   // Storing region crossing non-NULL, is card already dirty?
2327   assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
2328   const Register Rcard_addr = Rtmp1;
2329   Register Rbase = Rtmp2;
2330   load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
2331 
2332   srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
2333 
2334   // Get the address of the card.
2335   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
2336   cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
2337   beq(CCR0, filtered);
2338 
2339   membar(Assembler::StoreLoad);
2340   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);  // Reload after membar.
2341   cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
2342   beq(CCR0, filtered);
2343 
2344   // Storing a region crossing, non-NULL oop, card is clean.
2345   // Dirty card and log.
2346   li(Rtmp3, CardTableModRefBS::dirty_card_val());
2347   //release(); // G1: oops are allowed to get visible after dirty marking.
2348   stbx(Rtmp3, Rbase, Rcard_addr);
2349 
2350   add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
2351   Rbase = noreg; // end of lifetime
2352 
2353   const Register Rqueue_index = Rtmp2,
2354                  Rqueue_buf   = Rtmp3;
2355   ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2356   cmpdi(CCR0, Rqueue_index, 0);
2357   beq(CCR0, runtime); // index == 0 then jump to runtime
2358   ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
2359 
2360   addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
2361   std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
2362 
2363   stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
2364   b(filtered);
2365 
2366   bind(runtime);
2367 
2368   // Save the live input values.
2369   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
2370 
2371   bind(filtered_int);
2372 }
2373 #endif // INCLUDE_ALL_GCS
2374 
2375 // Values for last_Java_pc, and last_Java_sp must comply to the rules
2376 // in frame_ppc.hpp.
2377 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible, has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here, so they don't need to be set.)
2382 
2383   // Verify that last_Java_pc was zeroed on return to Java
2384   asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
2385                           "last_Java_pc not zeroed before leaving Java", 0x200);
2386 
2387   // When returning from calling out from Java mode the frame anchor's
2388   // last_Java_pc will always be set to NULL. It is set here so that
2389   // if we are doing a call to native (not VM) that we capture the
2390   // known pc and don't have to rely on the native call having a
2391   // standard frame linkage where we can find the pc.
2392   if (last_Java_pc != noreg)
2393     std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
2394 
2395   // Set last_Java_sp last.
2396   std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
2397 }
2398 
2399 void MacroAssembler::reset_last_Java_frame(void) {
2400   asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
2401                              R16_thread, "SP was not set, still zero", 0x202);
2402 
2403   BLOCK_COMMENT("reset_last_Java_frame {");
2404   li(R0, 0);
2405 
2406   // _last_Java_sp = 0
2407   std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
2408 
2409   // _last_Java_pc = 0
2410   std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
2411   BLOCK_COMMENT("} reset_last_Java_frame");
2412 }
2413 
2414 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
2415   assert_different_registers(sp, tmp1);
2416 
2417   // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
2418   // TOP_IJAVA_FRAME_ABI.
2419   // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
2420 #ifdef CC_INTERP
2421   ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
2422 #else
2423   address entry = pc();
2424   load_const_optimized(tmp1, entry);
2425 #endif
2426 
2427   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
2428 }
2429 
2430 void MacroAssembler::get_vm_result(Register oop_result) {
2431   // Read:
2432   //   R16_thread
2433   //   R16_thread->in_bytes(JavaThread::vm_result_offset())
2434   //
2435   // Updated:
2436   //   oop_result
2437   //   R16_thread->in_bytes(JavaThread::vm_result_offset())
2438 
2439   ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
2440   li(R0, 0);
2441   std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
2442 
2443   verify_oop(oop_result);
2444 }
2445 
2446 void MacroAssembler::get_vm_result_2(Register metadata_result) {
2447   // Read:
2448   //   R16_thread
2449   //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
2450   //
2451   // Updated:
2452   //   metadata_result
2453   //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
2454 
2455   ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
2456   li(R0, 0);
2457   std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
2458 }
2459 
2460 
2461 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
2462   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
2463   if (Universe::narrow_klass_base() != 0) {
2464     // Use dst as temp if it is free.
2465     load_const(R0, Universe::narrow_klass_base(), (dst != current && dst != R0) ? dst : noreg);
2466     sub(dst, current, R0);
2467     current = dst;
2468   }
2469   if (Universe::narrow_klass_shift() != 0) {
2470     srdi(dst, current, Universe::narrow_klass_shift());
2471     current = dst;
2472   }
2473   mr_if_needed(dst, current); // Move may be required.
2474 }
2475 
2476 void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
2477   if (UseCompressedClassPointers) {
2478     encode_klass_not_null(ck, klass);
2479     stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
2480   } else {
2481     std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
2482   }
2483 }
2484 
2485 void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
2486   if (UseCompressedClassPointers) {
2487     if (val == noreg) {
2488       val = R0;
2489       li(val, 0);
2490     }
2491     stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
2492   }
2493 }
2494 
2495 int MacroAssembler::instr_size_for_decode_klass_not_null() {
2496   if (!UseCompressedClassPointers) return 0;
2497   int num_instrs = 1;  // shift or move
2498   if (Universe::narrow_klass_base() != 0) num_instrs = 7;  // shift + load const + add
2499   return num_instrs * BytesPerInstWord;
2500 }
2501 
2502 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
2503   assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
2504   if (src == noreg) src = dst;
2505   Register shifted_src = src;
  if (Universe::narrow_klass_shift() != 0 ||
      (Universe::narrow_klass_base() == 0 && src != dst)) {  // Move required.
2508     shifted_src = dst;
2509     sldi(shifted_src, src, Universe::narrow_klass_shift());
2510   }
2511   if (Universe::narrow_klass_base() != 0) {
2512     load_const(R0, Universe::narrow_klass_base());
2513     add(dst, shifted_src, R0);
2514   }
2515 }
2516 
2517 void MacroAssembler::load_klass(Register dst, Register src) {
2518   if (UseCompressedClassPointers) {
2519     lwz(dst, oopDesc::klass_offset_in_bytes(), src);
2520     // Attention: no null check here!
2521     decode_klass_not_null(dst, dst);
2522   } else {
2523     ld(dst, oopDesc::klass_offset_in_bytes(), src);
2524   }
2525 }
2526 
2527 void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
2528   if (!os::zero_page_read_protected()) {
2529     if (TrapBasedNullChecks) {
2530       trap_null_check(src);
2531     }
2532   }
2533   load_klass(dst, src);
2534 }
2535 
2536 void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
2537   if (Universe::heap() != NULL) {
2538     load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
2539   } else {
2540     // Heap not yet allocated. Load indirectly.
2541     int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
2542     ld(R30, simm16_offset, R30);
2543   }
2544 }
2545 
2546 // Clear Array
2547 // Kills both input registers. tmp == R0 is allowed.
2548 void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
2549   // Procedure for large arrays (uses data cache block zero instruction).
2550     Label startloop, fast, fastloop, small_rest, restloop, done;
2551     const int cl_size         = VM_Version::get_cache_line_size(),
2552               cl_dwords       = cl_size>>3,
2553               cl_dw_addr_bits = exact_log2(cl_dwords),
2554               dcbz_min        = 1;                     // Min count of dcbz executions, needs to be >0.
2555 
2556 //2:
2557     cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
2558     blt(CCR1, small_rest);                                      // Too small.
2559     rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits);           // Extract dword offset within first cache line.
2560     beq(CCR0, fast);                                            // Already 128byte aligned.
2561 
2562     subfic(tmp, tmp, cl_dwords);
2563     mtctr(tmp);                        // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
2564     subf(cnt_dwords, tmp, cnt_dwords); // rest.
2565     li(tmp, 0);
2566 //10:
2567   bind(startloop);                     // Clear at the beginning to reach 128byte boundary.
2568     std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
2569     addi(base_ptr, base_ptr, 8);
2570     bdnz(startloop);
2571 //13:
2572   bind(fast);                                  // Clear 128byte blocks.
2573     srdi(tmp, cnt_dwords, cl_dw_addr_bits);    // Loop count for 128byte loop (>0).
2574     andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
2575     mtctr(tmp);                                // Load counter.
2576 //16:
2577   bind(fastloop);
2578     dcbz(base_ptr);                    // Clear 128byte aligned block.
2579     addi(base_ptr, base_ptr, cl_size);
2580     bdnz(fastloop);
2581     if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
2582 //20:
2583   bind(small_rest);
2584     cmpdi(CCR0, cnt_dwords, 0);        // size 0?
2585     beq(CCR0, done);                   // rest == 0
2586     li(tmp, 0);
2587     mtctr(cnt_dwords);                 // Load counter.
2588 //24:
2589   bind(restloop);                      // Clear rest.
2590     std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
2591     addi(base_ptr, base_ptr, 8);
2592     bdnz(restloop);
2593 //27:
2594   bind(done);
2595 }
2596 
2597 /////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
2598 
// Search for a single jchar in a jchar[].
2600 //
2601 // Assumes that result differs from all other registers.
2602 //
2603 // Haystack, needle are the addresses of jchar-arrays.
2604 // NeedleChar is needle[0] if it is known at compile time.
2605 // Haycnt is the length of the haystack. We assume haycnt >=1.
2606 //
2607 // Preserves haystack, haycnt, kills all other registers.
2608 //
2609 // If needle == R0, we search for the constant needleChar.
2610 void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
2611                                       Register needle, jchar needleChar,
2612                                       Register tmp1, Register tmp2) {
2613 
2614   assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
2615 
2616   Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
2617   Register needle0 = needle, // Contains needle[0].
2618            addr = tmp1,
2619            ch1 = tmp2,
2620            ch2 = R0;
2621 
2622 //2 (variable) or 3 (const):
2623    if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
2624    dcbtct(haystack, 0x00);                        // Indicate R/O access to haystack.
2625 
2626    srwi_(tmp2, haycnt, 1);   // Shift right by exact_log2(UNROLL_FACTOR).
2627    mr(addr, haystack);
2628    beq(CCR0, L_FinalCheck);
2629    mtctr(tmp2);              // Move to count register.
2630 //8:
2631   bind(L_InnerLoop);             // Main work horse (2x unrolled search loop).
2632    lhz(ch1, 0, addr);        // Load characters from haystack.
2633    lhz(ch2, 2, addr);
2634    (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
2635    (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
2636    beq(CCR0, L_Found1);   // Did we find the needle?
2637    beq(CCR1, L_Found2);
2638    addi(addr, addr, 4);
2639    bdnz(L_InnerLoop);
2640 //16:
2641   bind(L_FinalCheck);
2642    andi_(R0, haycnt, 1);
2643    beq(CCR0, L_NotFound);
2644    lhz(ch1, 0, addr);        // One position left at which we have to compare.
2645    (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
2646    beq(CCR1, L_Found3);
2647 //21:
2648   bind(L_NotFound);
2649    li(result, -1);           // Not found.
2650    b(L_End);
2651 
2652   bind(L_Found2);
2653    addi(addr, addr, 2);
2654 //24:
2655   bind(L_Found1);
2656   bind(L_Found3);                  // Return index ...
2657    subf(addr, haystack, addr); // relative to haystack,
2658    srdi(result, addr, 1);      // in characters.
2659   bind(L_End);
2660 }
2661 
2662 
2663 // Implementation of IndexOf for jchar arrays.
2664 //
// The lengths of haystack and needle are not constant, i.e. they are passed in registers.
2666 //
2667 // Preserves registers haystack, needle.
2668 // Kills registers haycnt, needlecnt.
2669 // Assumes that result differs from all other registers.
2670 // Haystack, needle are the addresses of jchar-arrays.
2671 // Haycnt, needlecnt are the lengths of them, respectively.
2672 //
// Needlecntval must be zero or a 15-bit unsigned immediate and > 1.
2674 void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
2675                                     Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
2676                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
2677 
2678   // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
2679   Label L_TooShort, L_Found, L_NotFound, L_End;
2680   Register last_addr = haycnt, // Kill haycnt at the beginning.
2681            addr      = tmp1,
2682            n_start   = tmp2,
2683            ch1       = tmp3,
2684            ch2       = R0;
2685 
2686   // **************************************************************************************************
2687   // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
2688   // **************************************************************************************************
2689 
2690 //1 (variable) or 3 (const):
2691    dcbtct(needle, 0x00);    // Indicate R/O access to needle.
2692    dcbtct(haystack, 0x00);  // Indicate R/O access to haystack.
2693 
2694   // Compute last haystack addr to use if no match gets found.
2695   if (needlecntval == 0) { // variable needlecnt
2696 //3:
2697    subf(ch1, needlecnt, haycnt);      // Last character index to compare is haycnt-needlecnt.
2698    addi(addr, haystack, -2);          // Accesses use pre-increment.
2699    cmpwi(CCR6, needlecnt, 2);
2700    blt(CCR6, L_TooShort);          // Variable needlecnt: handle short needle separately.
2701    slwi(ch1, ch1, 1);                 // Scale to number of bytes.
2702    lwz(n_start, 0, needle);           // Load first 2 characters of needle.
2703    add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
2704    addi(needlecnt, needlecnt, -2);    // Rest of needle.
2705   } else { // constant needlecnt
2706     guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
2707     assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
2708 //5:
2709    addi(ch1, haycnt, -needlecntval);  // Last character index to compare is haycnt-needlecnt.
2710    lwz(n_start, 0, needle);           // Load first 2 characters of needle.
2711    addi(addr, haystack, -2);          // Accesses use pre-increment.
2712    slwi(ch1, ch1, 1);                 // Scale to number of bytes.
2713    add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
2714    li(needlecnt, needlecntval-2);     // Rest of needle.
2715   }
2716 
2717   // Main Loop (now we have at least 3 characters).
2718 //11:
2719   Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
2720   bind(L_OuterLoop); // Search for 1st 2 characters.
2721   Register addr_diff = tmp4;
2722    subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
2723    addi(addr, addr, 2);              // This is the new address we want to use for comparing.
2724    srdi_(ch2, addr_diff, 2);
2725    beq(CCR0, L_FinalCheck);       // 2 characters left?
2726    mtctr(ch2);                       // addr_diff/4
2727 //16:
2728   bind(L_InnerLoop);                // Main workhorse (2x unrolled search loop).
2729    lwz(ch1, 0, addr);           // Load 2 characters of haystack (ignore alignment).
2730    lwz(ch2, 2, addr);
2731    cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
2732    cmpw(CCR1, ch2, n_start);
2733    beq(CCR0, L_Comp1);       // Did we find the needle start?
2734    beq(CCR1, L_Comp2);
2735    addi(addr, addr, 4);
2736    bdnz(L_InnerLoop);
2737 //24:
2738   bind(L_FinalCheck);
2739    rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
2740    beq(CCR0, L_NotFound);
2741    lwz(ch1, 0, addr);                       // One position left at which we have to compare.
2742    cmpw(CCR1, ch1, n_start);
2743    beq(CCR1, L_Comp3);
2744 //29:
2745   bind(L_NotFound);
2746    li(result, -1); // not found
2747    b(L_End);
2748 
2749 
2750    // **************************************************************************************************
2751    // Special case: unfortunately, the variable-needle case can be called with needlecnt < 2.
2752    // **************************************************************************************************
2753 //31:
2754  if ((needlecntval >> 1) != 1) { // Const needlecnt is 2 or 3? Then skip this block to reduce code size.
2755   int nopcnt = 5;
2756   if (needlecntval != 0) ++nopcnt; // Balance alignment (other case: see below).
2757   if (needlecntval == 0) {         // We have to handle these cases separately.
2758   Label L_OneCharLoop;
2759   bind(L_TooShort);
2760    mtctr(haycnt);
2761    lhz(n_start, 0, needle);    // First character of needle
2762   bind(L_OneCharLoop);
2763    lhzu(ch1, 2, addr);
2764    cmpw(CCR1, ch1, n_start);
2765    beq(CCR1, L_Found);      // Did we find the one character needle?
2766    bdnz(L_OneCharLoop);
2767    li(result, -1);             // Not found.
2768    b(L_End);
2769   } // 8 instructions, so no impact on alignment.
2770   for (int x = 0; x < nopcnt; ++x) nop();
2771  }
2772 
2773   // **************************************************************************************************
2774   // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
2775   // **************************************************************************************************
2776 
2777   // Compare the rest
2778 //36 if needlecntval==0, else 37:
2779   bind(L_Comp2);
2780    addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
2781   bind(L_Comp1);            // Addr points to possible needle start.
2782   bind(L_Comp3);            // Could have created a copy and used a different return address, but this saves code size.
2783   if (needlecntval != 2) {  // Const needlecnt==2?
2784    if (needlecntval != 3) {
2785     if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
2786     Register ind_reg = tmp4;
2787     li(ind_reg, 2*2);   // First 2 characters are already compared, use index 2.
2788     mtctr(needlecnt);   // Decremented by 2, still > 0.
2789 //40:
2790    Label L_CompLoop;
2791    bind(L_CompLoop);
2792     lhzx(ch2, needle, ind_reg);
2793     lhzx(ch1, addr, ind_reg);
2794     cmpw(CCR1, ch1, ch2);
2795     bne(CCR1, L_OuterLoop);
2796     addi(ind_reg, ind_reg, 2);
2797     bdnz(L_CompLoop);
2798    } else { // No loop required if there's only one needle character left.
2799     lhz(ch2, 2*2, needle);
2800     lhz(ch1, 2*2, addr);
2801     cmpw(CCR1, ch1, ch2);
2802     bne(CCR1, L_OuterLoop);
2803    }
2804   }
2805   // Return index ...
2806 //46:
2807   bind(L_Found);
2808    subf(addr, haystack, addr); // relative to haystack, ...
2809    srdi(result, addr, 1);      // in characters.
2810 //48:
2811   bind(L_End);
2812 }
2813 
2814 // Implementation of Compare for jchar arrays.
2815 //
2816 // Kills the registers str1, str2, cnt1, cnt2.
2817 // Kills cr0, ctr.
2818 // Assumes that result differs from the input registers.
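//
// A rough C sketch of the generated code (illustrative only, not part of
// the build; helper name and signature are made up for this comment):
//
//   int compare(const jchar* str1, int cnt1, const jchar* str2, int cnt2) {
//     int min_cnt = (cnt1 < cnt2) ? cnt1 : cnt2;
//     for (int i = 0; i < min_cnt; ++i) {
//       if (str1[i] != str2[i]) return (int)str1[i] - (int)str2[i];
//     }
//     return cnt1 - cnt2;
//   }
//
// The fast loop below compares four characters per iteration via 8-byte
// loads; a per-character slow loop produces the signed difference and
// handles the tail.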
2819 void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
2820                                     Register result_reg, Register tmp_reg) {
2821    assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
2822 
2823    Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
2824    Register cnt_diff = R0,
2825             limit_reg = cnt1_reg,
2826             chr1_reg = result_reg,
2827             chr2_reg = cnt2_reg,
2828             addr_diff = str2_reg;
2829 
2830    // Offset 0 should be 32-byte aligned.
2831 //-4:
2832     dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
2833     dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
2834 //-2:
2835    // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
2836     subf(result_reg, cnt2_reg, cnt1_reg);  // difference between cnt1/2
2837     subf_(addr_diff, str1_reg, str2_reg);  // alias?
2838     beq(CCR0, Ldone);                   // return cnt difference if the strings alias (same address)
2839     srawi(limit_reg, result_reg, 31);      // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
2840     mr(cnt_diff, result_reg);
2841     andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
2842     add_(limit_reg, cnt2_reg, limit_reg);  // min(cnt1, cnt2)==0?
2843     beq(CCR0, Ldone);                   // return cnt difference if one has 0 length
2844 
2845     lhz(chr1_reg, 0, str1_reg);            // optional: early out if first characters mismatch
2846     lhzx(chr2_reg, str1_reg, addr_diff);   // optional: early out if first characters mismatch
2847     addi(tmp_reg, limit_reg, -1);          // min(cnt1, cnt2)-1
2848     subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
2849     bne(CCR0, Ldone);                   // optional: early out if first characters mismatch
2850 
2851    // Set loop counter by scaling down tmp_reg
2852     srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
2853     ble(CCR0, Lslow_case);                 // need >4 characters for fast loop
2854     andi(limit_reg, tmp_reg, 4-1);            // remaining characters
2855 
2856    // Adapt str1_reg str2_reg for the first loop iteration
2857     mtctr(chr2_reg);                 // (min(cnt1, cnt2)-1)/4
2858     addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
2859 //16:
2860    // Compare the rest of the characters
2861    bind(Lfast_loop);
2862     ld(chr1_reg, 0, str1_reg);
2863     ldx(chr2_reg, str1_reg, addr_diff);
2864     cmpd(CCR0, chr2_reg, chr1_reg);
2865     bne(CCR0, Lslow_case); // return chr1_reg
2866     addi(str1_reg, str1_reg, 4*2);
2867     bdnz(Lfast_loop);
2868     addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
2869 //23:
2870    bind(Lslow_case);
2871     mtctr(limit_reg);
2872 //24:
2873    bind(Lslow_loop);
2874     lhz(chr1_reg, 0, str1_reg);
2875     lhzx(chr2_reg, str1_reg, addr_diff);
2876     subf_(result_reg, chr2_reg, chr1_reg);
2877     bne(CCR0, Ldone); // return chr1_reg
2878     addi(str1_reg, str1_reg, 1*2);
2879     bdnz(Lslow_loop);
2880 //30:
2881    // If strings are equal up to min length, return the length difference.
2882     mr(result_reg, cnt_diff);
2883     nop(); // alignment
2884 //32:
2885    // Otherwise, return the difference between the first mismatched chars.
2886    bind(Ldone);
2887 }
2888 
2889 
2890 // Compare char[] arrays.
2891 //
2892 // str1_reg   USE only
2893 // str2_reg   USE only
2894 // cnt_reg    USE_DEF, due to tmp reg shortage
2895 // result_reg DEF only, might compromise USE only registers
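//
// A rough C sketch of the generated code (illustrative only, not part of
// the build; helper name and signature are made up for this comment;
// callers guarantee equal lengths):
//
//   bool equals(const jchar* str1, const jchar* str2, int cnt) {
//     for (int i = 0; i < cnt; ++i) {
//       if (str1[i] != str2[i]) return false;
//     }
//     return true;
//   }
//
// The main loop compares four characters (one doubleword) per iteration;
// the remaining 0-3 characters are compared one at a time.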
2896 void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
2897                                         Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
2898                                         Register tmp5_reg) {
2899 
2900   // Str1 may be the same register as str2, which can occur e.g. after scalar replacement.
2901   assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
2902   assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
2903 
2904   // Offset 0 should be 32-byte aligned.
2905   Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
2906   Register index_reg = tmp5_reg;
2907   Register cbc_iter  = tmp4_reg;
2908 
2909 //-1:
2910   dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
2911   dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
2912 //1:
2913   andi(cbc_iter, cnt_reg, 4-1);            // Remaining iterations after the 4-java-characters-per-iteration main loop.
2914   li(index_reg, 0); // init
2915   li(result_reg, 0); // assume false
2916   srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
2917 
2918   cmpwi(CCR1, cbc_iter, 0);             // CCR1 = (cbc_iter==0)
2919   beq(CCR0, Linit_cbc);                 // too short
2920     mtctr(tmp2_reg);
2921 //8:
2922     bind(Lloop);
2923       ldx(tmp1_reg, str1_reg, index_reg);
2924       ldx(tmp2_reg, str2_reg, index_reg);
2925       cmpd(CCR0, tmp1_reg, tmp2_reg);
2926       bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
2927       addi(index_reg, index_reg, 4*sizeof(jchar));
2928       bdnz(Lloop);
2929 //14:
2930   bind(Linit_cbc);
2931   beq(CCR1, Ldone_true);
2932     mtctr(cbc_iter);
2933 //16:
2934     bind(Lcbc);
2935       lhzx(tmp1_reg, str1_reg, index_reg);
2936       lhzx(tmp2_reg, str2_reg, index_reg);
2937       cmpw(CCR0, tmp1_reg, tmp2_reg);
2938       bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
2939       addi(index_reg, index_reg, 1*sizeof(jchar));
2940       bdnz(Lcbc);
2941     nop();
2942   bind(Ldone_true);
2943   li(result_reg, 1);
2944 //24:
2945   bind(Ldone_false);
2946 }
2947 
2948 
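// Compare char[] arrays with a compile-time constant length (cntval).
// Semantically this matches char_arrays_equals above with cnt == cntval.
// For cntval < 16 the compare is fully unrolled using 8-, 4- and 2-byte
// loads as needed; otherwise a doubleword loop handles cntval/4 iterations
// and the remainder is compared with at most two extra load pairs.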
2949 void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
2950                                            Register tmp1_reg, Register tmp2_reg) {
2951   // Str1 may be the same register as str2, which can occur e.g. after scalar replacement.
2952   assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
2953   assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
2954   assert(sizeof(jchar) == 2, "must be");
2955   assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
2956 
2957   Label Ldone_false;
2958 
2959   if (cntval < 16) { // short case
2960     if (cntval != 0) li(result_reg, 0); // assume false
2961 
2962     const int num_bytes = cntval*sizeof(jchar);
2963     int index = 0;
2964     for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
2965       ld(tmp1_reg, index, str1_reg);
2966       ld(tmp2_reg, index, str2_reg);
2967       cmpd(CCR0, tmp1_reg, tmp2_reg);
2968       bne(CCR0, Ldone_false);
2969     }
2970     if (cntval & 2) {
2971       lwz(tmp1_reg, index, str1_reg);
2972       lwz(tmp2_reg, index, str2_reg);
2973       cmpw(CCR0, tmp1_reg, tmp2_reg);
2974       bne(CCR0, Ldone_false);
2975       index += 4;
2976     }
2977     if (cntval & 1) {
2978       lhz(tmp1_reg, index, str1_reg);
2979       lhz(tmp2_reg, index, str2_reg);
2980       cmpw(CCR0, tmp1_reg, tmp2_reg);
2981       bne(CCR0, Ldone_false);
2982     }
2983     // fallthrough: true
2984   } else {
2985     Label Lloop;
2986     Register index_reg = tmp1_reg;
2987     const int loopcnt = cntval/4;
2988     assert(loopcnt > 0, "must be");
2989     // Offset 0 should be 32-byte aligned.
2990     //2:
2991     dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
2992     dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
2993     li(tmp2_reg, loopcnt);
2994     li(index_reg, 0); // init
2995     li(result_reg, 0); // assume false
2996     mtctr(tmp2_reg);
2997     //8:
2998     bind(Lloop);
2999     ldx(R0, str1_reg, index_reg);
3000     ldx(tmp2_reg, str2_reg, index_reg);
3001     cmpd(CCR0, R0, tmp2_reg);
3002     bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
3003     addi(index_reg, index_reg, 4*sizeof(jchar));
3004     bdnz(Lloop);
3005     //14:
3006     if (cntval & 2) {
3007       lwzx(R0, str1_reg, index_reg);
3008       lwzx(tmp2_reg, str2_reg, index_reg);
3009       cmpw(CCR0, R0, tmp2_reg);
3010       bne(CCR0, Ldone_false);
3011       if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
3012     }
3013     if (cntval & 1) {
3014       lhzx(R0, str1_reg, index_reg);
3015       lhzx(tmp2_reg, str2_reg, index_reg);
3016       cmpw(CCR0, R0, tmp2_reg);
3017       bne(CCR0, Ldone_false);
3018     }
3019     // fallthrough: true
3020   }
3021   li(result_reg, 1);
3022   bind(Ldone_false);
3023 }
3024 
3025 
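// Assert on the current CCR0 state: stop with the given msg/id unless CCR0's
// eq bit matches check_equal. The condition must have been computed by a
// preceding compare. Typical use (illustrative sketch; msg and id are made up):
//
//   cmpdi(CCR0, reg, 0);
//   asm_assert(/*check_equal=*/true, "reg must be zero", 0x100);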
3026 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
3027 #ifdef ASSERT
3028   Label ok;
3029   if (check_equal) {
3030     beq(CCR0, ok);
3031   } else {
3032     bne(CCR0, ok);
3033   }
3034   stop(msg, id);
3035   bind(ok);
3036 #endif
3037 }
3038 
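// Assert that the 4- or 8-byte memory word at mem_base + mem_offset is zero
// (check_equal == true) or non-zero (check_equal == false). Debug-only, like
// asm_assert above.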
3039 void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
3040                                           Register mem_base, const char* msg, int id) {
3041 #ifdef ASSERT
3042   switch (size) {
3043     case 4:
3044       lwz(R0, mem_offset, mem_base);
3045       cmpwi(CCR0, R0, 0);
3046       break;
3047     case 8:
3048       ld(R0, mem_offset, mem_base);
3049       cmpdi(CCR0, R0, 0);
3050       break;
3051     default:
3052       ShouldNotReachHere();
3053   }
3054   asm_assert(check_equal, msg, id);
3055 #endif // ASSERT
3056 }
3057 
3058 void MacroAssembler::verify_thread() {
3059   if (VerifyThread) {
3060     unimplemented("'VerifyThread' currently not implemented on PPC");
3061   }
3062 }
3063 
3064 // READ: oop. KILL: R0, and possibly the volatile float registers.
3065 void MacroAssembler::verify_oop(Register oop, const char* msg) {
3066   if (!VerifyOops) {
3067     return;
3068   }
3069 
3070   address /* FunctionDescriptor** */ fd = StubRoutines::verify_oop_subroutine_entry_address();
3071   const Register tmp = R11; // Will be preserved.
3072   const int nbytes_save = 11*8; // Volatile gprs except R0.
3073   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
3074 
3075   if (oop == tmp) mr(R4_ARG2, oop);
3076   save_LR_CR(tmp); // save in old frame
3077   push_frame_reg_args(nbytes_save, tmp);
3078   // load FunctionDescriptor** / entry_address *
3079   load_const_optimized(tmp, fd, R0);
3080   // load FunctionDescriptor* / entry_address
3081   ld(tmp, 0, tmp);
3082   if (oop != tmp) mr_if_needed(R4_ARG2, oop);
3083   load_const_optimized(R3_ARG1, (address)msg, R0);
3084   // Call destination for its side effect.
3085   call_c(tmp);
3086 
3087   pop_frame();
3088   restore_LR_CR(tmp);
3089   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
3090 }
3091 
3092 const char* stop_types[] = {
3093   "stop",
3094   "untested",
3095   "unimplemented",
3096   "shouldnotreachhere"
3097 };
3098 
3099 static void stop_on_request(int tp, const char* msg) {
3100   tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp % /*stop_end*/ 4], msg);
3101   guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
3102 }
3103 
3104 // Call a C function that prints output.
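// Sets up the type and message as arguments, calls stop_on_request() via
// call_VM_leaf, then emits an illtrap followed by the 32-bit id embedded
// in the code stream.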
3105 void MacroAssembler::stop(int type, const char* msg, int id) {
3106 #ifndef PRODUCT
3107   block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
3108 #else
3109   block_comment("stop {");
3110 #endif
3111 
3112   // setup arguments
3113   load_const_optimized(R3_ARG1, type);
3114   load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
3115   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
3116   illtrap();
3117   emit_int32(id);
3118   block_comment("} stop;");
3119 }
3120 
3121 #ifndef PRODUCT
3122 // Write pattern 0x0101010101010101 in memory region [low-before*BytesPerWord, high+after*BytesPerWord].
3123 // Val, addr are temp registers.
3124 // If low == addr, addr is killed.
3125 // High is preserved.
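//
// A rough C sketch of the effect (illustrative only, not part of the build):
//
//   void zap(intptr_t* low, int before, intptr_t* high, int after) {
//     for (intptr_t* p = low - before; p <= high + after; ++p) {
//       *p = 0x0101010101010101LL;
//     }
//   }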
3126 void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
3127   if (!ZapMemory) return;
3128 
3129   assert_different_registers(low, val);
3130 
3131   BLOCK_COMMENT("zap memory region {");
3132   load_const_optimized(val, 0x0101010101010101);
3133   int size = before + after;
3134   if (low == high && size < 5 && size > 0) {
3135     int offset = -before*BytesPerWord;
3136     for (int i = 0; i < size; ++i) {
3137       std(val, offset, low);
3138       offset += (1*BytesPerWord);
3139     }
3140   } else {
3141     addi(addr, low, -before*BytesPerWord);
3142     assert_different_registers(high, val);
3143     if (after) addi(high, high, after * BytesPerWord);
3144     Label loop;
3145     bind(loop);
3146     std(val, 0, addr);
3147     addi(addr, addr, 8);
3148     cmpd(CCR6, addr, high);
3149     ble(CCR6, loop);
3150     if (after) addi(high, high, -after * BytesPerWord);  // Correct back to old value.
3151   }
3152   BLOCK_COMMENT("} zap memory region");
3153 }
3154 
3155 #endif // !PRODUCT
3156 
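// RAII helper: the constructor emits a conditional branch that skips all
// code generated within the scope if the byte at *flag_addr is zero; the
// destructor binds the branch target. Typical use (illustrative sketch,
// flag name made up):
//
//   {
//     SkipIfEqualZero skip_if(masm, temp_reg, &SomeDiagnosticFlag);
//     // ... code emitted only when the flag is set ...
//   }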
3157 SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
3158   int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
3159   assert(sizeof(bool) == 1, "PowerPC ABI");
3160   masm->lbz(temp, simm16_offset, temp);
3161   masm->cmpwi(CCR0, temp, 0);
3162   masm->beq(CCR0, _label);
3163 }
3164 
3165 SkipIfEqualZero::~SkipIfEqualZero() {
3166   _masm->bind(_label);
3167 }