/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
#define CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

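// Check whether 'a' points to a load with a large offset: either a single ld,
// or an addis/ld pair where the ld uses the register set up by the addis as
// its base.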
inline bool MacroAssembler::is_ld_largeoffset(address a) {
  const int inst1 = *(int *)a;
  const int inst2 = *(int *)(a+4);
  return (is_ld(inst1)) ||
         (is_addis(inst1) && is_ld(inst2) && inv_ra_field(inst2) == inv_rt_field(inst1));
}

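// Return the offset encoded by an is_ld_largeoffset sequence. For the addis/ld
// pair, the high 16 bits come from the addis and the low 16 bits from the ld.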
inline int MacroAssembler::get_ld_largeoffset_offset(address a) {
  assert(MacroAssembler::is_ld_largeoffset(a), "must be ld with large offset");

  const int inst1 = *(int *)a;
  if (is_ld(inst1)) {
    return inv_d1_field(inst1);
  } else {
    const int inst2 = *(int *)(a+4);
    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
  }
}

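// Round the value in r up to the next multiple of 'modulus' (a power of 2).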
inline void MacroAssembler::round_to(Register r, int modulus) {
  assert(is_power_of_2_long((jlong)modulus), "must be power of 2");
  addi(r, r, modulus-1);
  clrrdi(r, r, log2_long((jlong)modulus));
}

// Move register if destination register and source register are different.
inline void MacroAssembler::mr_if_needed(Register rd, Register rs) {
  if (rs != rd) mr(rd, rs);
}

// Address of the global TOC.
inline address MacroAssembler::global_toc() {
  return CodeCache::low_bound();
}

// Offset of given address to the global TOC.
inline int MacroAssembler::offset_to_global_toc(const address addr) {
  intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
  assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range");
  return (int)offset;
}

// Address of current method's TOC.
inline address MacroAssembler::method_toc() {
  return code()->consts()->start();
}

// Offset of given address to current method's TOC.
inline int MacroAssembler::offset_to_method_toc(address addr) {
  intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
  assert(is_simm((long)offset, 31) && offset >= 0, "must be in range");
  return (int)offset;
}

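// Detect the addis/addi pair used to calculate an address from the global TOC.
// 'a' points to the addi (the relocated instruction); the matching addis is
// searched for backwards, but not past 'bound'.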
inline bool MacroAssembler::is_calculate_address_from_global_toc_at(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *) a;

  // The relocation points to the second instruction, the addi.
  if (!is_addi(inst2)) return false;

  // The addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  if (inv_ra_field(inst2) != dst) return false;

  // Now, find the preceding addis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
      // stop, found the addis which writes dst
      break;
    }
    inst1_addr -= BytesPerInstWord;
  }

  if (!(inst1 == 0 || inv_ra_field(inst1) == 29 /* R29 */)) return false;
  return is_addis(inst1);
}

#ifdef _LP64
// Detect narrow oop constants.
inline bool MacroAssembler::is_set_narrow_oop(address a, address bound) {
  const address inst2_addr = a;
  const int inst2 = *(int *)a;

  // The relocation points to the second instruction, the addi.
  if (!is_addi(inst2)) return false;

  // The addi reads and writes the same register dst.
  const int dst = inv_rt_field(inst2);
  if (inv_ra_field(inst2) != dst) return false;

  // Now, find the preceding lis which writes to dst.
  int inst1 = 0;
  address inst1_addr = inst2_addr - BytesPerInstWord;
  while (inst1_addr >= bound) {
    inst1 = *(int *) inst1_addr;
    if (is_lis(inst1) && inv_rs_field(inst1) == dst) return true;
    inst1_addr -= BytesPerInstWord;
  }
  return false;
}
#endif // _LP64

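// Check whether 'a' points to a load_const sequence: either
// lis, ori, rldicr (sldi), oris, ori or lis, lis, ori, ori.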
inline bool MacroAssembler::is_load_const_at(address a) {
  const int* p_inst = (int *) a;
  bool b = is_lis(*p_inst++);
  if (is_ori(*p_inst)) {
    p_inst++;
    b = b && is_rldicr(*p_inst++); // TODO: could be made more precise: `sldi'!
    b = b && is_oris(*p_inst++);
    b = b && is_ori(*p_inst);
  } else if (is_lis(*p_inst)) {
    p_inst++;
    b = b && is_ori(*p_inst++);
    b = b && is_ori(*p_inst);
    // TODO: could enhance reliability by adding is_insrdi
  } else return false;
  return b;
}

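// Materialize oop constants in register d; the load carries an oop relocation.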
inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}

inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  load_const(d, obj_addr);
}

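// Patch the branch instruction at 'branch' so that it branches to 'target'.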
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
  // Variant 1, the 1st instruction contains the destination address:
  //
  //    bcxx  DEST
  //    endgroup
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
         is_endgroup(instruction_2);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant2_at(address instruction_addr) {
  // Variant 2, the 2nd instruction contains the destination address:
  //
  //    b!cxx SKIP
  //    bxx   DEST
  //  SKIP:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_bcxx(instruction_1) &&
         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) == (intptr_t)(instruction_addr + 2*4)) &&
         is_bxx(instruction_2);
}

// Relocation of conditional far branches.
inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
  // Variant 3, far cond branch to the next instruction, already patched to nops:
  //
  //    nop
  //    endgroup
  //  SKIP/DEST:
  //
  const int instruction_1 = *(int*)(instruction_addr);
  const int instruction_2 = *(int*)(instruction_addr + 4);
  return is_nop(instruction_1) &&
         is_endgroup(instruction_2);
}

// Convenience bc_far versions.
inline void MacroAssembler::blt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, less), L, optimize); }
inline void MacroAssembler::bgt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::beq_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bso_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, summary_overflow), L, optimize); }
inline void MacroAssembler::bge_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, less), L, optimize); }
inline void MacroAssembler::ble_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, greater), L, optimize); }
inline void MacroAssembler::bne_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, equal), L, optimize); }
inline void MacroAssembler::bns_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, summary_overflow), L, optimize); }

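// Indirect call via CTR; returns the pc of the instruction following the call
// (the call's return pc).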
inline address MacroAssembler::call_stub(Register function_entry) {
  mtctr(function_entry);
  bctrl();
  return pc();
}

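// Indirect branch (without linking) to 'function_entry' via CTR, with LR
// preset to 'return_pc' so the callee returns there instead of here.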
inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
  assert_different_registers(function_entry, return_pc);
  mtlr(return_pc);
  mtctr(function_entry);
  bctr();
}

// Get the pc where the last emitted call will return to.
inline address MacroAssembler::last_calls_return_pc() {
  return _last_calls_return_pc;
}

// Read from the polling page; its address is already in a register.
inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
  ld(R0, offset, polling_page_address);
}

// Trap-instruction-based checks.

inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
  assert(TrapBasedNullChecks, "sanity");
  tdi(cmp, a/*reg a*/, 0);
}
inline void MacroAssembler::trap_zombie_not_entrant() {
  tdi(traptoUnconditional, 0/*reg 0*/, 1);
}
inline void MacroAssembler::trap_should_not_reach_here() {
  tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
}

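// Inline-cache miss check: trap (SIGTRAP) if a and b differ (unsigned compare).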
inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
  td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
}

// Do an explicit null check if access to a+offset will not raise a SIGSEGV.
// Either issue a trap instruction that raises SIGTRAP, or do a compare that
// branches to exception_entry.
// No support for compressed oops (base page of heap).  Does not distinguish
// loads and stores.
inline void MacroAssembler::null_check_throw(Register a, int offset, Register temp_reg, address exception_entry) {
  if (!ImplicitNullChecks || needs_explicit_null_check(offset) NOT_LINUX(|| true) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      assert(UseSIGTRAP, "sanity");
      trap_null_check(a);
    } else {
      Label ok;
      cmpdi(CCR0, a, 0);
      bne(CCR0, ok);
      load_const_optimized(temp_reg, exception_entry);
      mtctr(temp_reg);
      bctr();
      bind(ok);
    }
  }
}

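// Load doubleword, preceded by a trap-based null check of the base register on
// platforms where the zero page is not read protected.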
inline void MacroAssembler::ld_with_trap_null_check(Register d, int si16, Register s1) {
  if (NOT_LINUX(true) LINUX_ONLY(false) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      trap_null_check(s1);
    }
  }
  ld(d, si16, s1);
}

// Attention: No null check for the loaded uncompressed oop. Can be used for loading a klass field.
inline void MacroAssembler::load_heap_oop_with_trap_null_check(Register d, RegisterOrConstant si16,
                                                               Register s1) {
  if (NOT_LINUX(true) LINUX_ONLY(false) /*!os::zero_page_read_protected()*/) {
    if (TrapBasedNullChecks) {
      trap_null_check(s1);
    }
  }
  load_heap_oop_not_null(d, si16, s1);
}

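// Load a heap oop that is known to be non-null; decodes it if compressed oops
// are in use.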
inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    // Attention: no null check here!
    decode_heap_oop_not_null(d);
  } else {
    ld(d, offs, s1);
  }
}

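// Load a heap oop that may be null; decoding preserves a null value.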
inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
  if (UseCompressedOops) {
    lwz(d, offs, s1);
    decode_heap_oop(d);
  } else {
    ld(d, offs, s1);
  }
}

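// Compress a non-null oop in d: subtract the narrow oop base (kept in R30) if
// there is one, then shift right by LogMinObjAlignmentInBytes if a shift is in use.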
inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_base() != NULL) {
    sub(d, d, R30);
  }
  if (Universe::narrow_oop_shift() != 0) {
    srdi(d, d, LogMinObjAlignmentInBytes);
  }
}

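// Decompress a non-null narrow oop in d: undo the shift, then add the narrow
// oop base (R30) if there is one.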
inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
  if (Universe::narrow_oop_shift() != 0) {
    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
}

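// Decompress a narrow oop in d; a null value is preserved (the base is not
// added to 0).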
inline void MacroAssembler::decode_heap_oop(Register d) {
  Label isNull;
  if (Universe::narrow_oop_base() != NULL) {
    cmpwi(CCR0, d, 0);
    beq(CCR0, isNull);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    sldi(d, d, LogMinObjAlignmentInBytes);
  }
  if (Universe::narrow_oop_base() != NULL) {
    add(d, d, R30);
  }
  bind(isNull);
}

// SIGTRAP-based range checks for arrays.
inline void MacroAssembler::trap_range_check_l(Register a, Register b) {
  tw (traptoLessThanUnsigned,                  a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_l(Register a, int si16) {
  twi(traptoLessThanUnsigned,                  a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_le(Register a, int si16) {
  twi(traptoEqual | traptoLessThanUnsigned,    a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_g(Register a, int si16) {
  twi(traptoGreaterThanUnsigned,               a/*reg a*/, si16);
}
inline void MacroAssembler::trap_range_check_ge(Register a, Register b) {
  tw (traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, b/*reg b*/);
}
inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
  twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}

#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP