/*
 * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
#define CPU_PPC_VM_NATIVEINST_PPC_HPP

#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"

// We have interfaces for the following instructions:
//
// - NativeInstruction
//   - NativeCall
//   - NativeFarCall
//   - NativeMovConstReg
//   - NativeJump
//   - NativeIllegalInstruction
//   - NativeConditionalFarBranch
//   - NativeCallTrampolineStub

// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  bool is_jump() { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.

  bool is_sigtrap_ic_miss_check() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_ic_miss_check(long_at(0));
  }

  bool is_sigtrap_null_check() {
    assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
    return MacroAssembler::is_trap_null_check(long_at(0));
  }

  // We use a special trap for marking a method as not_entrant or zombie
  // iff UseSIGTRAP.
  bool is_sigtrap_zombie_not_entrant() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
  }

  // We use an illtrap for marking a method as not_entrant or zombie
  // iff !UseSIGTRAP.
  bool is_sigill_zombie_not_entrant() {
    assert(!UseSIGTRAP, "precondition");
    // Work around a C++ compiler bug which changes 'this'.
    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
  }
  static bool is_sigill_zombie_not_entrant_at(address addr);

#ifdef COMPILER2
  // SIGTRAP-based implicit range checks
  bool is_sigtrap_range_check() {
    assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
    return MacroAssembler::is_trap_range_check(long_at(0));
  }
#endif

  // 'should not reach here'.
  bool is_sigtrap_should_not_reach_here() {
    return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
  }

  bool is_safepoint_poll() {
    // Is the current instruction a POTENTIAL read access to the polling page?
    // The current arguments of the instruction are not checked!
    if (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) {
      int encoding = SafepointMechanism::poll_bit();
      return MacroAssembler::is_tdi(long_at(0), Assembler::traptoGreaterThanUnsigned | Assembler::traptoEqual,
                                    -1, encoding);
    }
    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
  }

  bool is_memory_serialization(JavaThread *thread, void *ucontext) {
    // Is the current instruction a write access by the given thread to the
    // memory serialization page?
    return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
  }

  address get_stack_bang_address(void *ucontext) {
    // If long_at(0) is not a stack bang, return 0. Otherwise, return the
    // banged address.
    return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
  }

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }

 public:
  void verify() NOT_DEBUG_RETURN;
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
  inst->verify();
  return inst;
}
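
// Illustrative usage sketch (not part of this header): a fault handler can
// wrap the faulting pc and query the instruction kind through the predicates
// above. The names 'pc' and 'uc' below are hypothetical placeholders for the
// faulting program counter and the ucontext passed to the handler.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_safepoint_poll()) {
//     // treat the fault as a safepoint poll hit
//   } else if (ni->get_stack_bang_address(uc) != NULL) {
//     // treat the fault as a stack bang (stack overflow check)
//   }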

// The NativeCall is an abstraction for accessing/manipulating call
// instructions. It is used to manipulate inline caches, primitive &
// dll calls, etc.
//
// Sparc distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
// at present, we provide a single class `NativeCall' representing the
// sequence `load_const, mtctr, bctrl' or the sequence `ld_from_toc,
// mtctr, bctrl'.
class NativeCall: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    load_const_instruction_size                 = 28,
    load_const_from_method_toc_instruction_size = 16,
    instruction_size                            = 16 // Used in shared code for calls with reloc_info.
  };

  static bool is_call_at(address a) {
    return Assembler::is_bl(*(int*)(a));
  }

  static bool is_call_before(address return_address) {
    return NativeCall::is_call_at(return_address - 4);
  }

  address instruction_address() const {
    return addr_at(0);
  }

  address next_instruction_address() const {
    // We have only bl.
    assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
    return addr_at(4);
  }

  address return_address() const {
    return next_instruction_address();
  }

  address destination() const;

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();

  void verify_alignment() {} // do nothing on ppc
  void verify() NOT_DEBUG_RETURN;
};

inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
  call->verify();
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = NULL;
  if (MacroAssembler::is_bl(*(int*)(return_address - 4)))
    call = (NativeCall*)(return_address - 4);
  call->verify();
  return call;
}
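
// Illustrative usage sketch (an assumption, not taken from the VM sources):
// given the return pc of a compiled call site, the bl preceding it can be
// located and redirected. 'return_pc' and 'new_entry' are hypothetical names.
//
//   if (NativeCall::is_call_before(return_pc)) {
//     NativeCall* call = nativeCall_before(return_pc);
//     call->set_destination_mt_safe(new_entry);
//   }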

// The NativeFarCall is an abstraction for accessing/manipulating native
// call-anywhere instructions.
// Used to call native methods which may be loaded anywhere in the address
// space, possibly out of reach of a call instruction.
class NativeFarCall: public NativeInstruction {
 public:
  // We use MacroAssembler::bl64_patchable() for implementing a
  // call-anywhere instruction.

  // Checks whether instr points at a NativeFarCall instruction.
  static bool is_far_call_at(address instr) {
    return MacroAssembler::is_bl64_patchable_at(instr);
  }

  // Does the NativeFarCall implementation use a pc-relative encoding
  // of the call destination?
  // Used when relocating code.
  bool is_pcrelative() {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeFarCall's destination.
  address destination() const {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
  }

  // Sets the NativeFarCall's destination, not necessarily mt-safe.
  // Used when relocating code.
  void set_destination(address dest) {
    // Set new destination (implementation of call may change here).
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
  }

  void verify() NOT_DEBUG_RETURN;
};

// Instantiates a NativeFarCall object starting at the given instruction
// address and returns the NativeFarCall object.
inline NativeFarCall* nativeFarCall_at(address instr) {
  NativeFarCall* call = (NativeFarCall*)instr;
  call->verify();
  return call;
}
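
// Illustrative relocation sketch (an assumption, not taken from the VM
// sources): when code containing a pc-relative far call is copied to a new
// location, the destination is re-applied there. 'old_pc' and 'new_pc' are
// hypothetical addresses of the old and the relocated call site.
//
//   if (NativeFarCall::is_far_call_at(old_pc)) {
//     address dest = nativeFarCall_at(old_pc)->destination();
//     nativeFarCall_at(new_pc)->set_destination(dest);
//   }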

// An interface for accessing/manipulating native set_oop imm, reg instructions
// (used to manipulate inlined data references, etc.).
class NativeMovConstReg: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    load_const_instruction_size                 = 20,
    load_const_from_method_toc_instruction_size =  8,
    instruction_size                            =  8 // Used in shared code for calls with reloc_info.
  };

  address instruction_address() const {
    return addr_at(0);
  }

  address next_instruction_address() const;

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;

  // Patch the code stream.
  address set_data_plain(intptr_t x, CodeBlob *code);
  // Patch the code stream and oop pool.
  void set_data(intptr_t x);

  // Patch narrow oop constants. Use this also for narrow klass.
  void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);

  void verify() NOT_DEBUG_RETURN;
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)address;
  test->verify();
  return test;
}
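
// Illustrative patching sketch (an assumption, not taken from the VM
// sources): an inlined constant at a known code position can be read and
// rewritten in place; set_data() also keeps the oop pool consistent where
// applicable. 'const_pc' and 'new_value' are hypothetical names.
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(const_pc);
//   intptr_t old_value = mov->data();
//   mov->set_data(new_value);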

// The NativeJump is an abstraction for accessing/manipulating native
// jump-anywhere instructions.
class NativeJump: public NativeInstruction {
 public:
  // We use MacroAssembler::b64_patchable() for implementing a
  // jump-anywhere instruction.

  enum ppc_specific_constants {
    instruction_size = MacroAssembler::b64_patchable_size
  };

  // Checks whether instr points at a NativeJump instruction.
  static bool is_jump_at(address instr) {
    return MacroAssembler::is_b64_patchable_at(instr)
      || (   MacroAssembler::is_load_const_from_method_toc_at(instr)
          && Assembler::is_mtctr(*(int*)(instr + 2 * 4))
          && Assembler::is_bctr(*(int*)(instr + 3 * 4)));
  }

  // Does the NativeJump implementation use a pc-relative encoding
  // of the jump destination?
  // Used when relocating code or patching jumps.
  bool is_pcrelative() {
    return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeJump's destination.
  address jump_destination() const {
    if (MacroAssembler::is_b64_patchable_at((address)this)) {
      return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
      return (address)((NativeMovConstReg *)this)->data();
    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }

  // Sets the NativeJump's destination, not necessarily mt-safe.
  // Used when relocating code or patching jumps.
  void set_jump_destination(address dest) {
    // Set new destination (implementation of jump may change here).
    if (MacroAssembler::is_b64_patchable_at((address)this)) {
      MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
      ((NativeMovConstReg *)this)->set_data((intptr_t)dest);
    } else {
      ShouldNotReachHere();
    }
  }

  // MT-safe insertion of native jump at verified method entry
  static void patch_verified_entry(address entry, address verified_entry, address dest);

  void verify() NOT_DEBUG_RETURN;

  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // We just patch one instruction on ppc64, so the jump doesn't have to
    // be aligned. Nothing to do here.
  }
};

// Instantiates a NativeJump object starting at the given instruction
// address and returns the NativeJump object.
inline NativeJump* nativeJump_at(address instr) {
  NativeJump* call = (NativeJump*)instr;
  call->verify();
  return call;
}
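
// Illustrative sketch (an assumption, not taken from the VM sources):
// reading and redirecting a patchable jump, e.g. when a stub is retargeted.
// 'jump_pc' and 'new_target' are hypothetical names.
//
//   if (NativeJump::is_jump_at(jump_pc)) {
//     NativeJump* jump = nativeJump_at(jump_pc);
//     if (jump->jump_destination() != new_target) {
//       jump->set_jump_destination(new_target);
//     }
//   }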

// NativeConditionalFarBranch is an abstraction for accessing/manipulating
// conditional far branches.
class NativeConditionalFarBranch : public NativeInstruction {
 public:

  static bool is_conditional_far_branch_at(address instr) {
    return MacroAssembler::is_bc_far_at(instr);
  }

  address branch_destination() const {
    return MacroAssembler::get_dest_of_bc_far_at((address)this);
  }

  void set_branch_destination(address dest) {
    MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
  }
};

inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
  assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
         "must be a conditional far branch");
  return (NativeConditionalFarBranch*)address;
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 private:

  address encoded_destination_addr() const;

 public:

  address destination(nmethod *nm = NULL) const;
  int destination_toc_offset() const;

  void set_destination(address new_destination);
};

// Note: Other stubs must not begin with this pattern.
inline bool is_NativeCallTrampolineStub_at(address address) {
  int first_instr = *(int*)address;
  // calculate_address_from_global_toc and the long form of ld_largeoffset_unchecked begin with an addis targeting R12.
  if (Assembler::is_addis(first_instr) &&
      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2) return true;

  // The short form of ld_largeoffset_unchecked is an ld, which is followed by an mtctr.
  int second_instr = *((int*)address + 1);
  if (Assembler::is_ld(first_instr) &&
      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2 &&
      Assembler::is_mtctr(second_instr) &&
      (Register)(intptr_t)Assembler::inv_rs_field(second_instr) == R12_scratch2) return true;

  return false;
}

inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
  assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
  return (NativeCallTrampolineStub*)address;
}
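
// Illustrative sketch (an assumption, not taken from the VM sources): a bl
// that reaches its real target through a trampoline stub can be retargeted by
// patching the stub instead of the call instruction itself. 'call_pc' and
// 'new_dest' are hypothetical names; the NULL check reflects the assumption
// that not every call has an associated trampoline.
//
//   NativeCall* call = nativeCall_at(call_pc);
//   address tramp = call->get_trampoline();
//   if (tramp != NULL && is_NativeCallTrampolineStub_at(tramp)) {
//     NativeCallTrampolineStub_at(tramp)->set_destination(new_dest);
//   }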

///////////////////////////////////////////////////////////////////////////////////////////////////

//-------------------------------------
//  N a t i v e G e n e r a l J u m p
//-------------------------------------

// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);

// Currently only implemented as single unconditional branch.
class NativeGeneralJump: public NativeInstruction {
 public:

  enum PPC64_specific_constants {
    instruction_size = 4
  };

  address instruction_address() const { return addr_at(0); }

  // Creation.
  friend inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
    DEBUG_ONLY( jump->verify(); )
    return jump;
  }

  // Insertion of native general jump instruction.
  static void insert_unconditional(address code_pos, address entry);

  address jump_destination() const {
    DEBUG_ONLY( verify(); )
    return addr_at(0) + Assembler::inv_li_field(long_at(0));
  }

  void set_jump_destination(address dest) {
    DEBUG_ONLY( verify(); )
    insert_unconditional(addr_at(0), dest);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify() const { guarantee(Assembler::is_b(long_at(0)), "invalid NativeGeneralJump"); }
};
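
// Illustrative sketch (an assumption, not taken from the VM sources): the
// general jump is a single 'b' instruction, so the destination must lie
// within the +/-32 MB reach of the 26-bit branch displacement (24-bit LI
// field shifted left by 2). 'code_pos' and 'entry' are hypothetical names.
//
//   NativeGeneralJump::insert_unconditional(code_pos, entry);
//   assert(nativeGeneralJump_at(code_pos)->jump_destination() == entry, "patched");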

// An interface for accessing/manipulating native load int (load_const32).
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at(address address);
class NativeMovRegMem: public NativeInstruction {
 public:

  enum PPC64_specific_constants {
    instruction_size = 8
  };

  address instruction_address() const { return addr_at(0); }

  intptr_t offset() const {
#ifdef VM_LITTLE_ENDIAN
    short *hi_ptr = (short*)(addr_at(0));
    short *lo_ptr = (short*)(addr_at(4));
#else
    short *hi_ptr = (short*)(addr_at(0) + 2);
    short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
    return ((*hi_ptr) << 16) | ((*lo_ptr) & 0xFFFF);
  }

  void set_offset(intptr_t x) {
#ifdef VM_LITTLE_ENDIAN
    short *hi_ptr = (short*)(addr_at(0));
    short *lo_ptr = (short*)(addr_at(4));
#else
    short *hi_ptr = (short*)(addr_at(0) + 2);
    short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
    *hi_ptr = x >> 16;
    *lo_ptr = x & 0xFFFF;
    ICache::ppc64_flush_icache_bytes(addr_at(0), NativeMovRegMem::instruction_size);
  }

  void add_offset_in_bytes(intptr_t radd_offset) {
    set_offset(offset() + radd_offset);
  }

  void verify() const {
    guarantee(Assembler::is_lis(long_at(0)), "load_const32 1st instr");
    guarantee(Assembler::is_ori(long_at(4)), "load_const32 2nd instr");
  }

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at(address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    DEBUG_ONLY( test->verify(); )
    return test;
  }
};
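
// Worked example (informal note on the class above): the 32-bit value loaded
// by the lis/ori pair is split into halfwords. The immediate halfword sits at
// byte offset 0 of each instruction on little-endian and at offset 2 on
// big-endian, which is what the addr_at() arithmetic above selects. For
// x = 0x00123456, set_offset() stores 0x0012 into the lis immediate and
// 0x3456 into the ori immediate, and offset() reassembles
// (0x0012 << 16) | 0x3456 == 0x00123456. Because the high halfword is read
// back through a signed short, values with bit 31 set reconstruct as
// negative intptr_t values.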

#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP