/*
 * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
#define CPU_PPC_VM_NATIVEINST_PPC_HPP

#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"

// We have interfaces for the following instructions:
//
// - NativeInstruction
//   - NativeCall
//   - NativeFarCall
//   - NativeMovConstReg
//   - NativeJump
//   - NativeIllegalInstruction
//   - NativeConditionalFarBranch
//   - NativeCallTrampolineStub

// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to manipulate code relative to this.
class NativeInstruction {
  friend class Relocation;

 public:
  bool is_jump() { return Assembler::is_b(long_at(0)); } // See NativeGeneralJump.

  bool is_sigtrap_ic_miss_check() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_ic_miss_check(long_at(0));
  }

  bool is_sigtrap_null_check() {
    assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
    return MacroAssembler::is_trap_null_check(long_at(0));
  }

  // We use a special trap for marking a method as not_entrant or zombie
  // iff UseSIGTRAP.
  bool is_sigtrap_zombie_not_entrant() {
    assert(UseSIGTRAP, "precondition");
    return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
  }

  // We use an illtrap for marking a method as not_entrant or zombie
  // iff !UseSIGTRAP.
  bool is_sigill_zombie_not_entrant() {
    assert(!UseSIGTRAP, "precondition");
    // Work around a C++ compiler bug which changes 'this'.
    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
  }
  static bool is_sigill_zombie_not_entrant_at(address addr);

#ifdef COMPILER2
  // SIGTRAP-based implicit range checks
  bool is_sigtrap_range_check() {
    assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
    return MacroAssembler::is_trap_range_check(long_at(0));
  }
#endif

  // 'should not reach here'.
  bool is_sigtrap_should_not_reach_here() {
    return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
  }

  bool is_safepoint_poll() {
    // Is the current instruction a POTENTIAL read access to the polling page?
    // The current arguments of the instruction are not checked!
    if (SafepointMechanism::uses_thread_local_poll() && USE_POLL_BIT_ONLY) {
      int encoding = SafepointMechanism::poll_bit();
      return MacroAssembler::is_tdi(long_at(0), Assembler::traptoGreaterThanUnsigned | Assembler::traptoEqual,
                                    -1, encoding);
    }
    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
  }

  bool is_memory_serialization(JavaThread *thread, void *ucontext) {
    // Is the current instruction a write access of thread to the
    // memory serialization page?
    return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
  }

  address get_stack_bang_address(void *ucontext) {
    // If long_at(0) is not a stack bang, return 0. Otherwise, return
    // banged address.
    return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
  }

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }

 public:
  void verify() NOT_DEBUG_RETURN;
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
  inst->verify();
  return inst;
}

// The NativeCall is an abstraction for accessing/manipulating call
// instructions. It is used to manipulate inline caches, primitive &
// dll calls, etc.
//
// Sparc distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
// at present, we provide a single class `NativeCall' representing the
// sequence `load_const, mtctr, bctrl' or the sequence 'ld_from_toc,
// mtctr, bctrl'.
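//
// Both forms end in mtctr/bctrl and differ only in how the callee address is
// materialized. An illustrative sketch of the two shapes (not the exact
// emitted code; register choice is schematic):
//
//   <load_const Rx, destination>    or    <ld Rx, toc_offset(toc_base)>
//   mtctr Rx                              mtctr Rx
//   bctrl                                 bctrl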
class NativeCall: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    load_const_instruction_size                 = 28,
    load_const_from_method_toc_instruction_size = 16,
    instruction_size                            = 16 // Used in shared code for calls with reloc_info.
  };

  static bool is_call_at(address a) {
    return Assembler::is_bl(*(int*)(a));
  }

  static bool is_call_before(address return_address) {
    return NativeCall::is_call_at(return_address - 4);
  }

  address instruction_address() const {
    return addr_at(0);
  }

  address next_instruction_address() const {
    // We have only bl.
    assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
    return addr_at(4);
  }

  address return_address() const {
    return next_instruction_address();
  }

  address destination() const;

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();

  void verify_alignment() {} // do nothing on ppc
  void verify() NOT_DEBUG_RETURN;
};

inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
  call->verify();
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = NULL;
  if (MacroAssembler::is_bl(*(int*)(return_address - 4))) {
    call = (NativeCall*)(return_address - 4);
  }
  assert(call != NULL, "no bl found before return_address");
  call->verify();
  return call;
}
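
// Illustrative usage (a sketch; 'pc' and 'new_dest' are hypothetical
// addresses): inspect and retarget a patchable call site.
//
//   if (NativeCall::is_call_at(pc)) {
//     NativeCall* call = nativeCall_at(pc);
//     address old_dest = call->destination();
//     call->set_destination_mt_safe(new_dest);
//   }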

// The NativeFarCall is an abstraction for accessing/manipulating native
// call-anywhere instructions.
// Used to call native methods which may be loaded anywhere in the address
// space, possibly out of reach of a call instruction.
class NativeFarCall: public NativeInstruction {
 public:
  // We use MacroAssembler::bl64_patchable() for implementing a
  // call-anywhere instruction.

  // Checks whether instr points at a NativeFarCall instruction.
  static bool is_far_call_at(address instr) {
    return MacroAssembler::is_bl64_patchable_at(instr);
  }

  // Does the NativeFarCall implementation use a pc-relative encoding
  // of the call destination?
  // Used when relocating code.
  bool is_pcrelative() {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeFarCall's destination.
  address destination() const {
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
  }

  // Sets the NativeFarCall's destination, not necessarily mt-safe.
  // Used when relocating code.
  void set_destination(address dest) {
    // Set new destination (implementation of call may change here).
    assert(MacroAssembler::is_bl64_patchable_at((address)this),
           "unexpected call type");
    MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
  }

  void verify() NOT_DEBUG_RETURN;
};

// Instantiates a NativeFarCall object starting at the given instruction
// address and returns the NativeFarCall object.
inline NativeFarCall* nativeFarCall_at(address instr) {
  NativeFarCall* call = (NativeFarCall*)instr;
  call->verify();
  return call;
}

// An interface for accessing/manipulating native set_oop imm, reg instructions
// (used to manipulate inlined data references, etc.).
class NativeMovConstReg: public NativeInstruction {
 public:

  enum ppc_specific_constants {
    load_const_instruction_size                 = 20,
    load_const_from_method_toc_instruction_size =  8,
    instruction_size                            =  8 // Used in shared code for calls with reloc_info.
  };

  address instruction_address() const {
    return addr_at(0);
  }

  address next_instruction_address() const;

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;

  // Patch the code stream.
  address set_data_plain(intptr_t x, CodeBlob *code);
  // Patch the code stream and oop pool.
  void set_data(intptr_t x);

  // Patch narrow oop constants. Use this also for narrow klass.
  void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);

  void verify() NOT_DEBUG_RETURN;
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)address;
  test->verify();
  return test;
}
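
// Illustrative usage (a sketch; 'pc' and 'new_value' are hypothetical): patch
// an inlined 64-bit constant, e.g. an oop or metadata pointer, in place.
//
//   NativeMovConstReg* mcr = nativeMovConstReg_at(pc);
//   intptr_t old_value = mcr->data();
//   mcr->set_data(new_value);   // also updates the oop pool, see above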

// The NativeJump is an abstraction for accessing/manipulating native
// jump-anywhere instructions.
class NativeJump: public NativeInstruction {
 public:
  // We use MacroAssembler::b64_patchable() for implementing a
  // jump-anywhere instruction.

  enum ppc_specific_constants {
    instruction_size = MacroAssembler::b64_patchable_size
  };

  // Checks whether instr points at a NativeJump instruction.
  static bool is_jump_at(address instr) {
    return MacroAssembler::is_b64_patchable_at(instr)
      || (   MacroAssembler::is_load_const_from_method_toc_at(instr)
          && Assembler::is_mtctr(*(int*)(instr + 2 * 4))
          && Assembler::is_bctr(*(int*)(instr + 3 * 4)));
  }

  // Does the NativeJump implementation use a pc-relative encoding
  // of the call destination?
  // Used when relocating code or patching jumps.
  bool is_pcrelative() {
    return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
  }

  // Returns the NativeJump's destination.
  address jump_destination() const {
    if (MacroAssembler::is_b64_patchable_at((address)this)) {
      return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
      return (address)((NativeMovConstReg *)this)->data();
    } else {
      ShouldNotReachHere();
      return NULL;
    }
  }

  // Sets the NativeJump's destination, not necessarily mt-safe.
  // Used when relocating code or patching jumps.
  void set_jump_destination(address dest) {
    // Set new destination (implementation of call may change here).
    if (MacroAssembler::is_b64_patchable_at((address)this)) {
      MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
      ((NativeMovConstReg *)this)->set_data((intptr_t)dest);
    } else {
      ShouldNotReachHere();
    }
  }

  // MT-safe insertion of native jump at verified method entry
  static void patch_verified_entry(address entry, address verified_entry, address dest);

  void verify() NOT_DEBUG_RETURN;

  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // We just patch one instruction on ppc64, so the jump doesn't have to
    // be aligned. Nothing to do here.
  }
};

// Instantiates a NativeJump object starting at the given instruction
// address and returns the NativeJump object.
inline NativeJump* nativeJump_at(address instr) {
  NativeJump* call = (NativeJump*)instr;
  call->verify();
  return call;
}
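
// Illustrative usage (a sketch; 'pc' and 'new_dest' are hypothetical
// addresses): inspect and retarget a jump-anywhere site.
//
//   if (NativeJump::is_jump_at(pc)) {
//     NativeJump* jump = nativeJump_at(pc);
//     address old_dest = jump->jump_destination();
//     jump->set_jump_destination(new_dest);   // not necessarily mt-safe, see above
//   }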

// NativeConditionalFarBranch is an abstraction for accessing/manipulating
// conditional far branches.
class NativeConditionalFarBranch : public NativeInstruction {
 public:

  static bool is_conditional_far_branch_at(address instr) {
    return MacroAssembler::is_bc_far_at(instr);
  }

  address branch_destination() const {
    return MacroAssembler::get_dest_of_bc_far_at((address)this);
  }

  void set_branch_destination(address dest) {
    MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
  }
};

inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
  assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
         "must be a conditional far branch");
  return (NativeConditionalFarBranch*)address;
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 private:

  address encoded_destination_addr() const;

 public:

  address destination(nmethod *nm = NULL) const;
  int destination_toc_offset() const;

  void set_destination(address new_destination);
};

// Note: Other stubs must not begin with this pattern.
inline bool is_NativeCallTrampolineStub_at(address address) {
  int first_instr = *(int*)address;
  // calculate_address_from_global_toc and long form of ld_largeoffset_unchecked begin with addis with target R12
  if (Assembler::is_addis(first_instr) &&
      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2) return true;

  // short form of ld_largeoffset_unchecked is ld which is followed by mtctr
  int second_instr = *((int*)address + 1);
  if (Assembler::is_ld(first_instr) &&
      (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2 &&
      Assembler::is_mtctr(second_instr) &&
      (Register)(intptr_t)Assembler::inv_rs_field(second_instr) == R12_scratch2) return true;

  return false;
}

inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
  assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
  return (NativeCallTrampolineStub*)address;
}
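
// Illustrative usage (a sketch; 'stub_addr' and 'callee' are hypothetical
// addresses): resolve a call that goes through a trampoline by retargeting
// the stub itself.
//
//   if (is_NativeCallTrampolineStub_at(stub_addr)) {
//     NativeCallTrampolineStub* stub = NativeCallTrampolineStub_at(stub_addr);
//     stub->set_destination(callee);
//   }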

///////////////////////////////////////////////////////////////////////////////////////////////////

//-------------------------------------
//  N a t i v e G e n e r a l J u m p
//-------------------------------------

// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);

// Currently only implemented as single unconditional branch.
class NativeGeneralJump: public NativeInstruction {
 public:

  enum PPC64_specific_constants {
    instruction_size = 4
  };

  address instruction_address() const { return addr_at(0); }

  // Creation.
  friend inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
    DEBUG_ONLY( jump->verify(); )
    return jump;
  }

  // Insertion of native general jump instruction.
  static void insert_unconditional(address code_pos, address entry);

  address jump_destination() const {
    DEBUG_ONLY( verify(); )
    return addr_at(0) + Assembler::inv_li_field(long_at(0));
  }

  void set_jump_destination(address dest) {
    DEBUG_ONLY( verify(); )
    insert_unconditional(addr_at(0), dest);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify() const { guarantee(Assembler::is_b(long_at(0)), "invalid NativeGeneralJump"); }
};
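
// Illustrative usage (a sketch; 'code_pos' and 'entry' are hypothetical
// addresses): plant a single unconditional branch and read it back through
// this interface.
//
//   NativeGeneralJump::insert_unconditional(code_pos, entry);
//   NativeGeneralJump* jump = nativeGeneralJump_at(code_pos);
//   assert(jump->jump_destination() == entry, "sanity");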

// An interface for accessing/manipulating native load int (load_const32).
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at(address address);
class NativeMovRegMem: public NativeInstruction {
 public:

  enum PPC64_specific_constants {
    instruction_size = 8
  };

  address instruction_address() const { return addr_at(0); }

  intptr_t offset() const {
#ifdef VM_LITTLE_ENDIAN
    short *hi_ptr = (short*)(addr_at(0));
    short *lo_ptr = (short*)(addr_at(4));
#else
    short *hi_ptr = (short*)(addr_at(0) + 2);
    short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
    return ((*hi_ptr) << 16) | ((*lo_ptr) & 0xFFFF);
  }

  void set_offset(intptr_t x) {
#ifdef VM_LITTLE_ENDIAN
    short *hi_ptr = (short*)(addr_at(0));
    short *lo_ptr = (short*)(addr_at(4));
#else
    short *hi_ptr = (short*)(addr_at(0) + 2);
    short *lo_ptr = (short*)(addr_at(4) + 2);
#endif
    *hi_ptr = x >> 16;
    *lo_ptr = x & 0xFFFF;
    ICache::ppc64_flush_icache_bytes(addr_at(0), NativeMovRegMem::instruction_size);
  }

  void add_offset_in_bytes(intptr_t radd_offset) {
    set_offset(offset() + radd_offset);
  }

  void verify() const {
    guarantee(Assembler::is_lis(long_at(0)), "load_const32 1st instr");
    guarantee(Assembler::is_ori(long_at(4)), "load_const32 2nd instr");
  }

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at(address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    DEBUG_ONLY( test->verify(); )
    return test;
  }
};
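
// Illustrative example (a sketch; 'pc' is a hypothetical address): the 32-bit
// value is split across the lis/ori pair, so set_offset(0x12345678) stores
// 0x1234 in the lis immediate and 0x5678 in the ori immediate.
//
//   NativeMovRegMem* mrm = nativeMovRegMem_at(pc);
//   mrm->add_offset_in_bytes(8);   // rewrites both immediates and flushes the icache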

#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP