/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_32_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_32_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "register_arm.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes is a single native instruction
// with a sequence of instructions.
//
// The 'Raw' variants are the low-level initial code (usually one
// instruction wide, though some are already composed sequences). They
// should be used only by the back-end.
//
// The non-raw classes are the front-end entry points, hiding potential
// back-end extensions and the actual instruction size.
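//
// For example, an extended call may be emitted as a composed sequence
// such as
//   add  LR, PC, #4        // set up the return address past the literal
//   ldr  PC, [PC, #-4]     // jump through a nearby literal word
// which is what is_fat_call() below recognizes (a sketch only; the
// exact sequence is back-end specific).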
class NativeInstruction;

class RawNativeInstruction {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize
  };

  enum InstructionKind {
    instr_ldr_str    = 0x50,
    instr_ldrh_strh  = 0x10,
    instr_fld_fst    = 0xd0
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
  static const int zombie_illegal_instruction = 0xe7f000f0;

  static int decode_rotated_imm12(int encoding) {
    int base = encoding & 0xff;
    int right_rotation = (encoding & 0xf00) >> 7;
    if (right_rotation == 0) {
      // Avoid the undefined 32-bit left shift below when the rotate field is 0
      return base;
    }
    int left_rotation = 32 - right_rotation;
    int val = (base >> right_rotation) | (base << left_rotation);
    return val;
  }
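
  // Worked example: for encoding 0x4ff the rotate field is 4, so
  // right_rotation == 8 and base == 0xff, giving
  //   (0xff >> 8) | (0xff << 24) == 0xff000000
  // i.e. the 8-bit base rotated right by twice the 4-bit rotate field.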

  address addr_at(int offset)        const { return (address)this + offset; }
  address instruction_address()      const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }
  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

 public:
  int encoding()                     const { return *(int*)this; }

  void set_encoding(int value) {
    int old = *(int*)this;
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

  InstructionKind kind() const {
    return (InstructionKind) ((encoding() >> 20) & 0xf2);
  }
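
  // kind() keeps bits 27..24 and bit 21 of the instruction word, enough to
  // distinguish the word/byte ldr/str class (0x50), the ldrh/strh class
  // (0x10) and the coprocessor (VFP fld/fst) load/store class (0xd0)
  // listed in InstructionKind above.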

  bool is_nop()            const { return encoding() == (int)0xe1a00000; }
  bool is_b()              const { return (encoding() & 0x0f000000) == 0x0a000000; }
  bool is_bx()             const { return (encoding() & 0x0ffffff0) == 0x012fff10; }
  bool is_bl()             const { return (encoding() & 0x0f000000) == 0x0b000000; }
  bool is_blx()            const { return (encoding() & 0x0ffffff0) == 0x012fff30; }
  bool is_fat_call()       const {
    return (is_add_lr() && next_raw()->is_jump());
  }
  bool is_ldr_call()       const {
    return (is_add_lr() && next_raw()->is_ldr_pc());
  }
  bool is_jump()           const { return is_b() || is_ldr_pc(); }
  bool is_call()           const { return is_bl() || is_fat_call(); }
  bool is_branch()         const { return is_b() || is_bl(); }
  bool is_far_branch()     const { return is_movw() || is_ldr_literal(); }
  bool is_ldr_literal()    const {
    // ldr Rx, [PC, #offset] for positive or negative offsets
    return (encoding() & 0x0f7f0000) == 0x051f0000;
  }
  bool is_ldr()    const {
    // ldr Rd, [Rn, #offset] for positive or negative offsets
    return (encoding() & 0x0f700000) == 0x05100000;
  }
  int ldr_offset() const {
    assert(is_ldr(), "must be");
    int offset = encoding() & 0xfff;
    if ((encoding() & (1 << 23)) == 0) {
      // U bit clear: the imm12 offset is subtracted from the base
      offset = -offset;
    }
    return offset;
  }
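
  // Example: "ldr Rd, [Rn, #-4]" encodes imm12 == 4 with the U bit
  // (bit 23) clear, so ldr_offset() returns -4; with U set it returns +4.
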
  // is_ldr_pc: ldr PC, [PC, #offset]
  bool is_ldr_pc()         const { return (encoding() & 0x0f7ff000) == 0x051ff000; }
  // is_setting_pc(): ldr PC, [Rxx, #offset]
  bool is_setting_pc()         const { return (encoding() & 0x0f70f000) == 0x0510f000; }
  bool is_add_lr()         const { return (encoding() & 0x0ffff000) == 0x028fe000; }
  bool is_add_pc()         const { return (encoding() & 0x0fff0000) == 0x028f0000; }
  bool is_sub_pc()         const { return (encoding() & 0x0fff0000) == 0x024f0000; }
  bool is_pc_rel()         const { return is_add_pc() || is_sub_pc(); }
  bool is_movw()           const { return (encoding() & 0x0ff00000) == 0x03000000; }
  bool is_movt()           const { return (encoding() & 0x0ff00000) == 0x03400000; }
  // C2 doesn't use a fixed base register for the safepoint poll address
  bool is_safepoint_poll() const { return (encoding() & 0xfff0ffff) == 0xe590c000; }
  // For unit tests
  static void test() {}

};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  return (RawNativeInstruction*)address;
}

// Base class exported to the front-end
class NativeInstruction: public RawNativeInstruction {
public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined, to avoid confusion.
  //
  // The front-end and most classes, except for those defined in nativeInst_arm
  // or relocInfo_arm, should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// -------------------------------------------------------------------
// Raw b() or bl() instructions, not used by the front-end.
class RawNativeBranch: public RawNativeInstruction {
 public:

  address destination(int adj = 0) const {
    return instruction_address() + (encoding() << 8 >> 6) + 8 + adj;
  }
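
  // The b/bl target is a signed 24-bit word offset in bits 23..0 of the
  // encoding: "<< 8" moves its sign bit into bit 31 and the arithmetic
  // ">> 6" sign-extends while multiplying by 4, so the result is a byte
  // offset taken from the instruction address + 8 (the ARM PC bias).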

  void set_destination(address dest) {
    int new_offset = (int)(dest - instruction_address() - 8);
    assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
    set_encoding((encoding() & 0xff000000) | ((unsigned int)new_offset << 6 >> 8));
  }
};

inline RawNativeBranch* rawNativeBranch_at(address address) {
  assert(rawNativeInstruction_at(address)->is_branch(), "must be");
  return (RawNativeBranch*)address;
}
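
// A minimal retargeting sketch ('pc' and 'new_target' are hypothetical):
//   RawNativeBranch* b = rawNativeBranch_at(pc);
//   if (b->destination() != new_target) {
//     b->set_destination(new_target);  // re-encodes the 24-bit word offset
//   }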

class NativeBranch: public RawNativeBranch {
};

inline NativeBranch* nativeBranch_at(address address) {
  return (NativeBranch *) rawNativeBranch_at(address);
}

// -------------------------------------------------------------------
// NativeGeneralJump is for patchable internal (near) jumps.
// It is used directly by the front-end and must be a single instruction wide
// (to support patching to other kinds of instructions).
class NativeGeneralJump: public RawNativeInstruction {
 public:

  address jump_destination() const {
    return rawNativeBranch_at(instruction_address())->destination();
  }

  void set_jump_destination(address dest) {
    return rawNativeBranch_at(instruction_address())->set_destination(dest);
  }

  static void insert_unconditional(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer) {
    assert(((int)instr_addr & 3) == 0 && ((int)code_buffer & 3) == 0, "must be aligned");
    // Writing a word is atomic on ARM, so no MT-safe tricks are needed
    rawNativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
  }
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (NativeGeneralJump*)address;
}
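
// A minimal MT-safe patching sketch ('site' and 'buf' are hypothetical
// word-aligned addresses):
//   NativeGeneralJump::insert_unconditional(buf, target);  // stage the jump
//   NativeGeneralJump::replace_mt_safe(site, buf);         // single atomic word write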

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

  address jump_destination(int adj = 0) const {
    address a;
    if (is_b()) {
      a = rawNativeBranch_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      a = *(address*)(instruction_address() + 8 + offset);
    }
    return a;
  }
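
  // In the ldr_pc case the target lives in a literal word at
  // instruction_address() + 8 + ldr_offset(); set_jump_destination()
  // below rewrites that word in place instead of re-encoding a branch.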

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      *(address*)(instruction_address() + 8 + offset) = dest;
      OrderAccess::storeload(); // overkill if caller holds lock?
    }
  }

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);

};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
  // See IC calls in LIR_Assembler::ic_call(): ARM v5/v6 doesn't use a
  // single bl for IC calls.

 public:

  address return_address() const {
    if (is_bl()) {
      return addr_at(instruction_size);
    } else {
      assert(is_fat_call(), "must be");
      // The immediate of the leading 'add LR, PC, #imm' (low 8 bits,
      // rotation assumed zero) gives the return address relative to PC
      int offset = encoding() & 0xff;
      return addr_at(offset + 8);
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->destination(adj);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->jump_destination(adj);
    }
  }

  void set_destination(address dest) {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->set_jump_destination(dest);
    }
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "external destination might be too far");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call() || (!VM_Version::supports_movw() && RawNativeInstruction::is_jump()), "must be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }

  static bool is_call_before(address return_address);
};

inline RawNativeCall* rawNativeCall_at(address address) {
  assert(rawNativeInstruction_at(address)->is_call(), "must be");
  return (RawNativeCall*)address;
}

class NativeCall;
NativeCall* rawNativeCall_before(address return_address);

// -------------------------------------------------------------------
// NativeMovRegMem need not be extended with indirection support.
// (field access patching is handled differently in that case)
class NativeMovRegMem: public NativeInstruction {
 public:

  int offset() const;
  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }

};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* instr = (NativeMovRegMem*)address;
  assert(instr->kind() == NativeInstruction::instr_ldr_str   ||
         instr->kind() == NativeInstruction::instr_ldrh_strh ||
         instr->kind() == NativeInstruction::instr_fld_fst, "must be");
  return instr;
}
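
// A field-access patching sketch ('pc_of_access' and 'delta' are hypothetical):
//   NativeMovRegMem* access = nativeMovRegMem_at(pc_of_access);
//   access->add_offset_in_bytes(delta);  // retargets the ldr/str displacement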

// -------------------------------------------------------------------
// NativeMovConstReg is primarily for loading oops and metadata
class NativeMovConstReg: public NativeInstruction {
 public:

  intptr_t data() const;
  void set_data(intptr_t x, address pc = 0);
  bool is_pc_relative() {
    return !is_movw();
  }
  void set_pc_relative_offset(address addr, address pc);
  address next_instruction_address() const {
    // NOTE: CompiledStaticCall::set_to_interpreted() calls this, but such
    // calls are restricted to a single-instruction ldr, so there is no
    // need to jump over several instructions.
    assert(is_ldr_literal(), "should only be used with single-instruction loads");
    return next_raw_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeInstruction* ni = nativeInstruction_at(address);
  assert(ni->is_ldr_literal() || ni->is_pc_rel() ||
         (ni->is_movw() && VM_Version::supports_movw()), "must be");
  return (NativeMovConstReg*)address;
}
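
// An oop-patching sketch ('pc_of_load' and 'new_oop' are hypothetical):
//   NativeMovConstReg* load = nativeMovConstReg_at(pc_of_load);
//   load->set_data((intptr_t)new_oop);  // updates the literal or movw/movt pair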

// -------------------------------------------------------------------
// Front-end classes, hiding experimental back-end extensions.

// Extension to support indirections
class NativeJump: public RawNativeJump {
 public:
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

class NativeCall: public RawNativeCall {
public:
  // NativeCall::next_instruction_address() is used only to define the
  // range in which to look for the relocation information. We need not
  // walk over composed instructions (as long as the relocation information
  // is associated with the first instruction).
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

};

inline NativeCall* nativeCall_at(address address) {
  assert(nativeInstruction_at(address)->is_call() ||
         (!VM_Version::supports_movw() && nativeInstruction_at(address)->is_jump()), "must be");
  return (NativeCall*)address;
}

inline NativeCall* nativeCall_before(address return_address) {
  return (NativeCall *) rawNativeCall_before(return_address);
}

#endif // CPU_ARM_VM_NATIVEINST_ARM_32_HPP