/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_32_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_32_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "register_arm.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes to be a single native instruction
// as a sequence of instructions.
//
// The 'Raw' variants are the low-level initial code (usually one
// instruction wide, though some of them were already composed
// instructions). They should be used only by the back-end.
//
// The non-raw classes are the front-end entry points, hiding potential
// back-end extensions and the actual instruction size.
class NativeInstruction;

class RawNativeInstruction {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize
  };

  enum InstructionKind {
    instr_ldr_str    = 0x50,
    instr_ldrh_strh  = 0x10,
    instr_fld_fst    = 0xd0
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
  static const int zombie_illegal_instruction = 0xe7f000f0;

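  // Decodes an ARM 'rotated immediate': an 8-bit base value rotated right
  // by twice the 4-bit rotation field. For example, imm12 0x4ff denotes
  // 0xff rotated right by 8 bits, so:
  //   decode_rotated_imm12(0x4ff) == (int)0xff000000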
  static int decode_rotated_imm12(int encoding) {
    int base = encoding & 0xff;
    int right_rotation = (encoding & 0xf00) >> 7;
    int left_rotation = 32 - right_rotation;
    int val = (base >> right_rotation) | (base << left_rotation);
    return val;
  }

  address addr_at(int offset)        const { return (address)this + offset; }
  address instruction_address()      const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }
  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

 public:
  int encoding()                     const { return *(int*)this; }

  void set_encoding(int value) {
    int old = *(int*)this;
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

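  // Extracts the opcode bits that distinguish the memory-access forms
  // listed in InstructionKind. For example, 'ldr r0, [r1]' encodes as
  // 0xe5910000, and ((0xe5910000 >> 20) & 0xf2) == 0x50 == instr_ldr_str.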
  InstructionKind kind() const {
    return (InstructionKind) ((encoding() >> 20) & 0xf2);
  }

  bool is_nop()            const { return encoding() == (int)0xe1a00000; }
  bool is_b()              const { return (encoding() & 0x0f000000) == 0x0a000000; }
  bool is_bx()             const { return (encoding() & 0x0ffffff0) == 0x012fff10; }
  bool is_bl()             const { return (encoding() & 0x0f000000) == 0x0b000000; }
  bool is_blx()            const { return (encoding() & 0x0ffffff0) == 0x012fff30; }
  bool is_fat_call()       const {
    return (is_add_lr() && next_raw()->is_jump());
  }
  bool is_ldr_call()       const {
    return (is_add_lr() && next_raw()->is_ldr_pc());
  }
  bool is_jump()           const { return is_b() || is_ldr_pc(); }
  bool is_call()           const { return is_bl() || is_fat_call(); }
  bool is_branch()         const { return is_b() || is_bl(); }
  bool is_far_branch()     const { return is_movw() || is_ldr_literal(); }
  bool is_ldr_literal()    const {
    // ldr Rx, [PC, #offset] for positive or negative offsets
    return (encoding() & 0x0f7f0000) == 0x051f0000;
  }
  bool is_ldr()    const {
    // ldr Rd, [Rn, #offset] for positive or negative offsets
    return (encoding() & 0x0f700000) == 0x05100000;
  }
  int ldr_offset() const {
    assert(is_ldr(), "must be");
    int offset = encoding() & 0xfff;
    if (encoding() & (1 << 23)) {
      // positive offset
    } else {
      // negative offset
      offset = -offset;
    }
    return offset;
  }
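  // For example, 'ldr r0, [pc, #-4]' encodes as 0xe51f0004: bit 23 (the
  // U bit) is clear, so ldr_offset() returns -4.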
  // is_ldr_pc: ldr PC, [PC, #offset]
  bool is_ldr_pc()         const { return (encoding() & 0x0f7ff000) == 0x051ff000; }
  // is_setting_pc(): ldr PC, [Rxx, #offset]
  bool is_setting_pc()         const { return (encoding() & 0x0f70f000) == 0x0510f000; }
  bool is_add_lr()         const { return (encoding() & 0x0ffff000) == 0x028fe000; }
  bool is_add_pc()         const { return (encoding() & 0x0fff0000) == 0x028f0000; }
  bool is_sub_pc()         const { return (encoding() & 0x0fff0000) == 0x024f0000; }
  bool is_pc_rel()         const { return is_add_pc() || is_sub_pc(); }
  bool is_movw()           const { return (encoding() & 0x0ff00000) == 0x03000000; }
  bool is_movt()           const { return (encoding() & 0x0ff00000) == 0x03400000; }
  // c2 doesn't use a fixed register for the safepoint poll address
  bool is_safepoint_poll() const { return (encoding() & 0xfff0ffff) == 0xe590c000; }
  // For unit tests
  static void test() {}

};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  return (RawNativeInstruction*)address;
}
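
// A minimal usage sketch (the 'pc' value below is hypothetical): classify
// an instruction word before deciding how to patch it.
//
//   RawNativeInstruction* insn = rawNativeInstruction_at(pc);
//   if (insn->is_bl()) {
//     // single-instruction call
//   } else if (insn->is_fat_call()) {
//     // 'add lr, pc, #off' followed by a jump
//   }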

// Base class exported to the front-end
class NativeInstruction: public RawNativeInstruction {
public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined, to avoid confusion.
  //
  // The front-end, and most classes except for those defined in nativeInst_arm
  // or relocInfo_arm, should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// -------------------------------------------------------------------
// Raw b() or bl() instructions, not used by the front-end.
class RawNativeBranch: public RawNativeInstruction {
 public:

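  // The low 24 bits of a b()/bl() encoding hold a signed word offset:
  // '<< 8 >> 6' sign-extends it and multiplies by 4, and the extra 8
  // accounts for the PC reading two instructions ahead. For example,
  // 0xeafffffe ('b .') decodes to an offset of -8, a branch to itself.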
  address destination(int adj = 0) const {
    return instruction_address() + (encoding() << 8 >> 6) + 8 + adj;
  }

  void set_destination(address dest) {
    int new_offset = (int)(dest - instruction_address() - 8);
    assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
    set_encoding((encoding() & 0xff000000) | ((unsigned int)new_offset << 6 >> 8));
  }
};

inline RawNativeBranch* rawNativeBranch_at(address address) {
  assert(rawNativeInstruction_at(address)->is_branch(), "must be");
  return (RawNativeBranch*)address;
}

class NativeBranch: public RawNativeBranch {
};

inline NativeBranch* nativeBranch_at(address address) {
  return (NativeBranch *) rawNativeBranch_at(address);
}

// -------------------------------------------------------------------
// NativeGeneralJump is for patchable internal (near) jumps.
// It is used directly by the front-end and must be a single instruction wide
// (to support patching to other kinds of instructions).
class NativeGeneralJump: public RawNativeInstruction {
 public:

  address jump_destination() const {
    return rawNativeBranch_at(instruction_address())->destination();
  }

  void set_jump_destination(address dest) {
    return rawNativeBranch_at(instruction_address())->set_destination(dest);
  }

  static void insert_unconditional(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer) {
    assert(((int)instr_addr & 3) == 0 && ((int)code_buffer & 3) == 0, "must be aligned");
    // Writing a word is atomic on ARM, so no MT-safe tricks are needed
    rawNativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
  }
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

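  // A jump is either a pc-relative 'b' or 'ldr pc, [pc, #offset]', whose
  // target is a word held in a literal pool at pc + 8 + offset.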
  address jump_destination(int adj = 0) const {
    address a;
    if (is_b()) {
      a = rawNativeBranch_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      a = *(address*)(instruction_address() + 8 + offset);
    }
    return a;
  }

  void set_jump_destination(address dest) {
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      *(address*)(instruction_address() + 8 + offset) = dest;
      OrderAccess::storeload(); // overkill if caller holds lock?
    }
  }

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);

};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
  // See IC calls in LIR_Assembler::ic_call(): ARM v5/v6 doesn't use a
  // single bl for IC calls.

 public:

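  // For a fat call, the leading 'add lr, pc, #imm' materializes the return
  // address: the code below assumes a zero rotation field, so the low 8
  // bits of the encoding give the byte offset from pc + 8.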
  address return_address() const {
    if (is_bl()) {
      return addr_at(instruction_size);
    } else {
      assert(is_fat_call(), "must be");
      int offset = encoding() & 0xff;
      return addr_at(offset + 8);
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->destination(adj);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->jump_destination(adj);
    }
  }

  void set_destination(address dest) {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->set_jump_destination(dest);
    }
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "external destination might be too far");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call() || (!VM_Version::supports_movw() && RawNativeInstruction::is_jump()), "must be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }

  static bool is_call_before(address return_address);
};

inline RawNativeCall* rawNativeCall_at(address address) {
  assert(rawNativeInstruction_at(address)->is_call(), "must be");
  return (RawNativeCall*)address;
}

class NativeCall;
NativeCall* rawNativeCall_before(address return_address);

// -------------------------------------------------------------------
// NativeMovRegMem need not be extended with indirection support.
// (field access patching is handled differently in that case)
class NativeMovRegMem: public NativeInstruction {
 public:

  int offset() const;
  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }

};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* instr = (NativeMovRegMem*)address;
  assert(instr->kind() == NativeInstruction::instr_ldr_str   ||
         instr->kind() == NativeInstruction::instr_ldrh_strh ||
         instr->kind() == NativeInstruction::instr_fld_fst, "must be");
  return instr;
}

// -------------------------------------------------------------------
// NativeMovConstReg is primarily for loading oops and metadata.
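// It matches one of three materialization patterns: a single ldr from a
// literal pool, an add/sub relative to PC, or a movw/movt pair when the
// CPU supports it (see the assert in nativeMovConstReg_at below).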
class NativeMovConstReg: public NativeInstruction {
 public:

  intptr_t data() const;
  void set_data(intptr_t x, address pc = 0);
  bool is_pc_relative() {
    return !is_movw();
  }
  void set_pc_relative_offset(address addr, address pc);
  address next_instruction_address() const {
    // NOTE: CompiledStaticCall::set_to_interpreted() calls this but is
    // restricted to a single-instruction ldr, so there is no need to jump
    // over several instructions.
    assert(is_ldr_literal(), "should only be used for single-instruction loads");
    return next_raw_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeInstruction* ni = nativeInstruction_at(address);
  assert(ni->is_ldr_literal() || ni->is_pc_rel() ||
         (ni->is_movw() && VM_Version::supports_movw()), "must be");
  return (NativeMovConstReg*)address;
}

// -------------------------------------------------------------------
// Front-end classes, hiding experimental back-end extensions.

// Extension to support indirections
class NativeJump: public RawNativeJump {
 public:
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

class NativeCall: public RawNativeCall {
public:
  // NativeCall::next_instruction_address() is used only to define the
  // range in which to look for relocation information. There is no need to
  // walk over composed instructions, as long as the relocation information
  // is associated with the first instruction.
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

};

inline NativeCall* nativeCall_at(address address) {
  assert(nativeInstruction_at(address)->is_call() ||
         (!VM_Version::supports_movw() && nativeInstruction_at(address)->is_jump()), "must be");
  return (NativeCall*)address;
}

inline NativeCall* nativeCall_before(address return_address) {
  return (NativeCall *) rawNativeCall_before(return_address);
}

#endif // CPU_ARM_VM_NATIVEINST_ARM_32_HPP