/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_NATIVEINST_ARM_32_HPP
#define CPU_ARM_VM_NATIVEINST_ARM_32_HPP

#include "asm/macroAssembler.hpp"
#include "code/codeCache.hpp"
#include "code/codeCacheExtensions.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "register_arm.hpp"

// -------------------------------------------------------------------

// Some experimental projects extend the ARM back-end by implementing
// what the front-end usually assumes is a single native instruction
// with a sequence of instructions.
//
// The 'Raw' variants are the low-level initial code (usually a single
// instruction, though some are already composed sequences). They should
// be used only by the back-end.
//
// The non-raw classes are the front-end entry point, hiding potential
// back-end extensions or the actual instruction size.
class NativeInstruction;

class RawNativeInstruction VALUE_OBJ_CLASS_SPEC {
 public:

  enum ARM_specific {
    instruction_size = Assembler::InstructionSize
  };

  enum InstructionKind {
    instr_ldr_str    = 0x50,
    instr_ldrh_strh  = 0x10,
    instr_fld_fst    = 0xd0
  };

  // illegal instruction used by NativeJump::patch_verified_entry
  // permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
  static const int zombie_illegal_instruction = 0xe7f000f0;

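  // An ARM rotated-imm12 operand encodes an 8-bit base value rotated right
  // by twice the 4-bit rot field. Worked example (illustrative): for
  // encoding 0x101, base = 0x01 and rot = 1, so right_rotation = 2 and the
  // decoded value is ror(0x01, 2) = 0x40000000.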
  static int decode_rotated_imm12(int encoding) {
    int base = encoding & 0xff;
    int right_rotation = (encoding & 0xf00) >> 7;
    int left_rotation = 32 - right_rotation;
    int val = (base >> right_rotation) | (base << left_rotation);
    return val;
  }

  address addr_at(int offset)        const { return (address)this + offset; }
  address instruction_address()      const { return addr_at(0); }
  address next_raw_instruction_address() const { return addr_at(instruction_size); }

  static RawNativeInstruction* at(address address) {
    return (RawNativeInstruction*)address;
  }
  RawNativeInstruction* next_raw() const {
    return at(next_raw_instruction_address());
  }

 public:
  int encoding()                     const { return *(int*)this; }

  void set_encoding(int value) {
    int old = *(int*)this;
    if (old != value) {
      *(int*)this = value;
      ICache::invalidate_word((address)this);
    }
  }

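  // kind() keeps bits 27..24 and bit 21 of the encoding (mask 0xf2 applied
  // to encoding() >> 20), which is enough to distinguish the ldr/str,
  // ldrh/strh and fld/fst groups listed in InstructionKind.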
  InstructionKind kind() const {
    return (InstructionKind) ((encoding() >> 20) & 0xf2);
  }

  bool is_nop()            const { return encoding() == (int)0xe1a00000; }
  bool is_b()              const { return (encoding() & 0x0f000000) == 0x0a000000; }
  bool is_bx()             const { return (encoding() & 0x0ffffff0) == 0x012fff10; }
  bool is_bl()             const { return (encoding() & 0x0f000000) == 0x0b000000; }
  bool is_blx()            const { return (encoding() & 0x0ffffff0) == 0x012fff30; }
  bool is_fat_call()       const {
    return (is_add_lr() && next_raw()->is_jump());
  }
  bool is_ldr_call()       const {
    return (is_add_lr() && next_raw()->is_ldr_pc());
  }
  bool is_jump()           const { return is_b() || is_ldr_pc(); }
  bool is_call()           const { return is_bl() || is_fat_call(); }
  bool is_branch()         const { return is_b() || is_bl(); }
  bool is_far_branch()     const { return is_movw() || is_ldr_literal(); }
  bool is_ldr_literal()    const {
    // ldr Rx, [PC, #offset] for positive or negative offsets
    return (encoding() & 0x0f7f0000) == 0x051f0000;
  }
  bool is_ldr()    const {
    // ldr Rd, [Rn, #offset] for positive or negative offsets
    return (encoding() & 0x0f700000) == 0x05100000;
  }
  int ldr_offset() const {
    assert(is_ldr(), "must be");
    int offset = encoding() & 0xfff;
    if (encoding() & (1 << 23)) {
      // positive offset
    } else {
      // negative offset
      offset = -offset;
    }
    return offset;
  }
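  // Worked example (illustrative): 0xe51f3008 is ldr r3, [pc, #-8]
  // (the U bit, bit 23, is clear), so ldr_offset() returns -8.
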
  // is_ldr_pc: ldr PC, [PC, #offset]
  bool is_ldr_pc()         const { return (encoding() & 0x0f7ff000) == 0x051ff000; }
  // is_setting_pc(): ldr PC, [Rxx, #offset]
  bool is_setting_pc()         const { return (encoding() & 0x0f70f000) == 0x0510f000; }
  bool is_add_lr()         const { return (encoding() & 0x0ffff000) == 0x028fe000; }
  bool is_add_pc()         const { return (encoding() & 0x0fff0000) == 0x028f0000; }
  bool is_sub_pc()         const { return (encoding() & 0x0fff0000) == 0x024f0000; }
  bool is_pc_rel()         const { return is_add_pc() || is_sub_pc(); }
  bool is_movw()           const { return (encoding() & 0x0ff00000) == 0x03000000; }
  bool is_movt()           const { return (encoding() & 0x0ff00000) == 0x03400000; }
  // c2 doesn't use fixed registers for safepoint poll address
  bool is_safepoint_poll() const { return (encoding() & 0xfff0ffff) == 0xe590c000; }
  // For unit tests
  static void test() {}

};

inline RawNativeInstruction* rawNativeInstruction_at(address address) {
  return (RawNativeInstruction*)address;
}

// Base class exported to the front-end
class NativeInstruction: public RawNativeInstruction {
public:
  static NativeInstruction* at(address address) {
    return (NativeInstruction*)address;
  }

public:
  // No need to consider indirections while parsing NativeInstruction
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

  // next() is no longer defined to avoid confusion.
  //
  // The front-end and most classes, except for those defined in nativeInst_arm
  // or relocInfo_arm, should only use next_instruction_address(), skipping
  // over composed instructions and ignoring back-end extensions.
  //
  // The back-end can use next_raw() when it knows the instruction sequence
  // and only wants to skip a single native instruction.
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}
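
// Illustrative front-end walk (a sketch, not part of the original header;
// 'start' and 'limit' are hypothetical bounds):
//   for (NativeInstruction* ni = nativeInstruction_at(start);
//        ni->instruction_address() < limit;
//        ni = nativeInstruction_at(ni->next_instruction_address())) {
//     if (ni->is_safepoint_poll()) { /* ... */ }
//   }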

// -------------------------------------------------------------------
// Raw b() or bl() instructions, not used by the front-end.
class RawNativeBranch: public RawNativeInstruction {
 public:

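  // A b()/bl() target is the branch address + 8 (pipeline effect) plus the
  // sign-extended 24-bit word offset scaled by 4. '(encoding() << 8 >> 6)'
  // computes that: the left shift places imm24 at the top of the word and
  // the arithmetic right shift sign-extends it, leaving a x4 scale.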
  address destination(int adj = 0) const {
    return instruction_address() + (encoding() << 8 >> 6) + 8 + adj;
  }

  void set_destination(address dest) {
    int new_offset = (int)(dest - instruction_address() - 8);
    assert(new_offset < 0x2000000 && new_offset > -0x2000000, "encoding constraint");
    set_encoding((encoding() & 0xff000000) | ((unsigned int)new_offset << 6 >> 8));
  }
};

inline RawNativeBranch* rawNativeBranch_at(address address) {
  assert(rawNativeInstruction_at(address)->is_branch(), "must be");
  return (RawNativeBranch*)address;
}

class NativeBranch: public RawNativeBranch {
};

inline NativeBranch* nativeBranch_at(address address) {
  return (NativeBranch *) rawNativeBranch_at(address);
}

// -------------------------------------------------------------------
// NativeGeneralJump is for patchable internal (near) jumps.
// It is used directly by the front-end and must be a single instruction wide
// (to support patching to other kinds of instructions).
class NativeGeneralJump: public RawNativeInstruction {
 public:

  address jump_destination() const {
    return rawNativeBranch_at(instruction_address())->destination();
  }

  void set_jump_destination(address dest) {
    return rawNativeBranch_at(instruction_address())->set_destination(dest);
  }

  static void insert_unconditional(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer) {
    assert(((int)instr_addr & 3) == 0 && ((int)code_buffer & 3) == 0, "must be aligned");
    // Writing a word is atomic on ARM, so no MT-safe tricks are needed
    rawNativeInstruction_at(instr_addr)->set_encoding(*(int*)code_buffer);
  }
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (NativeGeneralJump*)address;
}

// -------------------------------------------------------------------
class RawNativeJump: public NativeInstruction {
 public:

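  // For the ldr_pc form (ldr PC, [PC, #offset]), the target is held in a
  // literal word at instruction_address() + 8 + offset; jump_destination()
  // reads that slot and set_jump_destination() rewrites it.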
  address jump_destination(int adj = 0) const {
    address a;
    if (is_b()) {
      a = rawNativeBranch_at(instruction_address())->destination(adj);
      // Jump destination -1 is encoded as a jump to self
      if (a == instruction_address()) {
        return (address)-1;
      }
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      a = *(address*)(instruction_address() + 8 + offset);
    }
    return a;
  }

  void set_jump_destination(address dest) {
    address a;
    if (is_b()) {
      // Jump destination -1 is encoded as a jump to self
      if (dest == (address)-1) {
        dest = instruction_address();
      }
      rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_ldr_pc(), "must be");
      int offset = this->ldr_offset();
      *(address*)(instruction_address() + 8 + offset) = dest;
      OrderAccess::storeload(); // overkill if caller holds lock?
    }
  }

  static void check_verified_entry_alignment(address entry, address verified_entry);

  static void patch_verified_entry(address entry, address verified_entry, address dest);

};

inline RawNativeJump* rawNativeJump_at(address address) {
  assert(rawNativeInstruction_at(address)->is_jump(), "must be");
  return (RawNativeJump*)address;
}

// -------------------------------------------------------------------
class RawNativeCall: public NativeInstruction {
  // See IC calls in LIR_Assembler::ic_call(): ARM v5/v6 doesn't use a
  // single bl for IC calls.

 public:

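  // For a fat call, the first instruction is add LR, PC, #imm (is_add_lr())
  // and the low byte of the encoding is taken as the byte distance from
  // PC + 8 to the return address; this presumably relies on the back-end
  // emitting the immediate with a zero rotate field.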
  address return_address() const {
    if (is_bl()) {
      return addr_at(instruction_size);
    } else {
      assert(is_fat_call(), "must be");
      int offset = encoding() & 0xff;
      return addr_at(offset + 8);
    }
  }

  address destination(int adj = 0) const {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->destination(adj);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->jump_destination(adj);
    }
  }

  void set_destination(address dest) {
    if (is_bl()) {
      return rawNativeBranch_at(instruction_address())->set_destination(dest);
    } else {
      assert(is_add_lr(), "must be"); // fat_call
      RawNativeJump *next = rawNativeJump_at(next_raw_instruction_address());
      return next->set_jump_destination(dest);
    }
  }

  void set_destination_mt_safe(address dest) {
    assert(CodeCache::contains(dest), "external destination might be too far");
    set_destination(dest);
  }

  void verify() {
    assert(RawNativeInstruction::is_call() || (!VM_Version::supports_movw() && RawNativeInstruction::is_jump()), "must be");
  }

  void verify_alignment() {
    // Nothing to do on ARM
  }

  static bool is_call_before(address return_address);
};

inline RawNativeCall* rawNativeCall_at(address address) {
  assert(rawNativeInstruction_at(address)->is_call(), "must be");
  return (RawNativeCall*)address;
}

class NativeCall;
NativeCall* rawNativeCall_before(address return_address);

// -------------------------------------------------------------------
// NativeMovRegMem need not be extended with indirection support.
// (field access patching is handled differently in that case)
class NativeMovRegMem: public NativeInstruction {
 public:

  int offset() const;
  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) {
    set_offset(offset() + add_offset);
  }

};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* instr = (NativeMovRegMem*)address;
  assert(instr->kind() == NativeInstruction::instr_ldr_str   ||
         instr->kind() == NativeInstruction::instr_ldrh_strh ||
         instr->kind() == NativeInstruction::instr_fld_fst, "must be");
  return instr;
}
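
// Illustrative field-access patching (a sketch, not part of the original
// header; 'addr' and 'field_offset_in_bytes' are hypothetical):
//   nativeMovRegMem_at(addr)->set_offset(field_offset_in_bytes);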

// -------------------------------------------------------------------
// NativeMovConstReg is primarily for loading oops and metadata
class NativeMovConstReg: public NativeInstruction {
 public:

  intptr_t data() const;
  void set_data(intptr_t x, address pc = 0);
  bool is_pc_relative() {
    return !is_movw();
  }
  void set_pc_relative_offset(address addr, address pc);
  address next_instruction_address() const {
    // NOTE: CompiledStaticCall::set_to_interpreted() calls this, but such
    // calls are restricted to a single-instruction ldr, so there is no
    // need to jump over several instructions.
    assert(is_ldr_literal(), "Should only use single-instruction load");
    return next_raw_instruction_address();
  }
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeInstruction* ni = nativeInstruction_at(address);
  assert(ni->is_ldr_literal() || ni->is_pc_rel() ||
         (ni->is_movw() && VM_Version::supports_movw()), "must be");
  return (NativeMovConstReg*)address;
}
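
// Illustrative constant patching (a sketch, not part of the original header;
// 'addr' and 'new_oop' are hypothetical):
//   nativeMovConstReg_at(addr)->set_data((intptr_t)new_oop);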

// -------------------------------------------------------------------
// Front-end classes, hiding experimental back-end extensions.

// Extension to support indirections
class NativeJump: public RawNativeJump {
 public:
};

inline NativeJump* nativeJump_at(address address) {
  assert(nativeInstruction_at(address)->is_jump(), "must be");
  return (NativeJump*)address;
}

class NativeCall: public RawNativeCall {
public:
  // NativeCall::next_instruction_address() is used only to define the
  // range where to look for the relocation information. We need not
  // walk over composed instructions (as long as the relocation information
  // is associated with the first instruction).
  address next_instruction_address() const {
    return next_raw_instruction_address();
  }

};

inline NativeCall* nativeCall_at(address address) {
  assert(nativeInstruction_at(address)->is_call() ||
         (!VM_Version::supports_movw() && nativeInstruction_at(address)->is_jump()), "must be");
  return (NativeCall*)address;
}

inline NativeCall* nativeCall_before(address return_address) {
  return (NativeCall *) rawNativeCall_before(return_address);
}
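
// Illustrative use (a sketch, not part of the original header; 'ret_pc' is
// a hypothetical return address):
//   if (RawNativeCall::is_call_before(ret_pc)) {
//     address target = nativeCall_before(ret_pc)->destination();
//   }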

#endif // CPU_ARM_VM_NATIVEINST_ARM_32_HPP