1 /*
   2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #ifndef CPU_AARCH32_VM_NATIVEINST_AARCH32_HPP
  28 #define CPU_AARCH32_VM_NATIVEINST_AARCH32_HPP
  29 
  30 #include "asm/assembler.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "runtime/icache.hpp"
  33 #include "runtime/os.hpp"
  34 #include "utilities/top.hpp"
  35 
  36 // We have interfaces for the following instructions:
  37 // - NativeInstruction
  38 // - - NativeCall
  39 // - - NativeMovConstReg
  40 // - - NativeMovRegMem
  41 // - - NativeMovRegMemPatching
  42 // - - NativeJump
// - - NativeIllegalInstruction
  44 // - - NativeGeneralJump
  45 // - - NativeReturn
  46 // - - NativeReturnX (return with argument)
  47 // - - NativePushConst
  48 // - - NativeTstRegMem
  49 
  50 // The base class for different kinds of native instruction abstractions.
  51 // Provides the primitive operations to manipulate code relative to this.
  52 
  53 class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  54   friend class Relocation;
  55   friend bool is_NativeCallTrampolineStub_at(address);
  56  public:
  57   enum { arm_insn_sz = 4 };
  58 
  59   inline bool is_nop();
  60   inline bool is_illegal();
  61   inline bool is_return();
  62   inline bool is_jump_or_nop();
  63   inline bool is_cond_jump();
  64   bool is_safepoint_poll();
  65   bool is_movt();
  66   bool is_orr();
  67   bool is_sigill_zombie_not_entrant();
  68 
  69   bool is_movt(Register dst, unsigned imm, Assembler::Condition cond = Assembler::C_DFLT);
  70   bool is_movw(Register dst, unsigned imm, Assembler::Condition cond = Assembler::C_DFLT);
  71   bool is_ldr(Register dst, Address addr, Assembler::Condition cond = Assembler::C_DFLT);
  72 
  73   inline bool is_jump() const;
  74   inline bool is_call() const;
  75 
  76   inline bool is_mov_const_reg() const;
  77   inline bool is_reg_call() const;
  78   inline bool is_imm_call() const;
  79   inline bool is_reg_jump() const;
  80   inline bool is_imm_jump() const;
  81 
  82  protected:
  83   address addr() const { return address(this); }
  84   // TODO remove this, every command is 4byte long
  85 #if 1
  86   address addr_at(int offset) const    { return addr() + offset; }
  87 
  88   s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  89   u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }
  90 
  91   jint int_at(int offset) const        { return *(jint*) addr_at(offset); }
  92   juint uint_at(int offset) const      { return *(juint*) addr_at(offset); }
  93 
  94   address ptr_at(int offset) const     { return *(address*) addr_at(offset); }
  95 
  96   oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }
  97 
  98 
  99   void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; }
 100   void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
 101   void set_uint_at(int offset, jint  i)       { *(juint*)addr_at(offset) = i; }
 102   void set_ptr_at (int offset, address  ptr)  { *(address*) addr_at(offset) = ptr; }
 103   void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o; }
 104 #endif
 105 
 106   static juint as_uint(address addr) {
 107     return *(juint *) addr;
 108   }
 109 
 110   juint as_uint() const {
 111     return as_uint(addr());
 112   }
 113 
 114   void set_uint(juint v) {
 115     *(juint *) addr() = v;
 116   }
 117 
 118   void atomic_set_ulong_at(int offset, julong v) {
 119     address a = addr() + offset;
 120     assert(((uintptr_t) a) % 8 == 0, "should be aligned");
 121     Atomic::store(v, (volatile jlong *) a);
 122   }
 123 
 124  public:
 125 
 126   // unit test stuff
 127   static void test() {}                 // override for testing
 128 
 129   static bool is_at(address address);
 130   static NativeInstruction* from(address address);
 131 
 132 };
 133 
 134 inline NativeInstruction* nativeInstruction_at(address addr) {
 135   return NativeInstruction::from(addr);
 136 }
 137 
 138 inline NativeInstruction* nativeInstruction_at(uint32_t *addr) {
 139   return NativeInstruction::from(address(addr));
 140 }
 141 
// Common base for single-instruction branch encodings.
class NativeBranchType: public NativeInstruction {
 protected:
  // Does insn encode one of the branch instruction patterns?
  static bool is_branch_type(uint32_t insn);
  // Rewrite this branch's offset field so it targets addr.
  void patch_offset_to(address addr);
 public:
  enum {
    instruction_size = arm_insn_sz,
  };

  // A branch occupies exactly one 4-byte instruction slot.
  address next_instruction_address() const {
    return addr() + arm_insn_sz;
  }
};
 155 
// Loads a constant into a register, encoded either as a movw/movt
// immediate pair or as a single PC-relative ldr literal.
class NativeMovConstReg: public NativeInstruction {
 protected:
  // Is the code at instr the start of a movw/movt pair?
  static bool is_movw_movt_at(address instr);
  // Is the instruction at instr a PC-relative ldr literal load?
  static bool is_ldr_literal_at(address instr);
 public:
  enum {
    movw_movt_pair_sz = 2 * arm_insn_sz,  // two-instruction encoding
    ldr_sz = arm_insn_sz,                 // one-instruction encoding
    max_instruction_size = movw_movt_pair_sz,
    min_instruction_size = ldr_sz,
  };

  // Sequence length depends on which encoding was emitted here.
  address next_instruction_address() const  {
    if (is_movw_movt_at(addr())) {
      return addr() + movw_movt_pair_sz;
    } else if (is_ldr_literal_at(addr())) {
      return addr() + ldr_sz;
    }

    // Unknown instruction in NativeMovConstReg
    ShouldNotReachHere();
    return NULL;
  }

  // The constant value this sequence loads.
  intptr_t data() const;
  void set_data(intptr_t x);

  // The register being written.
  Register destination() const;
  void set_destination(Register r);

  // Invalidate the instruction cache over the maximal sequence extent.
  void flush() {
    ICache::invalidate_range(addr(), max_instruction_size);
  }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);

  static bool is_at(address instr);

  static NativeMovConstReg* from(address addr);
};
 204 
 205 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
 206   return NativeMovConstReg::from(address);
 207 }
 208 
 209 inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
 210   address mov_addr = NULL;
 211   if (NativeMovConstReg::is_movw_movt_at(addr - NativeMovConstReg::movw_movt_pair_sz)) {
 212     mov_addr = addr - NativeMovConstReg::movw_movt_pair_sz;
 213   } else if (NativeMovConstReg::is_ldr_literal_at(addr - NativeMovConstReg::ldr_sz)) {
 214     mov_addr = addr - NativeMovConstReg::ldr_sz;
 215   } else {
 216     ShouldNotReachHere();
 217   }
 218 
 219   NativeMovConstReg* test = (NativeMovConstReg*) mov_addr;
 220 #ifdef ASSERT
 221   test->verify();
 222 #endif
 223   return test;
 224 }
 225 
// A call routed through a trampoline sequence (three instruction slots),
// used when the target is not reachable by a direct branch.
class NativeTrampolineCall: public NativeBranchType {
 public:
  enum {
    instruction_size = 3 * arm_insn_sz
  };
  // Current call target.
  address destination() const;
  void set_destination(address dest);
  // Patch the target so free-running threads always observe a consistent
  // instruction stream; assert_lock disables the locking assertion
  // during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  static bool is_at(address address);
  static NativeTrampolineCall* from(address address);

  address next_instruction_address() const  {
    assert(is_at(addr()), "not call");
    return addr() + instruction_size;
  }
};
 243 
// A call that transfers control via a register holding the target.
class NativeRegCall: public NativeBranchType {
 public:

  // Register containing the call target.
  Register destination() const;
  void set_destination(Register r);

  static bool is_at(address address);
  static NativeRegCall* from(address address);
};
 253 
// Abstraction over the several call sequences this port emits; see the
// list below for the concrete forms it represents.
class NativeCall: public NativeInstruction {
  friend class Relocation;
 protected:
  // Returns the long-form jump/call at addr, if one is present.
  NativeInstruction* is_long_jump_or_call_at(address addr);

  // NativeCall represents:
  //  NativeImmCall,
  //  NativeMovConstReg + NativeBranchType,
  //  NativeTrampolineCall
 public:
  enum {
    instruction_size = 3 * arm_insn_sz
  };
#ifdef ASSERT
  // Compile-time checks that every represented call form fits within
  // instruction_size bytes.
  StaticAssert<(int) NativeTrampolineCall::instruction_size <= (int) instruction_size> dummy1;
  StaticAssert<NativeMovConstReg::movw_movt_pair_sz
      + NativeRegCall::instruction_size <= (int) instruction_size> dummy2;
#endif

  // Current call target.
  address destination() const;
  void set_destination(address dest);

  void  verify_alignment()                       { ; }  // no alignment constraint to check
  void  verify();
  void  print();

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const;
  // Address the callee returns to (just past the call sequence).
  address return_address() const;

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  // Similar to replace_mt_safe, but just changes the destination.  The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times.  If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  static bool is_at(address instr);
  static NativeCall* from(address instr);

  // Is there a recognized call sequence ending just before return_address?
  static bool is_call_before(address return_address);
};
 307 
 308 inline NativeCall* nativeCall_at(address address) {
 309   return NativeCall::from(address);
 310 }
 311 
 312 inline NativeCall* nativeCall_before(address return_address) {
 313   address call_addr = NULL;
 314   if (NativeCall::is_at(return_address - NativeBranchType::instruction_size)) {
 315     call_addr = return_address - NativeBranchType::instruction_size;
 316   } else if (NativeCall::is_at(return_address - NativeCall::instruction_size)) {
 317     call_addr = return_address - NativeCall::instruction_size;
 318   } else {
 319     ShouldNotReachHere();
 320   }
 321 
 322   return NativeCall::from(call_addr);
 323 }
 324 
 325 
 326 // An interface for accessing/manipulating native moves of the form:
 327 //      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
 329 //      mov[s/z]x[w/b/q] [reg + offset], reg
 330 //      fld_s  [reg+offset]
 331 //      fld_d  [reg+offset]
 332 //      fstp_s [reg + offset]
 333 //      fstp_d [reg + offset]
 334 //      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
 335 //
 336 // Warning: These routines must be able to handle any instruction sequences
 337 // that are generated as a result of the load/store byte,word,long
 338 // macros.  For example: The load_unsigned_byte instruction generates
 339 // an xor reg,reg inst prior to generating the movb instruction.  This
 340 // class must skip the xor instruction.
 341 
 342 
 343 // TODO Review
// Accessor for a register<->memory move sequence; allows reading and
// patching the addressing offset encoded in the instructions.
class NativeMovRegMem: public NativeInstruction {
 public:
  enum {
    instruction_size = 2 * arm_insn_sz, // TODO check this
  };
  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  // Addressing offset currently encoded in the instruction.
  int   offset() const;

  void  set_offset(int x);

  // Adjust the encoded offset by add_offset bytes.
  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
 371 
 372 inline NativeMovRegMem* nativeMovRegMem_at (address address) {
 373   NativeMovRegMem* test = (NativeMovRegMem*) address;
 374 #ifdef ASSERT
 375   test->verify();
 376 #endif
 377   return test;
 378 }
 379 
// Patching variant of NativeMovRegMem; not implemented on aarch32.
class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0;  }
};
 384 
// A long jump: a constant load of the target (movw/movt pair) followed
// by a single branch instruction — see instruction_size below.
class NativeJump: public NativeInstruction {
 public:
  enum {
    instruction_size = NativeMovConstReg::movw_movt_pair_sz + NativeBranchType::instruction_size,
  };
  address instruction_address() const {
    return addr();
  }

  address next_instruction_address() const;

  // Current jump target.
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);

  static bool is_at(address instr);
  static NativeJump* from(address instr);
};
 416 
 417 inline NativeJump* nativeJump_at(address addr) {
 418   return NativeJump::from(addr);
 419 }
 420 
 421 // TODO We don't really need NativeGeneralJump, NativeJump should be able to do
 422 // everything that General Jump would.  Make this only interface to NativeJump
 423 // from share code (c1_Runtime)
// Single-instruction unconditional jump used by shared runtime code.
class NativeGeneralJump: public NativeJump {
public:
  enum {
    instruction_size = arm_insn_sz,
  };

  static void insert_unconditional(address code_pos, address entry);
  // MT-safe replacement of the jump at instr_addr with the code at code_buffer.
  static void replace_mt_safe(address instr_addr, address code_buffer);
  // NOTE(review): declared static, so this hides the non-static
  // NativeJump::verify(); confirm that is intentional.
  static void verify();
};
 434 
 435 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
 436   NativeGeneralJump* jump = (NativeGeneralJump*)(address);
 437   debug_only(jump->verify();)
 438   return jump;
 439 }
 440 
// Pops a single register off the stack.
class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};
 446 
 447 
// An instruction guaranteed to fault when executed.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode at a specific address
  static void insert(address code_pos);
};
 453 
// Return instruction that does not pop values off the stack.
// Marker type only; no operations are defined here.
class NativeReturn: public NativeInstruction {
 public:
};
 458 
// Return instruction that does pop values off the stack.
// Marker type only; no operations are defined here.
class NativeReturnX: public NativeInstruction {
 public:
};
 463 
// Simple test vs memory.
// Marker type only; no operations are defined here.
class NativeTstRegMem: public NativeInstruction {
 public:
};
 468 
 469 inline bool NativeInstruction::is_nop()         {
 470   return (as_uint() & 0x0fffffff) == 0x0320f000;
 471 }
 472 
 473 inline bool NativeInstruction::is_jump_or_nop() {
 474   return is_nop() || is_jump();
 475 }
 476 
// A call encoded as a single branch with an immediate offset.
class NativeImmCall: public NativeBranchType {
 public:
  // Current call target.
  address destination() const;
  void set_destination(address dest);

  static bool is_at(address address);
  static NativeImmCall* from(address address);
};
 485 
// A jump encoded as a single branch with an immediate offset.
class NativeImmJump: public NativeBranchType {
 public:

  // Current jump target.
  address destination() const;
  void set_destination(address r);

  static bool is_at(address address);
  static NativeImmJump* from(address address);
};
 495 
// A jump that transfers control via a register holding the target.
class NativeRegJump: public NativeBranchType {
 public:

  // Register containing the jump target.
  Register destination() const;
  void set_destination(Register r);

  static bool is_at(address address);
  static NativeRegJump* from(address address);
};
 505 
// Definitions of NativeInstruction's classification predicates; each
// delegates to the corresponding subclass's encoding check.
inline bool NativeInstruction::is_call() const          { return NativeCall::is_at(addr()); }
inline bool NativeInstruction::is_jump() const          { return NativeJump::is_at(addr()); }
inline bool NativeInstruction::is_mov_const_reg() const { return NativeMovConstReg::is_at(addr()); }
inline bool NativeInstruction::is_imm_call() const      { return NativeImmCall::is_at(addr()); }
inline bool NativeInstruction::is_reg_call() const      { return NativeRegCall::is_at(addr()); }
inline bool NativeInstruction::is_imm_jump() const      { return NativeImmJump::is_at(addr()); }
inline bool NativeInstruction::is_reg_jump() const      { return NativeRegJump::is_at(addr()); }
 513 
 514 #endif // CPU_AARCH32_VM_NATIVEINST_AARCH32_HPP