/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_ASM_ASSEMBLER_HPP
#define SHARE_VM_ASM_ASSEMBLER_HPP

#include "asm/codeBuffer.hpp"
#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/top.hpp"

#ifdef TARGET_ARCH_x86
# include "register_x86.hpp"
# include "vm_version_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "register_sparc.hpp"
# include "vm_version_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "register_zero.hpp"
# include "vm_version_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "register_arm.hpp"
# include "vm_version_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "register_ppc.hpp"
# include "vm_version_ppc.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "register_aarch64.hpp"
# include "vm_version_aarch64.hpp"
#endif

// This file contains platform-independent assembler declarations.

class MacroAssembler;
class AbstractAssembler;
class Label;

/**
 * Labels represent destinations for control transfer instructions.  Such
 * instructions can accept a Label as their target argument.  A Label is
 * bound to the current location in the code stream by calling the
 * MacroAssembler's 'bind' method, which in turn calls the Label's 'bind'
 * method.  A Label may be referenced by an instruction before it's bound
 * (i.e., 'forward referenced').  'bind' stores the current code offset
 * in the Label object.
 *
 * If an instruction references a bound Label, the offset field(s) within
 * the instruction are immediately filled in based on the Label's code
 * offset.  If an instruction references an unbound label, that
 * instruction is put on a list of instructions that must be patched
 * (i.e., 'resolved') when the Label is bound.
 *
 * 'bind' will call the platform-specific 'patch_instruction' method to
 * fill in the offset field(s) for each unresolved instruction (if there
 * are any).  'patch_instruction' lives in one of the
 * cpu/<arch>/vm/assembler_<arch>* files.
 *
 * Instead of using a linked list of unresolved instructions, a Label has
 * an array of unresolved instruction code offsets.  _patch_index
 * contains the total number of forward references.  If the Label's array
 * overflows (i.e., _patch_index grows larger than the array size), a
 * GrowableArray is allocated to hold the remaining offsets.  (The cache
 * size is 4 for now, which handles over 99.5% of the cases)
 *
 * Labels may only be used within a single CodeSection.  If you need
 * to create references between code sections, use explicit relocations.
 */
class Label VALUE_OBJ_CLASS_SPEC {
 private:
  enum { PatchCacheSize = 4 };

  // _loc encodes both the binding state (via its sign)
  // and the binding locator (via its value) of a label.
  //
  // _loc >= 0   bound label, loc() encodes the target (jump) position
  // _loc == -1  unbound label
  int _loc;

  // References to instructions that jump to this unresolved label.
  // These instructions need to be patched when the label is bound
  // using the platform-specific patch_instruction() method.
  //
  // To avoid having to allocate from the C-heap each time, we provide
  // a local cache and use the overflow only if we exceed the local cache
  int _patches[PatchCacheSize];
  int _patch_index;
  GrowableArray<int>* _patch_overflow;

  Label(const Label&) { ShouldNotReachHere(); }

 public:

  /**
   * After binding, be sure 'patch_instructions' is called later to link
   */
  void bind_loc(int loc) {
    assert(loc >= 0, "illegal locator");
    assert(_loc == -1, "already bound");
    _loc = loc;
  }
  void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }

#ifndef PRODUCT
  // Iterates over all unresolved instructions for printing
  void print_instructions(MacroAssembler* masm) const;
#endif // PRODUCT

  /**
   * Returns the position of the Label in the code buffer.
   * The position is a 'locator', which encodes both offset and section.
   */
  int loc() const {
    assert(_loc >= 0, "unbound label");
    return _loc;
  }
  int loc_pos()  const { return CodeBuffer::locator_pos(loc()); }
  int loc_sect() const { return CodeBuffer::locator_sect(loc()); }

  bool is_bound() const    { return _loc >=  0; }
  bool is_unbound() const  { return _loc == -1 && _patch_index > 0; }
  bool is_unused() const   { return _loc == -1 && _patch_index == 0; }

  /**
   * Adds a reference to an unresolved displacement instruction to
   * this unbound label
   *
   * @param cb         the code buffer being patched
   * @param branch_loc the locator of the branch instruction in the code buffer
   */
  void add_patch_at(CodeBuffer* cb, int branch_loc);

  /**
   * Iterate over the list of patches, resolving the instructions
   * Call patch_instruction on each 'branch_loc' value
   */
  void patch_instructions(MacroAssembler* masm);

  void init() {
    _loc = -1;
    _patch_index = 0;
    _patch_overflow = NULL;
  }

  Label() {
    init();
  }

  ~Label() {
    assert(is_bound() || is_unused(), "Label was never bound to a location, but it was used as a jmp target");
  }

  void reset() {
    init(); // leave _patch_overflow because it points to CodeBuffer.
  }
};
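
// A usage sketch for the bind/patch protocol described above. It is
// illustrative only: '__' is the usual shorthand for a MacroAssembler
// pointer, and the x86 mnemonics are just an example, not part of this
// platform-independent interface.
//
//   Label done;
//   __ testl(rax, rax);
//   __ jcc(Assembler::zero, done);   // forward reference, recorded in 'done'
//   ...                              // code skipped when rax == 0
//   __ bind(done);                   // binds 'done'; the jcc above is patched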

// A union type for code which has to assemble both constant and
// non-constant operands, when the distinction cannot be made
// statically.
class RegisterOrConstant VALUE_OBJ_CLASS_SPEC {
 private:
  Register _r;
  intptr_t _c;

 public:
  RegisterOrConstant(): _r(noreg), _c(0) {}
  RegisterOrConstant(Register r): _r(r), _c(0) {}
  RegisterOrConstant(intptr_t c): _r(noreg), _c(c) {}

  Register as_register() const { assert(is_register(),""); return _r; }
  intptr_t as_constant() const { assert(is_constant(),""); return _c; }

  Register register_or_noreg() const { return _r; }
  intptr_t constant_or_zero() const  { return _c; }

  bool is_register() const { return _r != noreg; }
  bool is_constant() const { return _r == noreg; }
};
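
// Illustrative sketch (the function name is hypothetical, not part of this
// file): code that forms an address can accept either flavor and decide
// while generating code.
//
//   void emit_scaled_index(RegisterOrConstant roc) {
//     if (roc.is_constant()) { /* fold roc.as_constant() into the displacement */ }
//     else                   { /* emit an add/shift using roc.as_register()    */ }
//   }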

// The Abstract Assembler: Pure assembler doing NO optimizations on the
// instruction level; i.e., what you write is what you get.
// The Assembler generates code into a CodeBuffer.
class AbstractAssembler : public ResourceObj  {
  friend class Label;

 protected:
  CodeSection* _code_section;          // section within the code buffer
  OopRecorder* _oop_recorder;          // support for relocInfo::oop_type

 public:
  // Code emission & accessing
  address addr_at(int pos) const { return code_section()->start() + pos; }

 protected:
  // This routine is called when a label is used for an address.
  // Labels and displacements truck in offsets, but target must return a PC.
  address target(Label& L)             { return code_section()->target(L, pc()); }

  bool is8bit(int x) const             { return -0x80 <= x && x < 0x80; }
  bool isByte(int x) const             { return 0 <= x && x < 0x100; }
  bool isShiftCount(int x) const       { return 0 <= x && x < 32; }

  // Instruction boundaries (required when emitting relocatable values).
  class InstructionMark: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    InstructionMark(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->inst_mark() == NULL, "overlapping instructions");
      _assm->set_inst_mark();
    }
    ~InstructionMark() {
      _assm->clear_inst_mark();
    }
  };
  friend class InstructionMark;
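
  // Typical shape of use (a sketch following the pattern in the CPU-specific
  // assemblers; the opcode and the 'rspec'/'disp' values are placeholders):
  //
  //   {
  //     InstructionMark im(this);   // marks the start of the instruction
  //     relocate(rspec);            // relocation applies to the marked start
  //     emit_int8(0xE8);            // opcode byte (example value)
  //     emit_int32(disp);           // operand carrying the relocated value
  //   }
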
#ifdef ASSERT
  // Make it return true on platforms which need to verify
  // instruction boundaries for some operations.
  static bool pd_check_instruction_mark();

  // Add delta to short branch distance to verify that it still fits into imm8.
  int _short_branch_delta;

  int  short_branch_delta() const { return _short_branch_delta; }
  void set_short_branch_delta()   { _short_branch_delta = 32; }
  void clear_short_branch_delta() { _short_branch_delta = 0; }

  class ShortBranchVerifier: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->short_branch_delta() == 0, "overlapping instructions");
      _assm->set_short_branch_delta();
    }
    ~ShortBranchVerifier() {
      _assm->clear_short_branch_delta();
    }
  };
#else
  // Dummy in product.
  class ShortBranchVerifier: public StackObj {
   public:
    ShortBranchVerifier(AbstractAssembler* assm) {}
  };
#endif

 public:

  // Creation
  AbstractAssembler(CodeBuffer* code);

  // ensure buf contains all code (call this before using/copying the code)
  void flush();

  void emit_int8(   int8_t  x) { code_section()->emit_int8(   x); }
  void emit_int16(  int16_t x) { code_section()->emit_int16(  x); }
  void emit_int32(  int32_t x) { code_section()->emit_int32(  x); }
  void emit_int64(  int64_t x) { code_section()->emit_int64(  x); }

  void emit_float(  jfloat  x) { code_section()->emit_float(  x); }
  void emit_double( jdouble x) { code_section()->emit_double( x); }
  void emit_address(address x) { code_section()->emit_address(x); }

  // min and max values for signed immediate ranges
  static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1))    ; }
  static int max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }

  // Define some:
  static int min_simm10() { return min_simm(10); }
  static int min_simm13() { return min_simm(13); }
  static int min_simm16() { return min_simm(16); }

  // Test if x is within signed immediate range for nbits
  static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }

  // Define some:
  static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
  static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
  static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
  static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
  static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
  static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
  static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
  static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
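  // Worked example: min_simm(13) == -4096 and max_simm(13) == 4095, so
  // is_simm13(4095) is true while is_simm13(4096) and is_simm13(-4097) are not.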

  // Accessors
  CodeSection*  code_section() const   { return _code_section; }
  CodeBuffer*   code()         const   { return code_section()->outer(); }
  int           sect()         const   { return code_section()->index(); }
  address       pc()           const   { return code_section()->end();   }
  int           offset()       const   { return code_section()->size();  }
  int           locator()      const   { return CodeBuffer::locator(offset(), sect()); }

  OopRecorder*  oop_recorder() const   { return _oop_recorder; }
  void      set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }

  address       inst_mark() const { return code_section()->mark();       }
  void      set_inst_mark()       {        code_section()->set_mark();   }
  void    clear_inst_mark()       {        code_section()->clear_mark(); }

  // Constants in code
  void relocate(RelocationHolder const& rspec, int format = 0) {
    assert(!pd_check_instruction_mark()
        || inst_mark() == NULL || inst_mark() == code_section()->end(),
        "call relocate() between instructions");
    code_section()->relocate(code_section()->end(), rspec, format);
  }
  void relocate(   relocInfo::relocType rtype, int format = 0) {
    code_section()->relocate(code_section()->end(), rtype, format);
  }

  static int code_fill_byte();         // used to pad out odd-sized code buffers

  // Associate a comment with the current offset.  It will be printed
  // along with the disassembly when printing nmethods.  Currently
  // only supported in the instruction section of the code buffer.
  void block_comment(const char* comment);
  // Copy str to a buffer that has the same lifetime as the CodeBuffer
  const char* code_string(const char* str);
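
  // For example, a MacroAssembler can call block_comment("frame setup") just
  // before emitting a prologue; the text then appears at that offset in the
  // printed disassembly. (Illustrative; the comment string is arbitrary.)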

  // Label functions
  void bind(Label& L); // binds an unbound label L to the current code position

  // Move to a different section in the same code buffer.
  void set_code_section(CodeSection* cs);

  // Inform assembler when generating stub code and relocation info
  address    start_a_stub(int required_space);
  void       end_a_stub();
  // Ditto for constants.
  address    start_a_const(int required_space, int required_align = sizeof(double));
  void       end_a_const(CodeSection* cs);  // Pass the codesection to continue in (insts or stubs?).

  // constants support
  //
  // We must remember the code section (insts or stubs) in c1
  // so we can reset to the proper section in end_a_const().
  address long_constant(jlong c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_int64(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address double_constant(jdouble c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_double(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address float_constant(jfloat c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_float(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c, RelocationHolder const& rspec) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      relocate(rspec);
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }
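
  // Example (a sketch): emit a 64-bit floating-point literal into the
  // constant area and keep its address for a later memory operand. A NULL
  // result means there was no room to start the constant.
  //
  //   address pi = double_constant(3.14159265358979);
  //   // if (pi == NULL) the caller must bail out (e.g. fail the compile)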

  // Bootstrapping aid to cope with delayed determination of constants.
  // Returns a static address which will eventually contain the constant.
  // The value zero (NULL) stands in for a constant which is still uncomputed.
  // Thus, the eventual value of the constant must not be zero.
  // This is fine, since this is designed for embedding object field
  // offsets in code which must be generated before the object class is loaded.
  // Field offsets are never zero, since an object's header (mark word)
  // is located at offset zero.
  RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0);
  RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0);
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0;
  // Last overloading is platform-dependent; look in assembler_<arch>.cpp.
  static intptr_t* delayed_value_addr(int(*constant_fn)());
  static intptr_t* delayed_value_addr(address(*constant_fn)());
  static void update_delayed_values();
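
  // Sketch of the intended use (the value function and register below are
  // hypothetical): emit code that needs a field offset before the defining
  // class has been loaded.
  //
  //   // int my_field_offset();   // returns 0 until the class is loaded
  //   RegisterOrConstant off = delayed_value(my_field_offset, tmp_reg);
  //
  // If the value is already known, 'off' is a constant; otherwise the emitted
  // code reads it through a static cell that update_delayed_values() refreshes
  // once the real value can be computed.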

  // Bang stack to trigger StackOverflowError at a safe location
  // implementation delegates to machine-specific bang_stack_with_offset
  void generate_stack_overflow_check( int frame_size_in_bytes );
  virtual void bang_stack_with_offset(int offset) = 0;


  /**
   * A platform-dependent method to patch a jump instruction that refers
   * to this label.
   *
   * @param branch the location of the instruction to patch
   * @param target the new target (destination) address for the branch
   */
  void pd_patch_instruction(address branch, address target);

};

#ifdef TARGET_ARCH_x86
# include "assembler_x86.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "assembler_aarch64.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "assembler_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "assembler_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "assembler_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.hpp"
#endif


#endif // SHARE_VM_ASM_ASSEMBLER_HPP