/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_NATIVEINST_X86_HPP
#define CPU_X86_VM_NATIVEINST_X86_HPP

#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeFarJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,
    nop_instruction_size        =    1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_far_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}

class NativePltCall: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code           = 0xE8,
    instruction_size           =    5,
    instruction_offset         =    0,
    displacement_offset        =    1,
    return_address_offset      =    5
  };
  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address displacement_address() const { return addr_at(displacement_offset); }
  int displacement() const { return (jint) int_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;
  address plt_entry() const;
  address plt_jump() const;
  address plt_load_got() const;
  address plt_resolve_call() const;
  address plt_c2i_stub() const;
  void set_stub_to_clean();

  void  reset_to_plt_resolve_call();
  void  set_destination_mt_safe(address dest);

  void verify() const;
};

inline NativePltCall* nativePltCall_at(address address) {
  NativePltCall* call = (NativePltCall*) address;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativePltCall* nativePltCall_before(address addr) {
  address at = addr - NativePltCall::instruction_size;
  return nativePltCall_at(at);
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
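// Encoding sketch: the instruction manipulated here is the 5-byte near call,
//   E8 <rel32>    where rel32 == destination - return_address
// so, given some code_pos known to hold such a call (code_pos and dest are
// illustrative names), it can be retargeted via
//   nativeCall_at(code_pos)->set_destination(dest);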

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,
    instruction_size            =    5,
    instruction_offset          =    0,
    displacement_offset         =    1,
    return_address_offset       =    5
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  void  set_destination(address dest)       {
#ifdef AMD64
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

#if INCLUDE_AOT
  static bool is_far_call(address instr, address target) {
    intptr_t disp = target - (instr + sizeof(int32_t));
    return !Assembler::is_simm32(disp);
  }
#endif

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xFF,
    instruction_offset          =    0,
    return_address_offset_norex =    2,
    return_address_offset_rex   =    3
  };

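  // A register-indirect call is FF /2: two bytes without a REX prefix
  // (e.g. FF D0 == call rax), three bytes with one (e.g. 41 FF D0 == call r8),
  // which is what the prefix check below distinguishes.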
  int next_instruction_offset() const  {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else {
      return return_address_offset_rex;
    }
  }
};

// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32-bit data, DLL call targets, etc.)
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
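
// Usage sketch: on AMD64 the instruction is REX.W + (B8 | reg) followed by
// an imm64 (e.g. 48 B8 <imm64> == movq rax, imm64), so a constant load at pc
// can be repointed with
//   nativeMovConstReg_at(pc)->set_data((intptr_t)new_value);
// pc and new_value here are illustrative names only.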

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte, word, or long
// macros.  For example, the load_unsigned_byte instruction generates
// an xor reg,reg instruction prior to generating the movb instruction.  This
// class must skip the xor instruction.
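//
// Illustrative byte sequence (the offset is an example): load_unsigned_byte
// might emit
//      33 C0            xorl   eax, eax       ; clear destination first
//      8A 45 08         movb   al, [rbp + 8]  ; the actual load
// and instruction_start() below must report the movb, not the xor.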

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes      = Assembler::EVEX_4bytes,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  int   offset() const;

  void  set_offset(int x);

  void  add_offset_in_bytes(int add_offset)     { set_offset(offset() + add_offset); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

inline NativeMovRegMem* nativeMovRegMem_at (address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}


// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,
    mov64_instruction_code              = 0xB8
  };

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};

// destination is rbx or rax
// mov rbx, [rip + offset]
class NativeLoadGot: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif
public:
  enum Intel_specific_constants {
    rex_prefix = 0x48,
    instruction_code = 0x8b,
    modrm_rbx_code = 0x1d,
    modrm_rax_code = 0x05,
    instruction_length = 6 + rex_size,
    offset_offset = 2 + rex_size
  };

  address instruction_address() const { return addr_at(0); }
  address rip_offset_address() const { return addr_at(offset_offset); }
  int rip_offset() const { return int_at(offset_offset); }
  address return_address() const { return addr_at(instruction_length); }
  address got_address() const { return return_address() + rip_offset(); }
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  void verify() const;
private:
  void report_and_fail() const;
};

inline NativeLoadGot* nativeLoadGot_at(address addr) {
  NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
  load->verify();
#endif
  return load;
}

// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,
    instruction_size            =    5,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32-bit used to encode an unresolved jmp as jmp -1;
     // 64-bit can't produce this, so it used a jump to self.
     // Now both 32-bit and 64-bit use jump to self as the unresolved address,
     // which the inline cache code (and relocs) know about.

     // Return -1 if jump to self.
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}
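
// Usage sketch: with the 5-byte "E9 <rel32>" jump, a rel32 of -5 makes the
// instruction branch to its own first byte, which is the "unresolved"
// encoding described above. For instance (code_pos is an illustrative name):
//   NativeJump::insert(code_pos, code_pos);   // plants a jump-to-self
//   assert(nativeJump_at(code_pos)->jump_destination() == (address)-1, "");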

// far jump reg
class NativeFarJump: public NativeInstruction {
 public:
  address jump_destination() const;

  // Creation
  inline friend NativeFarJump* nativeFarJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

};

inline NativeFarJump* nativeFarJump_at(address address) {
  NativeFarJump* jump = (NativeFarJump*)(address);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jump on Intel. Long/far, conditional/unconditional
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // These constants do not apply universally, since the lengths and
    // offsets depend on the actual jump used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

class NativeGotJump: public NativeInstruction {
public:
  enum Intel_specific_constants {
    instruction_code = 0xff,
    instruction_offset = 0,
    instruction_size = 6,
    rip_offset = 2
  };

  void verify() const;
  address instruction_address() const { return addr_at(instruction_offset); }
  address destination() const;
  address return_address() const { return addr_at(instruction_size); }
  int got_offset() const { return (jint) int_at(rip_offset); }
  address got_address() const { return return_address() + got_offset(); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  bool is_GotJump() const { return ubyte_at(0) == instruction_code; }

  void set_jump_destination(address dest)  {
    address *got_entry = (address *) got_address();
    *got_entry = dest;
  }
};
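
// Encoding note (assuming the usual RIP-relative form): the GOT jump matched
// above is the 6-byte indirect jump
//   FF 25 <disp32>    jmpq [rip + disp32]
// so the real target lives in the GOT slot at return_address() + disp32, and
// set_jump_destination() patches that slot rather than the instruction itself.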

inline NativeGotJump* nativeGotJump_at(address addr) {
  NativeGotJump* jump = (NativeGotJump*)(addr);
  debug_only(jump->verify());
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F,    // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode at the specified address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,
    instruction_rex_prefix      = Assembler::REX,
    instruction_rex_b_prefix    = Assembler::REX_B,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};

inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_call_reg()     { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                          (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                           (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_jump_reg()     {
  int pos = 0;
  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
}
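// (is_jump_reg() matches FF /4, a register-indirect jump: e.g. FF E0 == jmp
// rax, and 41 FF E0 == jmp r8 when a REX_B prefix is present.)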
inline bool NativeInstruction::is_far_jump()     { return is_mov_literal64(); }
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  if (SafepointMechanism::uses_thread_local_poll()) {
    // We know that the poll must have a REX_B prefix since we enforce its source to be
    // a rex-register and the destination to be rax.
    const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
    const bool is_test_opcode = ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl;
    const bool is_rax_target = (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
    if (has_rex_prefix && is_test_opcode && is_rax_target) {
      return true;
    }
  }
  // Try decoding a near safepoint first:
  if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
      ubyte_at(1) == 0x05) { // 00 rax 101
    address fault = addr_at(6) + int_at(2);
    NOT_JVMCI(assert(!Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return os::is_poll_address(fault);
  }
  // Now try decoding a far safepoint:
  // two cases, depending on the choice of the base register in the address.
  if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
       ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
      (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg)) {
    NOT_JVMCI(assert(Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return true;
  }
  return false;
#else
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
           (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
           (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}
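
// Example of the near-poll shape matched above (the disp32 is illustrative):
//   85 05 <disp32>    testl eax, [rip + disp32]
// where next-instruction-address + disp32 must resolve to the VM's polling
// page for the match to succeed.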

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}

#endif // CPU_X86_VM_NATIVEINST_X86_HPP