1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  23  * or visit www.oracle.com if you need additional information or have any
  24  * questions.
  25  *
  26  */
  27 
  28 #include "precompiled.hpp"
  29 #include "asm/macroAssembler.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "memory/resourceArea.hpp"
  32 #include "nativeInst_aarch32.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "runtime/handles.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/stubRoutines.hpp"
  37 #include "utilities/ostream.hpp"
  38 #ifdef COMPILER1
  39 #include "c1/c1_Runtime1.hpp"
  40 #endif
  41 
// LIRAssembler fills patching site with nops up to NativeCall::instruction_size
// Default is the worst-case (pre-ARMv6T2) 5-instruction call sequence; it is
// trimmed to 3 instructions in NativeCall::init() when movw/movt exist.
int NativeCall::instruction_size = 5 * arm_insn_sz;
  44 
  45 NativeInstruction* NativeInstruction::from(address addr) {
  46   return (NativeInstruction*) addr;
  47 }
  48 
  49 //-------------------------------------------------------------------
  50 
// Called once at startup: with ARMv6T2/ARMv7 (movw/movt available) a call
// sequence needs only 3 instructions, otherwise the worst case of 5 is kept.
void NativeCall::init() {
  instruction_size = (VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7) ? 3 : 5) * arm_insn_sz;
}
  54 
  55 void NativeCall::verify() {
  56   if (!is_call()) {
  57     fatal("not a call");
  58   }
  59 }
  60 
// Decode the call target from whichever call shape is present here:
// immediate BL, constant-load + register call, or trampoline call.
address NativeCall::destination() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->destination();
  } else if (NativeMovConstReg::is_at(addr())) {
    // Target was materialized into a register as constant data.
    return address(NativeMovConstReg::from(addr())->data());
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->destination();
  }
  ShouldNotReachHere();
  return NULL;
}

// Patch the call target in place. Not guaranteed atomic w.r.t. concurrent
// execution; use set_destination_mt_safe() for code that may be running.
void NativeCall::set_destination(address dest) {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    NativeImmCall::from(addr())->set_destination(dest);
  } else if (NativeMovConstReg::is_at(addr())) {
    NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination(dest);
  } else {
    ShouldNotReachHere();
  }
}
  86 
// Atomically retarget a call that other threads may be executing.
// Only the single-word immediate call and the trampoline call (data-word
// rewrite) can be patched atomically; the mov-const + reg-call shape is
// deliberately rejected.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_call(), "not a call");

  // patching should be not only safe (i.e. this call could be executed by some thread),
  // but it also should be atomic (some other thread could call NativeCall::destination()
  // and see valid destination value)

  if (NativeImmCall::is_at(addr())) {
    // Rewriting one instruction word is atomic.
    NativeImmCall::from(addr())->set_destination(dest);
    ICache::invalidate_word(addr());
  } else if (NativeTrampolineCall::is_at(addr())) {
    NativeTrampolineCall::from(addr())->set_destination_mt_safe(dest);
  } else {
    ShouldNotReachHere();
  }
}

// Not used on AArch32.
void NativeCall::insert(address code_pos, address entry) {
  Unimplemented();
}
 107 
// True if return_address is immediately preceded by one of the recognized
// call sequences (trampoline, mov-const + reg call, or immediate call).
bool NativeCall::is_call_before(address return_address) {
  if (NativeTrampolineCall::is_at(return_address - NativeCall::instruction_size)) {
    return true;
  }

  if (NativeMovConstReg::is_at(return_address - NativeCall::instruction_size)) {
    NativeMovConstReg *nm = NativeMovConstReg::from(return_address - NativeCall::instruction_size);
    address next_instr = nm->next_instruction_address();
    // The register call must go through the same register the constant
    // was loaded into.
    if (NativeRegCall::is_at(next_instr) && NativeRegCall::from(next_instr)->destination() == nm->destination()) {
      return true;
    }
  }

  // An immediate call is a single branch instruction.
  if (NativeImmCall::is_at(return_address - NativeBranchType::instruction_size)) {
    return true;
  }

  return false;
}
 127 
// Address of the first instruction after the whole call sequence,
// i.e. the call's return address.
address NativeCall::next_instruction_address() const {
  assert(is_call(), "not a call");
  if (NativeImmCall::is_at(addr())) {
    return NativeImmCall::from(addr())->next_instruction_address();
  } else if (NativeMovConstReg::is_at(addr())) {
    // Skip the constant load, then the register call that follows it.
    NativeMovConstReg *nm = NativeMovConstReg::from(addr());
    address next_instr = nm->next_instruction_address();
    assert(NativeRegCall::is_at(next_instr), "should be");
    return NativeRegCall::from(next_instr)->next_instruction_address();
  } else if (NativeTrampolineCall::is_at(addr())) {
    return NativeTrampolineCall::from(addr())->next_instruction_address();
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}

// The return address of a call is simply the instruction after it.
address NativeCall::return_address() const {
  return next_instruction_address();
}
 148 
 149 bool NativeCall::is_at(address addr) {
 150   if (NativeImmCall::is_at(addr)) {
 151     return true;
 152   }
 153 
 154   if (NativeMovConstReg::is_at(addr)) {
 155     NativeMovConstReg *nm = NativeMovConstReg::from(addr);
 156     address next_instr = nm->next_instruction_address();
 157     if (NativeRegCall::is_at(next_instr) &&
 158         NativeRegCall::from(next_instr)->destination() == nm->destination()) {
 159       return true;
 160     }
 161   }
 162 
 163   if (NativeTrampolineCall::is_at(addr)) {
 164     return true;
 165   }
 166 
 167   return false;
 168 }
 169 
 170 NativeCall* NativeCall::from(address addr) {
 171   assert(NativeCall::is_at(addr), "");
 172   return (NativeCall*) addr;
 173 }
 174 
 175 //-------------------------------------------------------------------
 176 
// The trampoline keeps its 32-bit target as a data word at offset 8,
// right after the two instructions matched by is_at() below.
address NativeTrampolineCall::destination() const {
  assert(is_at(addr()), "not call");
  return (address) uint_at(8);
}

void NativeTrampolineCall::set_destination(address dest) {
  assert(is_at(addr()), "not call");
  set_uint_at(8, (uintptr_t) dest);
}

// MT-safe: only the data word is rewritten; the instructions stay untouched,
// and a single word store is atomic.
void NativeTrampolineCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(is_at(addr()), "not call");
  set_destination(dest);
  ICache::invalidate_word(addr() + 8);
}

// Match the two-instruction trampoline: set lr to the return point, then
// load pc from the data word that immediately follows.
bool NativeTrampolineCall::is_at(address addr) {
  return (as_uint(addr    ) & ~0xffu) == 0xe28fe000 // add     lr, pc, #disp
       && as_uint(addr + 4)          == 0xe51ff004; // ldr     pc, [pc, -4]
}

NativeTrampolineCall* NativeTrampolineCall::from(address addr) {
  assert(NativeTrampolineCall::is_at(addr), "");
  return (NativeTrampolineCall*) addr;
}
 202 
 203 //-------------------------------------------------------------------
 204 
// Target of an immediate BL: 24-bit signed word offset, pc-relative,
// where pc reads as the instruction address + 8.
address NativeImmCall::destination() const {
  assert(is_imm_call(), "not call");
  uint32_t insn = as_uint();
  intptr_t off = Instruction_aarch32::sextract(insn, 23, 0);
  address destination = addr() + 8 + (off << 2);
  return destination;
}

void NativeImmCall::set_destination(address dest) {
  assert(is_imm_call(), "not call");
  patch_offset_to(dest);
}

// 0b1011 in bits 27..24 is the BL (branch with link) opcode.
bool NativeImmCall::is_at(address addr) {
  return Instruction_aarch32::extract(as_uint(addr), 27, 24)  == 0b1011;
}

NativeImmCall* NativeImmCall::from(address addr) {
  assert(NativeImmCall::is_at(addr), "");
  return (NativeImmCall*) addr;
}
 226 
 227 //-------------------------------------------------------------------
 228 
// Register the call goes through (bits 3..0 of the instruction).
Register NativeRegCall::destination() const {
  assert(is_reg_call(), "not call");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}

// Branch-type encoding with 0b0011 in bits 7..4, i.e. BLX (register).
bool NativeRegCall::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0011;
}

NativeRegCall* NativeRegCall::from(address addr) {
  assert(NativeRegCall::is_at(addr), "");
  return (NativeRegCall*) addr;
}
 243 
 244 //-------------------------------------------------------------------
 245 
// A patchable far load may be preceded by a nop + barrier pair emitted as
// a patching prolog; skip it so callers see the add/ldr pair itself.
address NativeFarLdr::skip_patching_prolog(address addr) {
  if (NativeInstruction::from(addr)->is_nop() &&
      NativeInstruction::from(addr + arm_insn_sz)->is_barrer()) {
    return addr+2*arm_insn_sz;
  }
  return addr;
}
 253 
// Recognize the far-load pair (after an optional patching prolog):
// add/sub Rd, pc, #imm followed by a ldr literal through the same Rd.
bool NativeFarLdr::is_at(address addr) {
  addr = skip_patching_prolog(addr);
  unsigned add_condidate = as_uint(addr);
  // First insn must be add or sub with pc (r15) as base register.
  if (((Instruction_aarch32::extract(add_condidate, 27, 21)  != 0b0010100) /*add*/ &&
        (Instruction_aarch32::extract(add_condidate, 27, 21) != 0b0010010) /*sub*/) ||
      (Instruction_aarch32::extract(add_condidate, 19, 16) != (unsigned) r15_pc->encoding())) {
    return false;
  }
  // Second insn must be a literal ldr using the add/sub destination register.
  Register dest = as_Register(Instruction_aarch32::extract(add_condidate, 15, 12));
  return NativeMovConstReg::is_ldr_literal_at(addr + arm_insn_sz, dest);
}

NativeFarLdr* NativeFarLdr::from(address addr) {
  assert(is_at(addr), "");
  return (NativeFarLdr*) addr;
}
 270 
// Address of the data word referenced by the add/ldr pair. The pc-relative
// base is self + 8 (ARM reads pc as current instruction + 8); the add/sub
// contributes the high part of the displacement and the ldr the low 12 bits.
intptr_t* NativeFarLdr::data_addr() {
  address self = skip_patching_prolog(addr());
  off_t offset = 8;
  off_t add_off = Assembler::decode_imm12(as_uint(self) & 0xfff);
  // Bits 24..21 == 0x4 means add (positive displacement), else sub.
  if (Instruction_aarch32::extract(as_uint(self), 24, 21) == 0x4) {
    offset += add_off;
  } else {
    offset -= add_off;
  }
  off_t ldr_off = as_uint(self + arm_insn_sz) & 0xfff;
  // Sign of the ldr displacement follows the add/sub choice (bit 23 of the
  // first insn), matching how set_data_addr() writes both instructions.
  if (Instruction_aarch32::extract(as_uint(self), 23, 23)) {
    offset += ldr_off;
  } else {
    offset -= ldr_off;
  }

  return (intptr_t*)(self + offset);
}
 289 
// Re-point the add/ldr pair at data_addr. The displacement from pc (self + 8)
// is split: bits 12..19 (encodable as an imm12 rotation) go into the add/sub,
// the low 12 bits into the ldr; the sign selects add vs sub and the ldr U bit.
// NOTE: no fences here either; caller is responsible for ICache coherence
// beyond the invalidate below.
void NativeFarLdr::set_data_addr(intptr_t *data_addr) {
  address self = skip_patching_prolog(addr());
  off_t offset = (address)data_addr - (self + 8);
  bool minus = false;
  if (offset < 0) {
    offset = -offset;
    minus = true;
  }
  guarantee((0 <= offset) && (offset <= 0xffffff), "offset too large");
  // Patch the add/sub: clear opcode sign bits and imm12, then set them.
  set_uint_at(self - addr(), (as_uint(self) & ~0xc00fff) |
    (minus ? 0x400000u /*sub*/ : 0x800000u /*add*/) |
    Assembler::encode_imm12(offset & 0xff000));

  // Patch the ldr: clear U bit and imm12, then set them.
  set_uint_at(self - addr() + arm_insn_sz,
      (as_uint(self + arm_insn_sz) & ~0x800fff) |
      (minus ? 0x000000 : 0x800000) |
      (offset & 0xfff));
  ICache::invalidate_range(self, 2*arm_insn_sz);
}
 309 
// First instruction after the (prolog-skipped) far-load pair.
address NativeFarLdr::next_instruction_address() const {
  return skip_patching_prolog(addr()) + NativeMovConstReg::far_ldr_sz;
}
 313 
 314 //-------------------------------------------------------------------
 315 
 316 void NativeMovConstReg::verify() {
 317   if (!is_mov_const_reg()) {
 318     fatal("not a mov const reg");
 319   }
 320 }
 321 
// The constant being loaded: either read from the far-load's data word,
// or decoded out of the instruction stream itself.
intptr_t NativeMovConstReg::data() const {
  if (NativeFarLdr::is_at(addr())) {
    return *NativeFarLdr::from(addr())->data_addr();
  }
  return (intptr_t) MacroAssembler::target_addr_for_insn(addr());
}
 328 
// Replace the loaded constant, and keep the nmethod's oop/metadata section
// consistent for GC if the constant is an oop or Metadata*.
void NativeMovConstReg::set_data(intptr_t x) {
  if (NativeFarLdr::is_at(addr())) {
    *NativeFarLdr::from(addr())->data_addr() = x;
    // Fences should be provided by calling code!
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(addr(), (address)x);
    ICache::invalidate_range(addr(), max_instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  // NOTE(review): cb is dereferenced without a NULL check — assumes addr()
  // always lies inside a CodeBlob; confirm against callers.
  CodeBlob* cb = CodeCache::find_blob(addr());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, addr(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}
 358 
// Debug printout: address and the constant currently loaded.
void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(addr()), data());
}

// Destination register of the constant load (bits 15..12 of the first insn).
Register NativeMovConstReg::destination() const {
  return (Register) Instruction_aarch32::extract(as_uint(), 15, 12);
}

NativeMovConstReg* NativeMovConstReg::from(address addr) {
  assert(NativeMovConstReg::is_at(addr), "");
  return (NativeMovConstReg*) addr;
}
 372 
// ldr Rt, [Rn, #imm] literal-style load. With from == noreg any base
// register is accepted; otherwise the base must be 'from'.
bool NativeMovConstReg::is_ldr_literal_at(address addr, Register from) {
  unsigned insn = as_uint(addr);
  if (from == noreg) {
    return (Instruction_aarch32::extract(insn, 27, 20) & 0b11100101) == 0b01000001;
  }
  unsigned reg = from->encoding();
  // Same opcode mask, plus base-register match in bits 19..16.
  return (Instruction_aarch32::extract(insn, 27, 16) & 0b111001011111) == (0b010000010000 | reg);
}

bool NativeMovConstReg::is_far_ldr_literal_at(address addr) {
  return NativeFarLdr::is_at(addr);
}

// movw followed by movt: builds a full 32-bit constant in two instructions.
bool NativeMovConstReg::is_movw_movt_at(address addr) {
  unsigned insn = as_uint(addr);
  unsigned insn2 = as_uint(addr + arm_insn_sz);
  return Instruction_aarch32::extract(insn,  27, 20) == 0b00110000 && //mov
         Instruction_aarch32::extract(insn2, 27, 20) == 0b00110100;   //movt
}

// mov imm followed by three orr's: pre-ARMv6T2 way to build a 32-bit constant.
bool NativeMovConstReg::is_mov_n_three_orr_at(address addr) {
  return (Instruction_aarch32::extract(as_uint(addr), 27, 16) & 0b111111101111) == 0b001110100000 &&
          Instruction_aarch32::extract(as_uint(addr+arm_insn_sz), 27, 20) == 0b00111000 &&
          Instruction_aarch32::extract(as_uint(addr+2*arm_insn_sz), 27, 20) == 0b00111000 &&
          Instruction_aarch32::extract(as_uint(addr+3*arm_insn_sz), 27, 21) == 0b0011100;
}
 399 
 400 bool NativeMovConstReg::is_at(address addr) {
 401   return is_ldr_literal_at(addr) ||
 402           is_far_ldr_literal_at(addr) ||
 403           is_movw_movt_at(addr) ||
 404           is_mov_n_three_orr_at(addr);
 405 }
 406 
 407 //-------------------------------------------------------------------
// NativeMovRegMem on AArch32 is always backed by a NativeMovConstReg that
// materializes the offset; all accessors delegate to it.
address NativeMovRegMem::instruction_address() const {
  return addr();
}

int NativeMovRegMem::offset() const  {
  assert(NativeMovConstReg::is_at(addr()), "no others");
  return NativeMovConstReg::from(addr())->data();
}

void NativeMovRegMem::set_offset(int x) {
  assert(NativeMovConstReg::is_at(addr()), "no others");
  NativeMovConstReg::from(addr())->set_data(x);
}

void NativeMovRegMem::verify() {
  assert(NativeMovConstReg::is_at(addr()), "no others");
}
 425 
 426 //--------------------------------------------------------------------------------
 427 
 428 void NativeJump::verify() {
 429   if (!is_jump()) {
 430     fatal("not a call");
 431   }
 432 }
 433 
// No alignment requirement on AArch32 beyond the 4-byte instruction
// alignment asserted in patch_verified_entry(), so nothing to check.
void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}
 436 
 437 address NativeJump::jump_destination() const {
 438   assert(is_jump(), "not a call");
 439   if (NativeImmJump::is_at(addr())) {
 440     return NativeImmJump::from(addr())->destination();
 441   } else if (NativeMovConstReg::is_at(addr())) {
 442     return address(NativeMovConstReg::from(addr())->data());
 443   }
 444   ShouldNotReachHere();
 445   return NULL;
 446 }
 447 
 448 void NativeJump::set_jump_destination(address dest) {
 449   assert(is_jump(), "not a call");
 450   if (NativeImmJump::is_at(addr())) {
 451     NativeImmJump::from(addr())->set_destination(dest);
 452   } else if (NativeMovConstReg::is_at(addr())) {
 453     NativeMovConstReg::from(addr())->set_data((uintptr_t) dest);
 454   } else {
 455     ShouldNotReachHere();
 456   }
 457 }
 458 
 459 address NativeJump::next_instruction_address() const {
 460   assert(is_jump(), "not a call");
 461   if (NativeImmJump::is_at(addr())) {
 462     return NativeImmJump::from(addr())->next_instruction_address();
 463   } else if (NativeMovConstReg::is_at(addr())) {
 464     address after_move = NativeMovConstReg::from(addr())->next_instruction_address();
 465     assert(NativeRegJump::is_at(after_move), "should be jump");
 466     return NativeRegJump::from(after_move)->next_instruction_address();
 467   }
 468   ShouldNotReachHere();
 469   return NULL;
 470 }
 471 
 472 bool NativeJump::is_at(address addr) {
 473   if (NativeImmJump::is_at(addr)) {
 474     return true;
 475   }
 476   if (NativeMovConstReg::is_at(addr)) {
 477     NativeMovConstReg *nm = NativeMovConstReg::from(addr);
 478     address next_instr = nm->next_instruction_address();
 479     return NativeRegJump::is_at(next_instr) &&
 480       NativeRegJump::from(next_instr)->destination() == nm->destination();
 481   }
 482   return false;
 483 }
 484 
 485 NativeJump* NativeJump::from(address addr) {
 486   assert(NativeJump::is_at(addr), "");
 487   return (NativeJump*) addr;
 488 }
 489 
 490 // MT-safe inserting of a jump over a jump or a nop (used by
 491 // nmethod::make_not_entrant_or_zombie)
 492 
// Atomically redirect the verified entry point of an nmethod, either with a
// single unconditional branch (if the stub is in range) or by planting the
// SIGILL marker instruction.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(),
     "expected fixed destination of patch");
  assert(NativeInstruction::from(verified_entry)->is_jump_or_nop() ||
      NativeInstruction::from(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch32 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    assert((((intptr_t) dest & 0x3) == 0) && (((intptr_t) verified_entry & 0x3) == 0),
        "addresses should be aligned on 4");
    // Displacement is relative to pc (which reads as insn address + 8) and
    // encoded as a signed word (>> 2) offset in the low 24 bits.
    ptrdiff_t disp = (dest - verified_entry - 8) >> 2;
    guarantee((-(1 << 23) <= disp) && (disp < (1 << 23)), "branch overflow");

    // 0b11101010 << 24 is an unconditional B; one word store is atomic.
    unsigned int insn = (0b11101010 << 24) | (disp & 0xffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}
 518 
 519 //-------------------------------------------------------------------
 520 
// Matches the branch-to-register encoding group (BX/BLX register forms):
// bits 27..20 == 0b00010010 with bits 19..8 all ones.
bool NativeBranchType::is_branch_type(uint32_t insn) {
  return Instruction_aarch32::extract(insn, 27, 20) == 0b00010010 &&
    Instruction_aarch32::extract(insn, 19, 8) == 0b111111111111;
}
 525 
 526 void NativeBranchType::patch_offset_to(address dest) {
 527   uint32_t insn = as_uint();
 528   const intptr_t off = (dest - (addr() + 8));
 529   assert((off & 3) == 0, "should be");
 530   assert(-32 * 1024 * 1024 <= off && off < 32 * 1024 * 1042,
 531       "new offset should fit in instruction");
 532 
 533   const unsigned off_mask = ((1U << 24) - 1);
 534   insn &= ~off_mask; // mask off offset part
 535   insn |= ((unsigned) off >> 2) & off_mask;
 536 
 537   set_uint(insn);
 538   ICache::invalidate_range(addr_at(0), instruction_size);
 539 }
 540 
 541 //-------------------------------------------------------------------
 542 
// Branch target of an immediate B: pc-relative (pc reads as addr() + 8),
// 24-bit signed word offset scaled by 4.
address NativeImmJump::destination() const {
  assert(is_imm_jump(), "not jump");
  return addr() + 8 + 4 * Instruction_aarch32::sextract(as_uint(), 23, 0);
}

void NativeImmJump::set_destination(address addr) {
  assert(is_imm_jump(), "");
  patch_offset_to(addr);
}

// 0b1010 in bits 27..24 is the B (branch without link) opcode.
bool NativeImmJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return Instruction_aarch32::extract(insn, 27, 24)  == 0b1010;
}

NativeImmJump* NativeImmJump::from(address addr) {
  assert(NativeImmJump::is_at(addr), "");
  return (NativeImmJump*) addr;
}
 562 
 563 //-------------------------------------------------------------------
 564 
// Branch-type encoding with 0b0001 in bits 7..4, i.e. BX (register).
bool NativeRegJump::is_at(address addr) {
  unsigned insn = as_uint(addr);
  return is_branch_type(insn) && Instruction_aarch32::extract(insn, 7, 4) == 0b0001;
}

NativeRegJump* NativeRegJump::from(address addr) {
  assert(NativeRegJump::is_at(addr), "");
  return (NativeRegJump*) addr;
}

// Register the jump goes through (bits 3..0 of the instruction).
Register NativeRegJump::destination() const {
  assert(is_reg_jump(), "");
  return (Register) Instruction_aarch32::extract(as_uint(), 3, 0);
}
 579 
 580 //-------------------------------------------------------------------
 581 
// Recognize the safepoint-poll load at this address. The exact shape depends
// on the compiler tier and on whether thread-local handshakes are in use.
bool NativeInstruction::is_safepoint_poll() {
#ifdef COMPILER2_OR_JVMCI
  // it would be too complex to find the place where poll address is
  // loaded into address register since C2 can do this somewhere else
  // so we only checking the exact poll instruction in the form
  // ldr(r12, [rXXX, #0])
  return (NativeInstruction::as_uint() & 0xfff0ffff) == 0xe590c000;
#else
  // a safepoint_poll is implemented in two steps as
  //
  // movw(r12, polling_page & 0xffff);
  // movt(r12, polling_page >> 16);
  // ldr(r12, [r12, #0]);
  //
  // or, if thread-local handshakes are used
  //
  // ldr(r12, [rthread, #offset]);
  // ldr(r12, [r12, #0]);
  //
  //
  // We can rely on this instructions order since we have only C1

  if (SafepointMechanism::uses_thread_local_poll()) {
    const Register scratch = rscratch2;

    // Match the final load, then check the preceding thread-local load.
    if (NativeInstruction::from(addr())->is_ldr(scratch, Address(scratch))) {
      return NativeInstruction::from(addr()-arm_insn_sz)
        ->is_ldr(scratch, Address(rthread, Thread::polling_page_offset()));
    }
  } else {
    const intptr_t paddr = (intptr_t)os::get_polling_page();
    const Register scratch = rscratch2;

    // Match the final load, then verify the constant loaded before it is
    // the polling page address and targets the same scratch register.
    if (NativeInstruction::from(addr())->is_ldr(scratch, Address(scratch))) {
      NativeMovConstReg* mov_const = NativeMovConstReg::before(addr());
      return (mov_const->data() == paddr) && (mov_const->destination() == scratch);
    }
  }

  return false;
#endif
}
 624 
// movt Rd, #imm16 with the given condition: checks opcode, destination
// register, both halves of the split imm16 (imm12 + imm4), and cond bits.
bool NativeInstruction::is_movt(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110100;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5;
}

// movw Rd, #imm16 with the given condition; identical to is_movt() above
// except for the opcode bits.
bool NativeInstruction::is_movw(Register dst, unsigned imm, Assembler::Condition cond) {
  bool a1 = Instruction_aarch32::extract(uint_at(0), 27, 20) == 0b00110000;
  bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
  bool a3 = Instruction_aarch32::extract(uint_at(0), 11, 0) == ((unsigned)imm & 0xfff);
  bool a4 = Instruction_aarch32::extract(uint_at(0), 19, 16) == ((unsigned)imm >> 12);
  bool a5 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

  return a1 && a2 && a3 && a4 && a5;
}
 644 
// ldr dst, [addr.base(), #0] with the given condition. Only the simple
// immediate-offset, no-writeback, zero-offset form is supported (asserted).
bool NativeInstruction::is_ldr(Register dst, Address addr, Assembler::Condition cond) {
    assert(addr.get_mode() == Address::imm, "unimplemented");
    assert(addr.get_wb_mode() == Address::off, "unimplemented");
    assert(addr.index() == noreg, "unimplemented");
    assert(addr.offset() == 0, "unimplemented");

    bool b0 = Instruction_aarch32::extract(uint_at(0), 24, 24) == 1; //P
    bool b1 = Instruction_aarch32::extract(uint_at(0), 23, 23) == 1; //U
    bool b2 = Instruction_aarch32::extract(uint_at(0), 21, 21) == 0; //W
    bool b3 = Instruction_aarch32::extract(uint_at(0), 19, 16) == (unsigned)addr.base();
    bool b4 = Instruction_aarch32::extract(uint_at(0), 11, 0) == 0;

    bool a1 = b0 && b1 && b2 && b3 && b4; //Address encoding

    bool a2 = Instruction_aarch32::extract(uint_at(0), 15, 12) == (unsigned)dst;
    bool a3 = Instruction_aarch32::extract(uint_at(0), 20, 20) == 1;   // L = load
    bool a4 = Instruction_aarch32::extract(uint_at(0), 22, 22) == 0;   // B = word
    bool a5 = Instruction_aarch32::extract(uint_at(0), 27, 25) == 0b010;
    bool a6 = Instruction_aarch32::extract(uint_at(0), 31, 28) == (unsigned)cond;

    return a1 && a2 && a3 && a4 && a5 && a6;
}
 667 
 668 
// movt, any destination/immediate/condition.
bool NativeInstruction::is_movt() {
  return Instruction_aarch32::extract(int_at(0), 27, 20) == 0b00110100;
}

// orr (immediate form), any operands.
bool NativeInstruction::is_orr() {
  return Instruction_aarch32::extract(int_at(0), 27, 21) == 0b0011100;
}

// The marker used for not-entrant/zombie methods; executing it raises
// SIGILL, which the signal handler recognizes by this exact word.
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return as_uint() == 0xe7fdeafd; // udf #0xdead
}

// Plant the SIGILL marker. Single word store; caller flushes the icache.
void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xe7fdeafd; // udf #0xdead
}
 684 
 685 //-------------------------------------------------------------------
 686 
// Nothing to verify for a general jump on AArch32.
void NativeGeneralJump::verify() {  }

// Emit an unconditional immediate branch to 'entry' at code_pos, overwriting
// only a nop or an existing immediate jump (asserted).
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;
  assert(n_jump->is_nop() || n_jump->is_imm_jump(), "not overwrite whats not supposed");

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.b(entry);

  ICache::invalidate_range(code_pos, instruction_size);
}
 700 
 701 // MT-safe patching of a long jump instruction.
// MT-safe patching of a long jump instruction.
// Replaces an imm-jump-guarded far-load sequence with the patched image from
// code_buffer: first redirect the far load's data pointer, then atomically
// overwrite the leading words so executing threads never see a torn state.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  if (NativeFarLdr::is_at(instr_addr+2*arm_insn_sz)) {
    assert(NativeInstruction::from(code_buffer)->is_nop(), "code_buffer image");
    assert(NativeImmJump::is_at(instr_addr), "instr_image image");
    // first 'b' prevents NativeFarLdr to recognize patching_prolog, skip it manually
    address load_instr = instr_addr+2*arm_insn_sz;

    NativeFarLdr::from(load_instr)->set_data_addr(NativeFarLdr::from(code_buffer)->data_addr());

    // Make the data-pointer update visible before exposing the new prolog.
    WRITE_MEM_BARRIER;
    *(uintptr_t*)instr_addr = *(uintptr_t*)code_buffer;
    ICache::invalidate_word(instr_addr);

    assert(NativeFarLdr::is_at(instr_addr), "now valid constant loading");
  } else {
    ShouldNotReachHere();
  }
}