/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

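// Called after an instruction word at the given offset has been patched;
// flushing that word from the instruction cache makes the change visible
// to subsequently executed code.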
void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an indirect rip mov to rbx");
}

void NativeLoadGot::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeLoadGot::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

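// The PLT sequence loads its real target from a GOT slot. Resolve the
// current destination by locating the got-jump part of the PLT and reading
// the pointer stored in the slot it references.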
address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}

address NativePltCall::plt_entry() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has move instruction first
    return entry;
  } else {
    // Static PLT code has move instruction second (from c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls, which have a C2I stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // Skip the c2i stub: a GOT load followed by a GOT jump.
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; the slot should always be aligned,
  // so the pointer store is atomic for threads executing the call concurrently.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump          = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeGotJump::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

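// Heuristic check for a GOT jump sequence: its fourth instruction word is
// expected to be "br x16" (encoding 0xd61f0200).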
bool NativeGotJump::is_GotJump() const {
  NativeInstruction *insn =
    nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
  return insn->encoding() == 0xd61f0200; // br x16
}

void NativeGotJump::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

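// Return the effective target of this call. A bl instruction only reaches
// +/-128 MB, so an out-of-range call branches to a trampoline stub inside
// the same nmethod; in that case the real destination is read from the stub.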
address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  if (code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  // make sure code pattern is actually mov reg64, imm64 instructions
}


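// The 64-bit constant is either stored in a nearby constant-pool slot
// (reached through a pc-relative load) or materialized directly by the
// instruction sequence; data() and set_data() handle both forms.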
intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const      { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const  {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const          {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
};

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
};

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // A safepoint poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // ldr(reg, [rthread, #offset]);
  // ldr(zr, [reg, #offset]);
  //
  // However, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that, but C2
  // has to do the load and the read as two independent instruction
  // generation steps. That is because, with a single macro sequence, the
  // generic C2 code can only add the oop map before the mov/adrp, while
  // the trap handler expects the oop map to be associated with the
  // load. With the load scheduled as a prior step, the oop map goes
  // where it is needed.
  //
  // So all we can do here is check that the marked instruction is a
  // load word to zr.
  return is_ldrw_to_zr(address(this));
}

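// ADRP has bit 31 set and bits 28..24 == 0b10000; bits 30..29 hold immlo
// and are masked out of the comparison.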
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

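// PC-relative load literal (LDR literal) encoding class; the mask ignores
// the V (SIMD/FP) bit, so FP/SIMD literal loads match as well.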
bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

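// 32-bit LDR (immediate, unsigned offset) whose destination register is
// wzr (Rt == 0b11111), i.e. a load performed only for its memory access
// side effect; this is the form used by safepoint polls.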
bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

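// Recognize the four-instruction pattern movz; movk; movk; blr: a target
// address materialized in a register followed by a register-indirect branch.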
bool NativeInstruction::is_general_jump() {
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}

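// MOVZ and MOVK share the layout sf | opc | 100101 | hw | imm16 | Rd;
// opc (bits 30..29) is 0b10 for movz and 0b11 for movk, so bits 30..23
// identify each instruction regardless of register width.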
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

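// 0xd4bbd5a1 encodes "dcps1 #0xdead"; executing it from user mode raises
// SIGILL, which the signal handler recognizes as the marker for a method
// made not_entrant or zombie.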
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "AArch64 cannot replace a non-jump with a jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() {  }

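// Emit "mov rscratch1, entry; br rscratch1" at code_pos: the target address
// is materialized in rscratch1 and branched through, so any destination in
// the address space is reachable.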
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.mov(rscratch1, entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

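// A trampoline stub keeps its 64-bit target in a data slot at data_offset;
// the stub loads that slot and branches to it, so patching the slot is
// enough to redirect calls that go through the trampoline.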
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}