/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

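// NativeLoadGot: an adrp-based load that reads a value out of a GOT entry.
// got_address() returns the GOT slot the instruction addresses and data()
// returns the slot's current contents.  Used by the AOT PLT call machinery
// below.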
void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an adrp-based GOT load");
}

void NativeLoadGot::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeLoadGot::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

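// NativePltCall: an AOT call that dispatches through a PLT entry.  Judging
// from the helpers below, a virtual entry starts with a Method* GOT load
// followed by a GOT-indirect jump, while a static entry starts with the
// GOT-indirect jump and is followed by a c2i stub (a GOT load plus a GOT
// jump).  The accessors locate the individual pieces of the entry.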
address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}

address NativePltCall::plt_entry() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has a move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has a move instruction first
    return entry;
  } else {
    // Static PLT code has the move instruction second (it belongs to the c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls which have a C2I stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // Skip the c2i stub: two instructions (a GOT load followed by a GOT jump)
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; it should always be
  // pointer-aligned, so the word store is atomic.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

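// Reset the c2i stub to its unused state: zero the Method* GOT slot used by
// the stub's load and redirect the stub's jump to an invalid address.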
void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump          = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

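// NativeGotJump: an adrp-based, GOT-indirect jump used by the AOT PLT
// entries above.  got_address() returns the GOT slot it reads and
// destination() returns the address currently stored in that slot.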
address NativeGotJump::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

bool NativeGotJump::is_GotJump() const {
  NativeInstruction *insn =
    nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
  return insn->encoding() == 0xd61f0200; // br x16
}

void NativeGotJump::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

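// Return the ultimate target of this call: if the bl branches to a
// trampoline stub inside the same nmethod, the real destination is read
// out of the stub.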
address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we would hit an assert if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  bool reachable = Assembler::reachable_from_branch_at(addr_call, dest);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (reachable) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

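// Locate the trampoline stub associated with this call, if any: either the
// bl already branches to a trampoline stub inside the containing code blob,
// or the nmethod's relocation info records one for this call site.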
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  if (code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (! (nativeInstruction_at(instruction_address())->is_movz() ||
        is_adrp_at(instruction_address()) ||
        is_ldr_literal_at(instruction_address())) ) {
    fatal("should be MOVZ or ADRP or LDR (literal)");
  }
}


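// Return the value this instruction materializes: either the word read from
// the constant pool entry it references, or the address it computes directly.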
intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

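// Patch the value this instruction materializes, either by storing into the
// constant pool word it references or by rewriting the instruction itself,
// and keep any oop/metadata relocation for this site in sync.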
void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in the oop or metadata section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

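// NativeMovRegMem: a reg<->mem instruction whose address operand (either an
// in-instruction offset or a constant-pool reference) can be read and
// patched via the accessors below.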
address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const  {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}


//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use the sequence movptr(r, 0); br(r);
  // i.e. a jump to 0, when we need to leave space for a wide immediate
  // load.

  // Return -1 if the jump is to self or to 0.
  if ((dest == (address)this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use a jump to 0 when first generating
  // a general jump.

  // Return -1 if the jump is to self or to 0.
  if ((dest == (address)this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // ldr(reg, [rthread, #offset]);
  // ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a
  // load word to zr
  return is_ldrw_to_zr(address(this));
}

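// The predicates below decode raw instruction bits (via
// Instruction_aarch64::extract) to recognize specific instruction forms.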
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

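// A "general jump" is the four-instruction sequence laid down by
// NativeGeneralJump::insert_unconditional below: movz/movk/movk to build
// the target address, followed by a register-indirect branch.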
bool NativeInstruction::is_general_jump() {
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}

bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------


// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

#ifdef ASSERT
  // This may be the temporary nmethod generated while we're AOT
  // compiling.  Such an nmethod doesn't begin with a NOP but with an ADRP.
  if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
    assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
           || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
           "Aarch64 cannot replace non-jump with jump");
  }
#endif

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.movptr(rscratch1, (uintptr_t)entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

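// A trampoline stub holds the 64-bit target of a far call in a data word at
// data_offset; reading or patching that word is how the call is resolved or
// redirected.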
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}