/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

// Invalidate the ICache for the word we just patched at the given offset.
void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an adrp-based GOT load");
}

void NativeLoadGot::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

address NativeLoadGot::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

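// Reconstructed from the accessors below, a PLT entry is expected to take
// one of two shapes (a sketch, not an authoritative listing):
//
//   Virtual call:                       Static call:
//     NativeLoadGot   load from GOT       NativeGotJump   jump via GOT
//     NativeGotJump   jump via GOT        NativeLoadGot   c2i stub: load
//     NativeGotJump   resolve call        NativeGotJump   c2i stub: jump
//                                         NativeGotJump   resolve call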
address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
}

address NativePltCall::plt_entry() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has the GOT load first.
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has the GOT load first.
    return entry;
  } else {
    // Static PLT code has the GOT load second (it belongs to the c2i stub).
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls, which have a c2i stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  load->verify();
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // Skip the c2i stub: a GOT load followed by a GOT jump.
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // We are rewriting the value in the GOT; the slot is always aligned,
  // so the store is atomic.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump          = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
}

address NativeGotJump::got_address() const {
  return MacroAssembler::target_addr_for_insn((address)this);
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

bool NativeGotJump::is_GotJump() const {
  // A GOT jump sequence is expected to end with 'br x16' as its fourth
  // instruction.
  NativeInstruction *insn =
    nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
  return insn->encoding() == 0xd61f0200; // br x16
}

void NativeGotJump::verify() const {
  assert(is_adrp_at((address)this), "must be adrp");
}

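// A far call may be routed through a trampoline stub in the same nmethod.
// The stub emitted by MacroAssembler::emit_trampoline_stub is expected to
// look roughly like this (a sketch, not an authoritative listing):
//
//   ldr  rscratch1, 0f   // load the destination from the embedded slot
//   br   rscratch1
// 0:
//   .xword <destination>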
address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Otherwise we'd assert if the nmethod is a zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The assert_lock parameter switches off the assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
         CompiledICLocker::is_safe(addr_at(0)),
         "concurrent code patching");

  ResourceMark rm;
  address addr_call = addr_at(0);
  bool reachable = Assembler::reachable_from_branch_at(addr_call, dest);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

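  // Note the ordering: the trampoline (when present) is patched before the
  // branch below, so a concurrently executing thread that reaches the
  // trampoline always reads a valid destination.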
  // Patch the call.
  if (reachable) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  if (code->is_nmethod()) {
    return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
  }

  return NULL;
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  if (! (nativeInstruction_at(instruction_address())->is_movz() ||
        is_adrp_at(instruction_address()) ||
        is_ldr_literal_at(instruction_address())) ) {
    fatal("should be MOVZ or ADRP or LDR (literal)");
  }
}


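// A NativeMovConstReg materializes its constant either directly in the
// instruction stream (movz/movk sequence or adrp) or indirectly through a
// constant-pool slot (ldr literal); data() and set_data() handle both forms.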
intptr_t NativeMovConstReg::data() const {
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    // The constant lives in a constant-pool slot; read it from there.
    return *(intptr_t*)addr;
  } else {
    // The constant is encoded in the instruction stream itself.
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    // Patch the constant-pool slot. This is data, not code, so no
    // ICache flush is needed.
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in the oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}


//-------------------------------------------------------------------

int NativeMovRegMem::offset() const  {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // adrp-based constant-pool reference: the offset lives in the pool
    // slot (stored as an int64_t by set_offset below).
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return (int)*(int64_t*)addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(int64_t*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  // target_addr_for_insn asserts if it does not recognize the instruction.
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}


//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use the sequence movptr(r, 0); br(r),
  // i.e. a jump to 0, when we need to leave space for a wide immediate
  // load.

  // Return -1 if this is a jump to self or to 0.
  if ((dest == (address)this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  // As a special case we also use a jump to 0 when first generating
  // a general jump.

  // Return -1 if this is a jump to self or to 0.
  if ((dest == (address)this) || dest == 0) {
    dest = (address) -1;
  }
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use a jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // ldr(reg, [rthread, #offset]);
  // ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a
  // load word to zr
  return is_ldrw_to_zr(address(this));
}

bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  // Match ADRP: bit 31 set and op == 10000 in bits 28..24; bits 30..29
  // hold the low immediate bits and are masked out.
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  // Match LDR (literal), accepting both GPR and FP/SIMD forms.
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  // LDR (immediate, 32-bit) with Rt == zr.
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

bool NativeInstruction::is_general_jump() {
  // Expect the sequence laid down by NativeGeneralJump::insert_unconditional:
  // movz; movk; movk; followed by an indirect branch.
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}

bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

bool NativeInstruction::is_stop() {
  return uint_at(0) == 0xd4bbd5c1; // dcps1 #0xdeae
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");

#ifdef ASSERT
  // This may be the temporary nmethod generated while we're AOT
  // compiling.  Such an nmethod doesn't begin with a NOP but with an ADRP.
  if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
    assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
           || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
           "Aarch64 cannot replace non-jump with jump");
  }
#endif

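  // The branch below is an unconditional "b <dest>": opcode 0b000101 in
  // bits 31..26 and the word-scaled displacement in imm26.  For example, a
  // (hypothetical) displacement of 0x100 bytes encodes as
  // (0b000101 << 26) | (0x100 >> 2) == 0x14000040, i.e. "b .+0x100".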
  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.movptr(rscratch1, (uintptr_t)entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  // Make sure the new destination is visible before anyone publishes a
  // branch to this trampoline.
  OrderAccess::release();
}

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}