/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeCall::verify() { ; }

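// Note: a bl/b instruction only reaches +/-128 MB from the call site.  Calls
// to more distant code go through a trampoline stub in the nmethod's stub
// section, so the raw branch target of the bl may be the stub rather than
// the real callee; in that case we resolve through the stub.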
address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// Add parameter assert_lock to switch off assertion
// during code generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

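  // Update the trampoline (if any) before the bl itself: a thread racing
  // through this call site then either takes the old direct branch or goes
  // through the stub, which already holds the new destination.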
  // Patch the constant in the call's trampoline stub.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

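// Return the address of this call's trampoline stub, or NULL if the call has
// no associated trampoline.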
address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  // If the codeBlob is not an nmethod, this is because we get here from the
  // CodeBlob constructor, which is called within the nmethod constructor.
  return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  // make sure code pattern is actually mov reg64, imm64 instructions
}


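// Return the 64-bit value this instruction materializes: either the word in
// the constant pool entry it references, or the address/immediate encoded in
// the instruction stream itself.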
intptr_t NativeMovConstReg::data() const {
  // das(uint64_t(instruction_address()),2);
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

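// Overwrite the materialized value.  A constant-pool reference is updated in
// place; otherwise the instruction sequence (e.g. movz/movk) is re-patched
// and the icache flushed.  Any oop/metadata slot recorded for this site in
// the nmethod is updated to match.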
void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    // Store x into the instruction stream.
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }

  // Find and replace the oop/metadata corresponding to this
  // instruction in oops section.
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop* oop_addr = iter.oop_reloc()->oop_addr();
        *oop_addr = cast_to_oop(x);
        break;
      } else if (iter.type() == relocInfo::metadata_type) {
        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
        *metadata_addr = (Metadata*)x;
        break;
      }
    }
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const      { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const  {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const          {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
};

//-------------------------------------------------------------------

address NativeGeneralJump::jump_destination() const {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
  address dest = (address) move->data();

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about

  // return -1 if jump to self
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeGeneralJump::set_jump_destination(address dest) {
  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about
  if (dest == (address) -1) {
    dest = instruction_address();
  }

  move->set_data((uintptr_t) dest);
};

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // ldr(reg, [rthread, #offset]);
  // ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a load
  // word to zr
  return is_ldrw_to_zr(address(this));
}

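// adr and adrp share opcode bits 28..24 = 0b10000; bit 31 distinguishes
// adrp (1) from adr (0), hence the mask below.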
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

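// ldr Wt, [Xn, #imm] (32-bit load, unsigned offset) with Rt == 0b11111, i.e.
// a load that discards its result into the zero register.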
bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

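// Recognize the four-instruction pattern (movz, movk, movk, branch-to-register)
// used for a general jump.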
bool NativeInstruction::is_general_jump() {
  if (is_movz()) {
    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
    if (inst1->is_movk()) {
      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
      if (inst2->is_movk()) {
        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
        if (inst3->is_blr()) {
          return true;
        }
      }
    }
  }
  return false;
}

bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------

// MT-safe inserting of a jump over a jump or a nop (used by
// nmethod::make_not_entrant_or_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "Aarch64 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

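    // Encode "b dest": opcode 0b000101 in bits 31..26 and the signed word
    // offset (disp / 4) in the low 26 bits.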
    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() {  }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.mov(rscratch1, entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

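// A trampoline stub keeps its 64-bit destination in a data word at
// data_offset within the stub, so reading or retargeting the stub is just a
// load from or store to that word.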
address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}

// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  address stub = NULL;

  if (a.far_branches()
      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
  }

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}