/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
void NativeCall::verify() { ; }

address NativeCall::destination() const {
  address addr = (address)this;
  address destination = instruction_address() + displacement();

  // Do we use a trampoline stub for this call?
  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // else we would assert if the nmethod is a zombie
  assert(cb && cb->is_nmethod(), "sanity");
  nmethod *nm = (nmethod *)cb;
  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
    // Yes we do, so get the real destination from the trampoline stub.
    const address trampoline_stub_addr = destination;
    destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
  }

  return destination;
}
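
// A trampoline stub (see MacroAssembler::emit_trampoline_stub) looks
// roughly like this, with the 64-bit target stored data_offset bytes from
// the start of the stub:
//
//   ldr  rscratch1, 0f    // load the stored target
//   br   rscratch1        // and branch to it
// 0:
//   .quad <target>        // patched by NativeCallTrampolineStub::set_destination
//
// It exists so that a bl, whose range is only +-128 MB, can still reach a
// far destination: the bl branches to the stub, which lives in the same
// code blob, and the stub forwards to the real target.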

// Similar to replace_mt_safe, but just changes the destination. The
// important thing is that free-running threads are able to execute this
// call instruction at all times.
//
// Used in the runtime linkage of calls; see class CompiledIC.
//
// The assert_lock parameter switches off the assertion during code
// generation, where no patching lock is needed.
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
  assert(!assert_lock ||
         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
         "concurrent code patching");

  ResourceMark rm;
  int code_size = NativeInstruction::instruction_size;
  address addr_call = addr_at(0);
  assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");

  // Patch the constant in the call's trampoline stub first, so that a
  // thread already routed through the trampoline sees the new destination
  // before the branch itself is redirected.
  address trampoline_stub_addr = get_trampoline();
  if (trampoline_stub_addr != NULL) {
    assert (! is_NativeCallTrampolineStub_at(dest), "chained trampolines");
    nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
  }

  // Patch the call: branch directly if the destination is in range,
  // otherwise go via the trampoline.
  if (Assembler::reachable_from_branch_at(addr_call, dest)) {
    set_destination(dest);
  } else {
    assert (trampoline_stub_addr != NULL, "we need a trampoline");
    set_destination(trampoline_stub_addr);
  }

  ICache::invalidate_range(addr_call, instruction_size);
}

address NativeCall::get_trampoline() {
  address call_addr = addr_at(0);

  CodeBlob *code = CodeCache::find_blob(call_addr);
  assert(code != NULL, "Could not find the containing code blob");

  address bl_destination
    = MacroAssembler::pd_call_destination(call_addr);
  if (code->content_contains(bl_destination) &&
      is_NativeCallTrampolineStub_at(bl_destination))
    return bl_destination;

  // If the code blob is not an nmethod, we got here from the CodeBlob
  // constructor, which is called from within the nmethod constructor.
  return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
}

// Inserts a native call instruction at a given pc
void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }

//-------------------------------------------------------------------

void NativeMovConstReg::verify() {
  // make sure the code pattern is actually a mov reg64, imm64 instruction sequence
}

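// The 64-bit constant is materialized either via the constant pool (an
// adrp/ldr or ldr-literal against a pool slot, see maybe_cpool_ref) or as
// an inline movz/movk immediate sequence; data() and set_data() below
// handle both shapes.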
intptr_t NativeMovConstReg::data() const {
  address addr = MacroAssembler::target_addr_for_insn(instruction_address());
  if (maybe_cpool_ref(instruction_address())) {
    return *(intptr_t*)addr;
  } else {
    return (intptr_t)addr;
  }
}

void NativeMovConstReg::set_data(intptr_t x) {
  if (maybe_cpool_ref(instruction_address())) {
    address addr = MacroAssembler::target_addr_for_insn(instruction_address());
    *(intptr_t*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

address NativeMovRegMem::instruction_address() const { return addr_at(instruction_offset); }

int NativeMovRegMem::offset() const  {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (Instruction_aarch64::extract(insn, 28, 24) == 0b10000) {
    // The offset lives in a constant-pool slot (written as a long by
    // set_offset below); read it back as an int, not as a single byte.
    address addr = MacroAssembler::target_addr_for_insn(pc);
    return *(int*)addr;
  } else {
    return (int)(intptr_t)MacroAssembler::target_addr_for_insn(instruction_address());
  }
}

void NativeMovRegMem::set_offset(int x) {
  address pc = instruction_address();
  unsigned insn = *(unsigned*)pc;
  if (maybe_cpool_ref(pc)) {
    address addr = MacroAssembler::target_addr_for_insn(pc);
    *(long*)addr = x;
  } else {
    MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
    ICache::invalidate_range(instruction_address(), instruction_size);
  }
}

void NativeMovRegMem::verify() {
#ifdef ASSERT
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());
#endif
}

//--------------------------------------------------------------------------------

void NativeJump::verify() { ; }


void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}


address NativeJump::jump_destination() const {
  address dest = MacroAssembler::target_addr_for_insn(instruction_address());

  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.

  // Return -1 if the jump is to self.
  dest = (dest == (address) this) ? (address) -1 : dest;
  return dest;
}

void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1)
    dest = instruction_address();

  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}

//-------------------------------------------------------------------

bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  // adrp(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // or
  //
  // mov(reg, polling_page);
  // ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that the marked instruction is a
  // load word to zr
  return is_ldrw_to_zr(address(this));
}

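// ADRP: op (bit 31) == 1 and bits 28..24 == 0b10000; the immlo field
// (bits 30..29) is masked off. ADR has op == 0 and so does not match.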
bool NativeInstruction::is_adrp_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 24) & 0b10011111) == 0b10010000;
}

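// LDR (literal): bits 28..27 == 0b11 and bits 25..24 == 0b00; the opc
// field and the V (SIMD/FP) bit are ignored, so every literal-load form
// matches.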
bool NativeInstruction::is_ldr_literal_at(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000;
}

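// ldr wzr, [Xn, #imm]: bits 31..22 == 0b1011100101 is the 32-bit LDR
// (immediate, unsigned offset) encoding, and Rt (bits 4..0) == 0b11111
// selects wzr.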
bool NativeInstruction::is_ldrw_to_zr(address instr) {
  unsigned insn = *(unsigned*)instr;
  return (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
          Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
}

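// MOVZ and MOVK share the move-wide-immediate layout: bits 30..23 hold
// opc (10 for movz, 11 for movk) followed by the fixed pattern 100101.
// The sf bit (31) is ignored, so the 32- and 64-bit forms both match.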
bool NativeInstruction::is_movz() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
}

bool NativeInstruction::is_movk() {
  return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

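// 0xd4bbd5a1 is dcps1 #0xdead: the dcps1 base encoding 0xd4a00001 with
// imm16 == 0xdead at bits 20..5. Executed outside debug state it raises
// SIGILL, which the signal handler maps to the not-entrant/zombie case.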
bool NativeInstruction::is_sigill_zombie_not_entrant() {
  return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
}

void NativeIllegalInstruction::insert(address code_pos) {
  *(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}

//-------------------------------------------------------------------

// MT-safe insertion of a jump over a jump or a nop (used by
// nmethod::make_zombie)

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {

  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
         || nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
         "AArch64 cannot replace non-jump with jump");

  // Patch this nmethod atomically.
  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
    ptrdiff_t disp = dest - verified_entry;
    guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");

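    // b <dest>: opcode 0b000101 in bits 31..26, word-scaled displacement
    // (disp >> 2) in imm26. A single aligned word store patches it atomically.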
    unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
    *(unsigned int*)verified_entry = insn;
  } else {
    // We use an illegal instruction for marking a method as
    // not_entrant or zombie.
    NativeIllegalInstruction::insert(verified_entry);
  }

  ICache::invalidate_range(verified_entry, instruction_size);
}

void NativeGeneralJump::verify() {  }

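// movptr expands to a fixed-length movz/movk sequence, so the jump always
// occupies the same number of instructions and can later be patched in
// place without changing its size.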
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

  CodeBuffer cb(code_pos, instruction_size);
  MacroAssembler a(&cb);

  a.movptr(rscratch1, (uintptr_t)entry);
  a.br(rscratch1);

  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  ShouldNotCallThis();
}

bool NativeInstruction::is_dtrace_trap() { return false; }

address NativeCallTrampolineStub::destination(nmethod *nm) const {
  return ptr_at(data_offset);
}

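// The release fence orders the store of the new target before any
// subsequent patching of the bl at the call site (see
// set_destination_mt_safe), so the trampoline never forwards to a stale
// target once the bl has been redirected.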
void NativeCallTrampolineStub::set_destination(address new_destination) {
  set_ptr_at(data_offset, new_destination);
  OrderAccess::release();
}