1 /*
   2  * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.inline.hpp"
  27 #include "assembler_arm.inline.hpp"
  28 #include "code/relocInfo.hpp"
  29 #include "nativeInst_arm.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/safepoint.hpp"
  32 
  33 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
  34 
  35   NativeMovConstReg* ni = nativeMovConstReg_at(addr());
  36 #if defined(AARCH64) && defined(COMPILER2)
  37   if (ni->is_movz()) {
  38     assert(type() == relocInfo::oop_type, "!");
  39     if (verify_only) {
  40       uintptr_t d = ni->data();
  41       guarantee((d >> 32) == 0, "not narrow oop");
  42       narrowOop no = d;
  43       oop o = oopDesc::decode_heap_oop(no);
  44       guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
  45     } else {
  46       ni->set_data((intptr_t)x);
  47     }
  48     return;
  49   }
  50 #endif
  51   if (verify_only) {
  52     guarantee(ni->data() == (intptr_t)(x + o), "instructions must match");
  53   } else {
  54     ni->set_data((intptr_t)(x + o));
  55   }
  56 }
  57 
  58 address Relocation::pd_call_destination(address orig_addr) {
  59   address pc = addr();
  60 
  61   int adj = 0;
  62   if (orig_addr != NULL) {
  63     // We just moved this call instruction from orig_addr to addr().
  64     // This means that, when relative, its target will appear to have grown by addr() - orig_addr.
  65     adj = orig_addr - pc;
  66   }
  67 
  68   RawNativeInstruction* ni = rawNativeInstruction_at(pc);
  69 
  70 #if (!defined(AARCH64))
  71   if (NOT_AARCH64(ni->is_add_lr()) AARCH64_ONLY(ni->is_adr_aligned_lr())) {
  72     // On arm32, skip the optional 'add LR, PC, #offset'
  73     // (Allowing the jump support code to handle fat_call)
  74     pc = ni->next_raw_instruction_address();
  75     ni = nativeInstruction_at(pc);
  76   }
  77 #endif
  78 
  79   if (AARCH64_ONLY(ni->is_call()) NOT_AARCH64(ni->is_bl())) {
  80     // For arm32, fat_call are handled by is_jump for the new 'ni',
  81     // requiring only to support is_bl.
  82     //
  83     // For AARCH64, skipping a leading adr is not sufficient
  84     // to reduce calls to a simple bl.
  85     return rawNativeCall_at(pc)->destination(adj);
  86   }
  87 
  88   if (ni->is_jump()) {
  89     return rawNativeJump_at(pc)->jump_destination(adj);
  90   }
  91   ShouldNotReachHere();
  92   return NULL;
  93 }
  94 
  95 void Relocation::pd_set_call_destination(address x) {
  96   address pc = addr();
  97   NativeInstruction* ni = nativeInstruction_at(pc);
  98 
  99 #if (!defined(AARCH64))
 100   if (NOT_AARCH64(ni->is_add_lr()) AARCH64_ONLY(ni->is_adr_aligned_lr())) {
 101     // On arm32, skip the optional 'add LR, PC, #offset'
 102     // (Allowing the jump support code to handle fat_call)
 103     pc = ni->next_raw_instruction_address();
 104     ni = nativeInstruction_at(pc);
 105   }
 106 #endif
 107 
 108   if (AARCH64_ONLY(ni->is_call()) NOT_AARCH64(ni->is_bl())) {
 109     // For arm32, fat_call are handled by is_jump for the new 'ni',
 110     // requiring only to support is_bl.
 111     //
 112     // For AARCH64, skipping a leading adr is not sufficient
 113     // to reduce calls to a simple bl.
 114     rawNativeCall_at(pc)->set_destination(x);
 115     return;
 116   }
 117 
 118   if (ni->is_jump()) { // raw jump
 119     rawNativeJump_at(pc)->set_jump_destination(x);
 120     return;
 121   }
 122   ShouldNotReachHere();
 123 }
 124 
 125 
 126 address* Relocation::pd_address_in_code() {
 127   return (address*)addr();
 128 }
 129 
 130 address Relocation::pd_get_address_from_code() {
 131   return *pd_address_in_code();
 132 }
 133 
// No-op on this platform: a safepoint poll relocation needs no
// adjustment when the code is moved between buffers.
void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
}
 136 
// Fix up the metadata value inlined in the instruction stream at this
// relocation, if the encoding requires it.
//
//   x - the metadata address the instruction(s) should hold
//
// Only relocations pointing at instructions are supported here; values
// embedded in the constant pool must not reach this path.
void metadata_Relocation::pd_fix_value(address x) {
  assert(! addr_in_const(), "Do not use");
#ifdef AARCH64
#ifdef COMPILER2
  // With C2 a mov_slow sequence already holds the full value inline;
  // nothing to patch in that case.
  NativeMovConstReg* ni = nativeMovConstReg_at(addr());
  if (ni->is_mov_slow()) {
    return;
  }
#endif
  set_value(x);
#else
  // arm32: without movw/movt support the value is loaded from memory
  // and must be rewritten; with movw/movt the inlined immediate is
  // assumed already correct (only sanity-checked under ASSERT).
  if (!VM_Version::supports_movw()) {
    set_value(x);
#ifdef ASSERT
  } else {
    // the movw/movt data should be correct
    NativeMovConstReg* ni = nativeMovConstReg_at(addr());
    assert(ni->is_movw(), "not a movw");
    // The following assert should be correct but the shared code
    // currently 'fixes' the metadata instructions before the
    // metadata_table is copied in the new method (see
    // JDK-8042845). This means that 'x' (which comes from the table)
    // does not match the value inlined in the code (which is
    // correct). Failure can be temporarily ignored since the code is
    // correct and the table is copied shortly afterward.
    //
    // assert(ni->data() == (int)x, "metadata relocation mismatch");
#endif
  }
#endif // !AARCH64
}