/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"


#define __ masm->

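// Loads a value of the given BasicType from src into dst (float and
// double loads target v0).  For heap oops, compressed oops are loaded
// and decoded when UseCompressedOops is enabled; IS_NOT_NULL selects
// the cheaper non-null decode.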
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

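// Stores val of the given BasicType to dst (float and double stores
// come from v0).  A noreg val means store a null oop (zr).  With
// UseCompressedOops, heap oops are encoded in place first, so val is
// clobbered.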
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    val = val == noreg ? zr : val;
    if (in_heap) {
      if (UseCompressedOops) {
        assert(!dst.uses(val), "not enough registers");
        if (val != zr) {
          __ encode_heap_oop(val);
        }
        __ strw(val, dst);
      } else {
        __ str(val, dst);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0,  dst); break;
  case T_DOUBLE:  __ strd(v0,  dst); break;
  default: Unimplemented();
  }
}

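// Plain pointer comparison; barrier sets that need to resolve or mask
// oops before comparing may override this.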
void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Register obj2) {
  __ cmp(obj1, obj2);
}

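// Resolve a jobject from native code: clear the weak tag bit and load
// through the handle.  This base version never takes slowpath, and
// jni_env and tmp are unused here.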
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate.
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}

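// TLAB fast path: bump the thread's TLAB top by the allocation size,
// branching to slow_case if the new top would pass the TLAB end.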
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

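// Eden fast path: atomically bump the shared heap top with an
// ldaxr/stlxr retry loop.  Branches to slow_case if inline contiguous
// allocation is unsupported, the size computation wraps around, or the
// heap is exhausted.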
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      uint64_t offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      uint64_t offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
      __ ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

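// Bump the thread-local allocated-bytes counter by var_size_in_bytes
// if it is a valid register, otherwise by con_size_in_bytes.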
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

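// nmethod entry barrier: load the nmethod's inline guard value and
// compare it with the per-thread disarmed value.  If they differ, call
// the method entry barrier stub before continuing.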
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (bs_nm == NULL) {
    return;
  }

  Label skip, guard;
  Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));

  __ ldrw(rscratch1, guard);

  // Subsequent loads of oops must occur after load of guard value.
  // BarrierSetNMethod::disarm sets guard with release semantics.
  __ membar(__ LoadLoad);
  __ ldrw(rscratch2, thread_disarmed_addr);
  __ cmpw(rscratch1, rscratch2);
  __ br(Assembler::EQ, skip);

  __ mov(rscratch1, StubRoutines::aarch64::method_entry_barrier());
  __ blr(rscratch1);
  __ b(skip);

  __ bind(guard);

  __ emit_int32(0);   // nmethod guard value. Skipped over in common case.

  __ bind(skip);
}

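// c2i entry barrier: check that rmethod's holder ClassLoaderData is
// still live, either as a strong CLD or as a weak CLD whose holder oop
// is still reachable.  If not, the method may be concurrently
// unloading, so jump to the handle_wrong_method stub.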
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldr(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  __ stp(r10, r11, Address(__ pre(sp, -2 * wordSize)));
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  // Uses rscratch1 & rscratch2, so we must pass new temporaries.
  __ resolve_weak_handle(r10, r11);
  __ mov(rscratch1, r10);
  __ ldp(r10, r11, Address(__ post(sp, 2 * wordSize)));
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}