/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"

#define __ masm->

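// Plain load with no GC barrier: for oops, decode the (possibly compressed)
// heap oop; other BasicTypes load directly.  Float and double results are
// returned in v0.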
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

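// Plain store with no GC barrier: for oops, encode the compressed oop when
// enabled; a val of noreg means "store null" and is written as zr.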
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    val = val == noreg ? zr : val;
    if (in_heap) {
      if (UseCompressedOops) {
        assert(!dst.uses(val), "not enough registers");
        if (val != zr) {
          __ encode_heap_oop(val);
        }
        __ strw(val, dst);
      } else {
        __ str(val, dst);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0,  dst); break;
  case T_DOUBLE:  __ strd(v0,  dst); break;
  default: Unimplemented();
  }
}

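// Default oop equality is a plain register compare; a GC that needs extra
// work to compare (possibly forwarded) objects overrides this.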
void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Register obj2) {
  __ cmp(obj1, obj2);
}

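// Resolve a jobject from native code without calling into the VM: clear the
// weak tag bit and load the oop through the handle.  This base version never
// takes the slow path; a collector that needs a read barrier here overrides
// it and may branch to slowpath.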
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
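  // Take the slow path if the new top would run past the current TLAB end.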
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      unsigned long offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
      __ ldaxr(obj, rscratch1);
    }
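    // obj now holds the heap top just read with a load-acquire exclusive;
    // the matching stlxr below only publishes the new top if no other
    // thread has written the location since this ldaxr.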

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

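// Bump the thread-local allocated-bytes counter by the size of the newly
// allocated object (var_size_in_bytes if valid, otherwise con_size_in_bytes).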
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

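// nmethod entry barriers are not implemented on this port yet (8210498).  The
// disabled code below sketches the intended sequence: load the per-thread
// "disarmed" word and call the method entry barrier stub when it is non-zero.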
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
// DMS CHECK: 8210498: nmethod entry barriers are not implemented
#if 0
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }
  Label continuation;
  Address disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));
  __ align(8);
  __ ldr(rscratch1, disarmed_addr);
  __ cbz(rscratch1, continuation);
  __ blr(RuntimeAddress(StubRoutines::aarch64::method_entry_barrier()));
  __ bind(continuation);
#endif
}
--- EOF ---