/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"

#define __ masm->

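// Loads a value of the given BasicType from src into dst, applying any
// decoding the decorators call for: compressed heap oops are decoded
// after the load, while IN_NATIVE oops are raw and loaded as-is.
// tmp1 and tmp_thread are unused by this base version.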
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (oop_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
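  // Floating-point loads always target v0, regardless of dst.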
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

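// Stores val of the given BasicType to dst.  Heap oops are encoded in
// place first when compressed oops are in use; a noreg val stores a
// null (zr).  tmp1 and tmp2 are unused by this base version.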
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    val = val == noreg ? zr : val;
    if (in_heap) {
      if (UseCompressedOops) {
        assert(!dst.uses(val), "not enough registers");
        if (val != zr) {
          __ encode_heap_oop(val);
        }
        __ strw(val, dst);
      } else {
        __ str(val, dst);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0,  dst); break;
  case T_DOUBLE:  __ strd(v0,  dst); break;
  default: Unimplemented();
  }
}

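// Plain pointer comparison.  Collectors whose oops may need resolution
// before comparing (e.g. via forwarding pointers) override this with a
// barrier-aware compare.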
void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Register obj2) {
  __ cmp(obj1, obj2);
}

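// Resolves a jobject from native code: clears the weak tag bit and
// dereferences the handle.  This base version never branches to
// slowpath; collectors that need extra handling of the resolved oop
// override it.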
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

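  // Bump-pointer allocation in the thread-local buffer: load the current
  // top, compute the new end, and take the slow path if it would pass
  // tlab_end (unsigned compare below).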
  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

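// Allocates directly from the shared eden by atomically bumping the heap
// top with an ldaxr/stlxr retry loop; branches to slow_case if inline
// contiguous allocation is unsupported, or if the new end would wrap
// around or pass the heap end.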
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      unsigned long offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
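      // Load-exclusive of the heap top; pairs with the stlxr below to
      // make the pointer bump atomic.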
      __ ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

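// Bumps the per-thread allocated-bytes counter by the size just
// allocated, using t1 as a scratch register.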
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}