/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"

#define __ masm->

void BarrierSetAssembler::load_word_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                       Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live. It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    assert(in_heap || in_native, "why else?");
    __ ldr(dst, src);
    break;
  }
  case T_INT:     __ ldr(dst, src); break;
  case T_ADDRESS: __ ldr(dst, src); break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::store_word_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                        Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (val == noreg) {
      // Storing null: materialize zero in tmp1.
      assert(tmp1 != noreg, "must provide valid register");
      __ mov(tmp1, 0);
      val = tmp1;
    }
    assert(in_heap || in_native, "why else?");
    __ str(val, dst);
    break;
  }
  case T_INT:     __ str(val, dst); break;
  case T_ADDRESS: __ str(val, dst); break;
  default: Unimplemented();
  }
}
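// Illustrative note (assumption, not part of the original file): callers do
// not invoke these helpers directly but go through the active BarrierSet, so
// that a collector-specific subclass can wrap the raw ldr/str with its own
// pre/post barriers. A minimal sketch, assuming the usual HotSpot accessors
// and hypothetical register names:
//
//   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
//   bs->load_word_at(masm, IN_HEAP, T_OBJECT, dst, Address(obj, offset), tmp1, rthread);
//
// This base implementation only asserts on the decorator flags (IN_HEAP,
// IN_NATIVE, IS_NOT_NULL); subclasses use them to decide which barriers to emit.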
void BarrierSetAssembler::load_tos_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                      Address src, Register tmp1, Register tmp_thread) {

  // LR is live. It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_SEQ_CST) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    assert(in_heap || in_native, "why else?");
    __ ldr(r0, src);
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (r0, src); break;
  case T_BYTE:    __ load_signed_byte   (r0, src); break;
  case T_CHAR:    __ load_unsigned_short(r0, src); break;
  case T_SHORT:   __ load_signed_short  (r0, src); break;
  case T_DOUBLE:
    if (hasFPU()) {
      if (!src.is_safe_for(atomic ? Address::IDT_ATOMIC : Address::IDT_DOUBLE)) {
        assert(tmp1 != noreg, "must be");
        __ lea(tmp1, src);
        src = Address(tmp1);
      }
      if (atomic) {
        // LDREXD needs a plain [base] address: load the raw bits into
        // r0:r1, then move them into d0.
        __ atomic_ldrd(r0, r1, src.base());
        __ vmov_f64(d0, r0, r1);
      } else {
        __ vldr_f64(d0, src);
      }
      break;
    }
    // else fall-through
  case T_LONG:
    if (atomic) {
      if (!src.is_safe_for(Address::IDT_ATOMIC)) {
        assert(tmp1 != noreg, "must be");
        __ lea(tmp1, src);
        src = Address(tmp1);
      }
      __ atomic_ldrd(r0, r1, src.base());
    } else {
      __ ldrd(r0, r1, src);
    }
    break;
  case T_FLOAT:
    if (hasFPU()) {
      if (!src.is_safe_for(Address::IDT_FLOAT)) {
        assert(tmp1 != noreg, "must be");
        __ lea(tmp1, src);
        src = Address(tmp1);
      }
      __ vldr_f32(f0, src);
      break;
    }
    // else fall-through
  case T_ADDRESS:
    // fall-through
  case T_INT: __ ldr(r0, src); break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::store_tos_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                       Address dst, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool atomic = (decorators & MO_SEQ_CST) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    assert(in_heap || in_native, "why else?");
    __ str(r0, dst);
    break;
  }
  case T_BOOLEAN:
    __ andr(r0, r0, 0x1);  // boolean is true if LSB is 1
    __ strb(r0, dst);
    break;
  case T_BYTE:  __ strb(r0, dst); break;
  case T_CHAR:  __ strh(r0, dst); break;
  case T_SHORT: __ strh(r0, dst); break;
  case T_FLOAT:
    if (hasFPU()) {
      if (!dst.is_safe_for(Address::IDT_FLOAT)) {
        assert(tmp1 != noreg, "must be");
        __ lea(tmp1, dst);
        dst = Address(tmp1);
      }
      __ vstr_f32(f0, dst);
      break;
    }
    // else fall-through
  case T_INT: __ str(r0, dst); break;
  case T_DOUBLE:
    if (hasFPU()) {
      if (atomic) {
        // Move the raw bits into r0:r1 and emit the store through the
        // atomic T_LONG path below.
        __ vmov_f64(r0, r1, d0);
        // fall-through to T_LONG
      } else {
        if (!dst.is_safe_for(Address::IDT_DOUBLE)) {
          assert(tmp1 != noreg, "must be");
          __ lea(tmp1, dst);
          dst = Address(tmp1);
        }
        __ vstr_f64(d0, dst);
        break;
      }
    }
    // else fall-through
  case T_LONG:
    if (atomic) {
      assert(tmp1 != noreg && tmp2 != noreg, "must be");
      assert_different_registers(rscratch1, tmp1, tmp2);
      Register base;
      if (!dst.is_safe_for(Address::IDT_ATOMIC) ||
          dst.uses(tmp1) || dst.uses(tmp2)) {
        __ lea(rscratch1, dst);
        base = rscratch1;
      } else {
        base = dst.base();  // strexd only supports [base] addressing
      }
      __ atomic_strd(r0, r1, base, tmp1, tmp2);
    } else {
      __ strd(r0, r1, dst);
    }
    break;
  case T_ADDRESS: __ str(r0, dst); break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Register obj2) {
  __ cmp(obj1, obj2);
}

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate.
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ bic(obj, obj, JNIHandles::weak_tag_mask);

  __ ldr(obj, Address(obj, 0));  // *obj
}
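// Note on try_resolve_jobject_in_native above: a jobject tags weak handles in
// its low bit (hence the STATIC_ASSERT that weak_tag_mask == 1), and BIC with
// the mask recovers the handle address for both strong and weak handles
// before it is dereferenced. The slowpath label is unused in this base
// implementation; a collector that must intercept weak-handle resolution can
// override the method and branch to it.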
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ b(slow_case, Assembler::HI);

  // Update the tlab top pointer.
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // Recover var_size_in_bytes if necessary.
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Defines obj, preserves var_size_in_bytes. Uses rscratch1 and rscratch2.
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);

    __ mov(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()));
    __ ldr(heap_end, Address(rscratch1));

    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    __ mov(rscratch1, heap_top);
    // Load-exclusive of the current top; the matching store-exclusive below
    // only succeeds if no other thread has updated top in between.
    __ ldrex(obj, rscratch1);

    // Adjust it by the size of our new object.
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // If end < obj then we wrapped around high memory.
    __ cmp(end, obj);
    __ b(slow_case, Assembler::LO);

    __ cmp(end, heap_end);
    __ b(slow_case, Assembler::HI);

    // If heap_top hasn't been changed by some other thread, update it.
    __ mov(rscratch2, rscratch1);
    __ strex(rscratch1, end, rscratch2);
    __ cmp(rscratch1, 0);
    __ b(retry, Assembler::NE);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}
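// Illustrative usage (assumption, not part of the original file): a generated
// allocation site typically tries the TLAB first and falls back to a runtime
// call, roughly along these lines (slow path elided, instance_size hypothetical):
//
//   Label slow_case, done;
//   bs->tlab_allocate(masm, obj, noreg, instance_size, t1, t2, slow_case);
//   __ b(done);
//   __ bind(slow_case);
//   // ... call into the runtime, which can also refill the TLAB ...
//   __ bind(done);
//
// eden_allocate above is the shared-heap analogue: its ldrex/strex loop
// re-reads the global top pointer and retries until the store-exclusive
// succeeds, so the bumped pointer is published atomically.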