/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shared/barrierSetAssembler.hpp" 27 #include "interpreter/interp_masm.hpp" 28 #include "runtime/jniHandles.hpp" 29 30 #define __ masm-> 31 32 void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, 33 Register dst, Address src, Register tmp1, Register tmp_thread) { 34 bool on_heap = (decorators & IN_HEAP) != 0; 35 bool in_native = (decorators & IN_NATIVE) != 0; 36 bool oop_not_null = (decorators & OOP_NOT_NULL) != 0; 37 bool atomic = (decorators & MO_RELAXED) != 0; 38 39 switch (type) { 40 case T_OBJECT: 41 case T_ARRAY: { 42 if (on_heap) { 43 #ifdef _LP64 44 if (UseCompressedOops) { 45 __ movl(dst, src); 46 if (oop_not_null) { 47 __ decode_heap_oop_not_null(dst); 48 } else { 49 __ decode_heap_oop(dst); 50 } 51 } else 52 #endif 53 { 54 __ movptr(dst, src); 55 } 56 } else { 57 assert(in_native, "why else?"); 58 __ movptr(dst, src); 59 } 60 break; 61 } 62 case T_BOOLEAN: __ load_unsigned_byte(dst, src); break; 63 case T_BYTE: __ load_signed_byte(dst, src); break; 64 case T_CHAR: __ load_unsigned_short(dst, src); break; 65 case T_SHORT: __ load_signed_short(dst, src); break; 66 case T_INT: __ movl (dst, src); break; 67 case T_ADDRESS: __ movptr(dst, src); break; 68 case T_FLOAT: 69 assert(dst == noreg, "only to ftos"); 70 __ load_float(src); 71 break; 72 case T_DOUBLE: 73 assert(dst == noreg, "only to dtos"); 74 __ load_double(src); 75 break; 76 case T_LONG: 77 assert(dst == noreg, "only to ltos"); 78 #ifdef _LP64 79 __ movq(rax, src); 80 #else 81 if (atomic) { 82 __ fild_d(src); // Must load atomically 83 __ subptr(rsp,2*wordSize); // Make space for store 84 __ fistp_d(Address(rsp,0)); 85 __ pop(rax); 86 __ pop(rdx); 87 } else { 88 __ movl(rax, src); 89 __ movl(rdx, src.plus_disp(wordSize)); 90 } 91 #endif 92 break; 93 default: Unimplemented(); 94 } 95 } 96 97 void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, 98 Address 
dst, Register val, Register tmp1, Register tmp2) { 99 bool on_heap = (decorators & IN_HEAP) != 0; 100 bool in_native = (decorators & IN_NATIVE) != 0; 101 bool oop_not_null = (decorators & OOP_NOT_NULL) != 0; 102 bool atomic = (decorators & MO_RELAXED) != 0; 103 104 switch (type) { 105 case T_OBJECT: 106 case T_ARRAY: { 107 if (on_heap) { 108 if (val == noreg) { 109 assert(!oop_not_null, "inconsistent access"); 110 #ifdef _LP64 111 if (UseCompressedOops) { 112 __ movl(dst, (int32_t)NULL_WORD); 113 } else { 114 __ movslq(dst, (int32_t)NULL_WORD); 115 } 116 #else 117 __ movl(dst, (int32_t)NULL_WORD); 118 #endif 119 } else { 120 #ifdef _LP64 121 if (UseCompressedOops) { 122 assert(!dst.uses(val), "not enough registers"); 123 if (oop_not_null) { 124 __ encode_heap_oop_not_null(val); 125 } else { 126 __ encode_heap_oop(val); 127 } 128 __ movl(dst, val); 129 } else 130 #endif 131 { 132 __ movptr(dst, val); 133 } 134 } 135 } else { 136 assert(in_native, "why else?"); 137 assert(val != noreg, "not supported"); 138 __ movptr(dst, val); 139 } 140 break; 141 } 142 case T_BOOLEAN: 143 __ andl(val, 0x1); // boolean is true if LSB is 1 144 __ movb(dst, val); 145 break; 146 case T_BYTE: 147 __ movb(dst, val); 148 break; 149 case T_SHORT: 150 __ movw(dst, val); 151 break; 152 case T_CHAR: 153 __ movw(dst, val); 154 break; 155 case T_INT: 156 __ movl(dst, val); 157 break; 158 case T_LONG: 159 assert(val == noreg, "only tos"); 160 #ifdef _LP64 161 __ movq(dst, rax); 162 #else 163 if (atomic) { 164 __ push(rdx); 165 __ push(rax); // Must update atomically with FIST 166 __ fild_d(Address(rsp,0)); // So load into FPU register 167 __ fistp_d(dst); // and put into memory atomically 168 __ addptr(rsp, 2*wordSize); 169 } else { 170 __ movptr(dst, rax); 171 __ movptr(dst.plus_disp(wordSize), rdx); 172 } 173 #endif 174 break; 175 case T_FLOAT: 176 assert(val == noreg, "only tos"); 177 __ store_float(dst); 178 break; 179 case T_DOUBLE: 180 assert(val == noreg, "only tos"); 181 __ 
store_double(dst); 182 break; 183 case T_ADDRESS: 184 __ movptr(dst, val); 185 break; 186 default: Unimplemented(); 187 } 188 } 189 190 void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env, 191 Register obj, Register tmp, Label& slowpath) { 192 __ clear_jweak_tag(obj); 193 __ movptr(obj, Address(obj, 0)); 194 }