1 /* 2 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1BarrierSetCodeGen.hpp"
#include "utilities/macros.hpp"

// Sentinel displacement for field accesses whose real offset is not known at
// compile time: the patching code rewrites it into the instruction later.
#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

// Shorthand for appending LIR instructions to the generator's current list.
// In ASSERT builds the emission site (file/line) is recorded for debugging.
#ifdef ASSERT
#define __ lir_generator->lir(__FILE__, __LINE__)->
#else
#define __ lir_generator->lir()->
#endif

// Build the LIR address operand for an access at base + offset:
//  - C1_ACCESS_ON_ARRAY:  use an array-element address (offset is the index);
//  - C1_NEEDS_PATCHING:   emit the PATCHED_ADDR sentinel so the patching code
//                         can locate and rewrite the displacement;
//  - otherwise:           let the generator fold the offset into a normal
//                         addressing mode.
// When resolve_in_register is true, the effective address is additionally
// materialized into a fresh pointer register (as required by the atomic
// accessors below) and an address based on that register is returned.
LIR_Opr C1BarrierSetCodeGen::resolve_address(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                             LIRItem& base, LIR_Opr offset, bool resolve_in_register) {
  bool on_array = (decorators & C1_ACCESS_ON_ARRAY) != 0;
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  LIR_Opr addr;
  if (on_array) {
    addr = LIR_OprFact::address(lir_generator->emit_array_address(base.result(), offset, type, is_oop));
  } else if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    addr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, type));
  } else {
    addr = LIR_OprFact::address(lir_generator->generate_address(base.result(), offset, 0, 0, type));
  }

  if (resolve_in_register) {
    // Compute the effective address into a register and return an address
    // operand built on top of it.
    LIR_Opr resolved_addr = lir_generator->new_pointer_register();
    __ leal(addr, resolved_addr);
    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, type));
    return resolved_addr;
  } else {
    return addr;
  }
}

// Store 'value' of the given basic type at base + offset.
// Resolves the address, then delegates to store_at_resolved which emits the
// store together with any required memory barriers.
void C1BarrierSetCodeGen::store_at(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
  store_at_resolved(lir_generator, decorators, type, addr, base, offset, value, patch_info, store_emit_info);
}

// Load a value of the given basic type from base + offset and return the
// result operand. Resolves the address, then delegates to load_at_resolved.
LIR_Opr C1BarrierSetCodeGen::load_at(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                     LIRItem& base, LIR_Opr offset,
                                     CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
  return load_at_resolved(lir_generator, decorators, type, addr, base, offset, patch_info, load_emit_info);
}

// Atomic compare-and-swap at base + offset. The address is materialized in a
// register (resolve_in_register == true) before delegating to
// cas_at_resolved.
LIR_Opr C1BarrierSetCodeGen::cas_at(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return cas_at_resolved(lir_generator, decorators, type, addr, base, offset, cmp_value, new_value);
}

// Atomic exchange (swap) at base + offset; address is materialized in a
// register before delegating to swap_at_resolved.
LIR_Opr C1BarrierSetCodeGen::swap_at(LIRGenerator* lir_generator, C1DecoratorSet decorators, BasicType type,
                                     LIRItem& base, LIRItem& offset, LIRItem& value) {
  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return swap_at_resolved(lir_generator, decorators, type, addr, base, offset, value);
}

// Atomic fetch-and-add at base + offset; address is materialized in a
// register before delegating to add_at_resolved.
LIR_Opr C1BarrierSetCodeGen::add_at(LIRGenerator* lir_generator, C1DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIRItem& offset, LIRItem& value) {
  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return add_at_resolved(lir_generator, decorators, type, addr, base, offset, value);
}

// Emit the store to an already-resolved address, together with the memory
// barriers required for volatile accesses:
//   release barrier before the store, and a full barrier after it on
//   platforms that do not need special IRIW handling.
// Accesses are treated as volatile when C1_MO_VOLATILE is set or
// AlwaysAtomicAccesses is enabled, and only on multiprocessor systems.
void C1BarrierSetCodeGen::store_at_resolved(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                            LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                            CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  bool is_volatile = (((decorators & C1_MO_VOLATILE) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (mask_boolean) {
    // Canonicalize the boolean value (to 0/1) before storing it.
    value = lir_generator->mask_boolean(base.result(), value, store_emit_info);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    // Platform-specific atomic store (e.g. for 64-bit values on 32-bit CPUs).
    lir_generator->volatile_field_store(value, addr->as_address_ptr(), store_emit_info);
  } else {
    __ store(value, addr->as_address_ptr(), store_emit_info, patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    // Trailing barrier after the volatile store; on IRIW-handling platforms
    // the barrier is emitted before the volatile load instead (see
    // load_at_resolved).
    __ membar();
  }
}

// Emit the load from an already-resolved address into a fresh register,
// together with the barriers required for volatile accesses, and return the
// result operand. With C1_MASK_BOOLEAN the loaded value is normalized to
// 0/1 before being returned.
LIR_Opr C1BarrierSetCodeGen::load_at_resolved(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                              LIR_Opr addr, LIRItem& base, LIR_Opr offset,
                                              CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  bool is_volatile = (((decorators & C1_MO_VOLATILE) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  BasicType val_bt = as_BasicType(as_ValueType(type));

  LIR_Opr result = lir_generator->new_register(val_bt);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    // On platforms needing IRIW handling, the full barrier precedes the
    // volatile load (instead of following the volatile store).
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    // Platform-specific atomic load (e.g. for 64-bit values on 32-bit CPUs).
    lir_generator->volatile_field_load(addr->as_address_ptr(), result, load_emit_info);
  } else {
    __ load(addr->as_address_ptr(), result, load_emit_info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }

  /* Normalize boolean value returned by unsafe operation, i.e., result = (result != 0) ? 1 : 0. */
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }

  return result;
}

// Emit a compare-and-swap on an already-resolved address; base/offset items
// are available for barrier subclasses that need them.
LIR_Opr C1BarrierSetCodeGen::cas_at_resolved(LIRGenerator *lir_generator, C1DecoratorSet decorators, BasicType type,
                                             LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  BasicType val_bt = as_BasicType(as_ValueType(type));
  return lir_generator->cas(val_bt, addr, cmp_value, new_value);
}

// Emit an atomic exchange on an already-resolved address.
LIR_Opr C1BarrierSetCodeGen::swap_at_resolved(LIRGenerator* lir_generator, C1DecoratorSet decorators, BasicType type,
                                              LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
  BasicType val_bt = as_BasicType(as_ValueType(type));
  return lir_generator->swap(val_bt, addr, value);
}

// Emit an atomic add on an already-resolved address.
LIR_Opr C1BarrierSetCodeGen::add_at_resolved(LIRGenerator* lir_generator, C1DecoratorSet decorators, BasicType type,
                                             LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
  BasicType val_bt = as_BasicType(as_ValueType(type));
  return lir_generator->add(val_bt, addr, value);
}

// Emit the runtime guards that decide whether a read of
// java.lang.ref.Reference.referent needs a pre-barrier. Any guard that
// fails branches to 'cont' (skipping the barrier); guards that can be
// decided statically (constant offset, constant/array source, statically
// known klass) are elided entirely.
void C1BarrierSetCodeGen::generate_referent_check(LIRGenerator* lir_generator, LIRItem& base, LIR_Opr offset, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  if (offset->is_constant()) {
    // Widen the constant offset to jlong so the comparison below is uniform
    // for both T_INT and T_LONG offsets.
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset -
      // we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }

  // We don't need to generate stub if the source object is an array
  if (gen_pre_barrier && base.type()->is_array()) {
    gen_pre_barrier = false;
  }

  if (gen_pre_barrier) {
    // We still need to continue with the checks.
    if (base.is_constant()) {
      ciObject* src_con = base.get_jobject_constant();
      guarantee(src_con != NULL, "no source constant");

      if (src_con->is_null_object()) {
        // The constant src object is null - We can skip
        // generating the code stub.
        gen_pre_barrier = false;
      } else {
        // Non-null constant source object. We still have to generate
        // the slow stub - but we don't need to generate the runtime
        // null object check.
        gen_source_check = false;
      }
    }
  }
  if (gen_pre_barrier && !PatchALot) {
    // Can the klass of object be statically determined to be
    // a sub-class of Reference?
    ciType* type = base.value()->declared_type();
    if ((type != NULL) && type->is_loaded()) {
      if (type->is_subtype_of(lir_generator->compilation()->env()->Reference_klass())) {
        gen_type_check = false;
      } else if (type->is_klass() &&
                 !lir_generator->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
        // Not Reference and not Object klass.
        gen_pre_barrier = false;
      }
    }
  }

  if (gen_pre_barrier) {
    // We can have generate one runtime check here. Let's start with
    // the offset check.
    if (gen_offset_check) {
      // if (offset != referent_offset) -> continue
      // If offset is an int then we can do the comparison with the
      // referent_offset constant; otherwise we need to move
      // referent_offset into a temporary register and generate
      // a reg-reg compare.

      LIR_Opr referent_off;

      if (offset->type() == T_INT) {
        referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
      } else {
        assert(offset->type() == T_LONG, "what else?");
        referent_off = lir_generator->new_register(T_LONG);
        __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
      }
      __ cmp(lir_cond_notEqual, offset, referent_off);
      __ branch(lir_cond_notEqual, offset->type(), cont->label());
    }
    if (gen_source_check) {
      // offset is a const and equals referent offset
      // if (source == null) -> continue
      __ cmp(lir_cond_equal, base.result(), LIR_OprFact::oopConst(NULL));
      __ branch(lir_cond_equal, T_OBJECT, cont->label());
    }
    LIR_Opr src_klass = lir_generator->new_register(T_OBJECT);
    if (gen_type_check) {
      // We have determined that offset == referent_offset && src != null.
      // if (src->_klass->_reference_type == REF_NONE) -> continue
      __ move(new LIR_Address(base.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
      LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
      LIR_Opr reference_type = lir_generator->new_register(T_INT);
      __ move(reference_type_addr, reference_type);
      __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
      __ branch(lir_cond_equal, T_INT, cont->label());
    }
  }
}