/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_IR.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahSATBMarkQueue.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

// Shorthand for appending LIR ops to the current LIR list. In debug builds
// each emitted op is tagged with the file/line that generated it, which aids
// LIR debugging; release builds drop the bookkeeping.
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

// Slow-path code for the SATB pre-barrier stub is platform-specific;
// delegate to the barrier-set assembler.
void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

// Likewise, slow-path code for the load-reference-barrier stub is emitted
// by the platform-specific barrier-set assembler.
void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_load_reference_barrier_stub(ce, this);
}

// The runtime stub blobs are created later, in generate_c1_runtime_stubs().
ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(NULL),
  _load_reference_barrier_rt_code_blob(NULL) {}

// Emit the SATB (snapshot-at-the-beginning) pre-write barrier: if concurrent
// marking is active (per-thread satb_mark_queue_active flag is non-zero),
// branch to a ShenandoahPreBarrierStub that records the previous value of the
// field being overwritten.
//
// Two modes, selected by the arguments:
//  - pre_val == illegalOpr: the previous value must be loaded from addr_opr
//    by the stub itself (do_load mode); addr_opr must be valid.
//  - pre_val given: the previous value is already in a register; addr_opr
//    must be illegalOpr.
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  // Pick a load width matching the declared width of the active flag.
  if (in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(ShenandoahSATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  // Address of the thread-local "SATB queue active" flag.
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  // Compare against zero; the conditional branch to the slow path is emitted
  // below, once the stub has been constructed.
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    // The stub will load the previous value from addr_opr itself.
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    // Fresh register for the stub to load the previous value into.
    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      // Wrap a plain register into a (base + 0) address operand.
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    // Clone the emit info (if any) so the stub has its own copy for the
    // patching site.
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
  } else {
    // The previous value was supplied by the caller; it must already be a
    // register holding an object.
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  // Branch to the stub when the active flag was non-zero; fall through (and
  // stub return) continue here.
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

// Apply the load-reference barrier to a freshly loaded oop, or return it
// unchanged when the barrier is disabled.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  if (ShenandoahLoadRefBarrier) {
    return load_reference_barrier_impl(gen, obj, addr);
  } else {
    return obj;
  }
}

// Emit the load-reference barrier (LRB) fast/slow path:
// fast path: copy obj into the result register and test the thread-local
// gc-state byte against the interesting state bits; if none are set, the
// loaded oop is used as-is. Slow path: branch to a
// ShenandoahLoadReferenceBarrierStub which produces the canonical oop in
// the same result register.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr);
  assert(addr->is_register(), "must be a register at this point");
  // The barrier's result lives in a fixed result register; start with the
  // unbarriered value so the fast path needs no further work.
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  // Scratch registers for the stub.
  LIR_Opr tmp1 = gen->new_register(T_OBJECT);
  LIR_Opr tmp2 = gen->new_register(T_OBJECT);

  LIR_Opr thrd = gen->getThreadPointer();
  // Address of the thread-local gc-state byte.
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  // The barrier must run when the heap may contain forwarded objects, i.e.
  // during evacuation/traversal or while forwarded objects remain.
  LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED |
                                       ShenandoahHeap::EVACUATION |
                                       ShenandoahHeap::TRAVERSAL);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  if (TwoOperandLIRForm) {
    // Two-operand architectures: destination must reuse an input, so clobber
    // flag_val with the masked value.
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    // Three-operand form: keep the and-result in its own register.
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  // Slow path: stub resolves the oop and leaves it in 'result'.
  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());

  return result;
}

// Make sure 'obj' is in a register: constants are moved into a fresh object
// register; address operands are materialized with leal. Registers are
// returned unchanged.
LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) {
  if (!obj->is_register()) {
    LIR_Opr obj_reg;
    if (obj->is_constant()) {
      obj_reg = gen->new_register(T_OBJECT);
      __ move(obj, obj_reg);
    } else {
#ifdef AARCH64
      // AArch64 expects double-size register.
      obj_reg = gen->new_pointer_register();
#else
      // x86 expects single-size register.
      obj_reg = gen->new_register(T_OBJECT);
#endif
      // Compute the effective address into the register.
      __ leal(obj, obj_reg);
    }
    obj = obj_reg;
  }
  return obj;
}

// Store-value barrier: when enabled, enqueue the value being stored via the
// SATB pre-barrier machinery (obj is passed as pre_val, so no load is done).
// Returns obj, possibly forced into a register.
LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = ensure_in_register(gen, obj);
    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
  }
  return obj;
}

// Oop stores get the SATB pre-barrier (records the overwritten value) and the
// store-value barrier before the actual store; non-oop stores go straight to
// the generic implementation.
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
    }
    value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
  }
  BarrierSetC1::store_at_resolved(access, value);
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  // We must resolve in register when patching. This is to avoid
  // having a patch area in the load barrier stub, since the call
  // into the runtime to patch will not have the proper oop map.
  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}

// Oop loads: perform the raw load into a temporary, run it through the
// load-reference barrier, then move the barriered value into 'result'.
// Reference-field loads (weak/phantom/unknown) additionally keep the referent
// alive by feeding it to the SATB pre-barrier.
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator *gen = access.gen();

  if (ShenandoahLoadRefBarrier) {
    // Load into a temp first so the barrier can produce its value in the
    // proper result register before we copy it to 'result'.
    LIR_Opr tmp = gen->new_register(T_OBJECT);
    BarrierSetC1::load_at_resolved(access, tmp);
    tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr());
    __ move(tmp, result);
  } else {
    BarrierSetC1::load_at_resolved(access, result);
  }

  if (ShenandoahKeepAliveBarrier) {
    DecoratorSet decorators = access.decorators();
    bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
    bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    if (is_weak || is_phantom || is_anonymous) {
      // Register the value in the referent field with the pre-barrier
      LabelObj *Lcont_anonymous;
      if (is_anonymous) {
        // Unknown-strength access: only run the keep-alive barrier if the
        // field turns out to be Reference.referent (checked at runtime).
        Lcont_anonymous = new LabelObj();
        generate_referent_check(access, Lcont_anonymous);
      }
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr /* addr_opr */,
                  result /* pre_val */);
      if (is_anonymous) {
        __ branch_destination(Lcont_anonymous->label());
      }
    }
  }
}

// Closure that emits the shared C1 pre-barrier runtime stub.
// Returns NULL: no oop map set is produced here (the assembler handles it).
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return NULL;
  }
};

// Closure that emits the shared C1 load-reference-barrier runtime stub.
class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {

  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_load_reference_barrier_runtime_stub(sasm);
    return NULL;
  }
};

// Generate the shared runtime code blobs that the C1 barrier stubs call into.
// The pre-barrier blob is generated unconditionally; the load-reference-
// barrier blob only when that barrier is enabled.
void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                              "shenandoah_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  if (ShenandoahLoadRefBarrier) {
    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl;
    _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                   "shenandoah_load_reference_barrier_slow",
                                                                   false, &lrb_code_gen_cl);
  }
}