/*
 * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_IR.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

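// Code emission for the pre-barrier slow-path stub is delegated to the
// platform-specific ShenandoahBarrierSetAssembler.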
void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

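// Likewise, the load-reference-barrier slow-path stub is emitted by the
// platform-specific assembler.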
void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_load_reference_barrier_stub(ce, this);
}

ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(NULL),
  _load_reference_barrier_rt_code_blob(NULL) {}

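// SATB pre-write barrier: if concurrent marking is active, record the previous
// value of the field (loading it here if necessary) so the snapshot-at-the-beginning
// invariant is preserved. The actual enqueueing happens in the out-of-line
// ShenandoahPreBarrierStub slow path.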
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, e.g. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

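// Load-reference barrier: no-op unless ShenandoahLoadRefBarrier is enabled.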
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  if (ShenandoahLoadRefBarrier) {
    return load_reference_barrier_impl(gen, obj, addr);
  } else {
    return obj;
  }
}

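// Fast path of the load-reference barrier: read the thread-local gc-state byte
// and, if the heap has forwarded objects or is evacuating or traversing, branch
// to the ShenandoahLoadReferenceBarrierStub slow path, which may replace the
// loaded reference in 'result' with its forwarded copy.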
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr);
  assert(addr->is_register(), "must be a register at this point");
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  LIR_Opr tmp1 = gen->new_register(T_OBJECT);
  LIR_Opr tmp2 = gen->new_register(T_OBJECT);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED |
                                       ShenandoahHeap::EVACUATION |
                                       ShenandoahHeap::TRAVERSAL);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  if (TwoOperandLIRForm) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());

  return result;
}

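// Materializes a non-register operand into a register: constants are moved,
// addresses are computed with leal.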
LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) {
  if (!obj->is_register()) {
    LIR_Opr obj_reg;
    if (obj->is_constant()) {
      obj_reg = gen->new_register(T_OBJECT);
      __ move(obj, obj_reg);
    } else {
#ifdef AARCH64
      // AArch64 expects double-size register.
      obj_reg = gen->new_pointer_register();
#else
      // x86 expects single-size register.
      obj_reg = gen->new_register(T_OBJECT);
#endif
      __ leal(obj, obj_reg);
    }
    obj = obj_reg;
  }
  return obj;
}

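// Store-value barrier: when ShenandoahStoreValEnqueueBarrier is enabled,
// enqueue the value being stored via the SATB pre-barrier machinery.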
LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = ensure_in_register(gen, obj);
    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
  }
  return obj;
}

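// Oop stores get the SATB pre-barrier on the old field value and the
// store-value barrier on the new value before the actual store.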
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
    }
    value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
  }
  BarrierSetC1::store_at_resolved(access, value);
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  // We must resolve in register when patching. This is to avoid
  // having a patch area in the load barrier stub, since the call
  // into the runtime to patch will not have the proper oop map.
  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}

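// Oop loads: IN_NATIVE (off-heap) loads outside traversal mode go through a
// runtime call to the native load-reference barrier; ordinary loads apply the
// inline load-reference barrier. When ShenandoahKeepAliveBarrier is enabled,
// loads through weak/phantom/unknown reference fields additionally emit the
// SATB pre-barrier to keep the loaded referent alive.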
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();

  DecoratorSet decorators = access.decorators();
  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();

  if ((decorators & IN_NATIVE) != 0 && !is_traversal_mode) {
    assert(access.is_oop(), "IN_NATIVE access only for oop values");
    BarrierSetC1::load_at_resolved(access, result);
    LIR_OprList* args = new LIR_OprList();
    LIR_Opr addr = access.resolved_addr();
    addr = ensure_in_register(gen, addr);
    args->append(result);
    args->append(addr);
    BasicTypeList signature;
    signature.append(T_OBJECT);
    signature.append(T_ADDRESS);
    LIR_Opr call_result = gen->call_runtime(&signature, args,
                                            CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native),
                                            objectType, NULL);
    __ move(call_result, result);
  } else {
    if (ShenandoahLoadRefBarrier) {
      LIR_Opr tmp = gen->new_register(T_OBJECT);
      BarrierSetC1::load_at_resolved(access, tmp);
      tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr());
      __ move(tmp, result);
    } else {
      BarrierSetC1::load_at_resolved(access, result);
    }
  }

  if (ShenandoahKeepAliveBarrier) {
    bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
    bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;

    if ((is_weak || is_phantom || is_anonymous) && keep_alive) {
      // Register the value in the referent field with the pre-barrier
      LabelObj *Lcont_anonymous;
      if (is_anonymous) {
        Lcont_anonymous = new LabelObj();
        generate_referent_check(access, Lcont_anonymous);
      }
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr /* addr_opr */,
                  result /* pre_val */);
      if (is_anonymous) {
        __ branch_destination(Lcont_anonymous->label());
      }
    }
  }
}

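// Closures used by Runtime1::generate_blob() to emit the shared slow-path
// runtime stubs that the barrier stubs above call into.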
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return NULL;
  }
};

class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_load_reference_barrier_runtime_stub(sasm);
    return NULL;
  }
};

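// Generates the shared C1 runtime code blobs for the pre-barrier and, if
// enabled, the load-reference barrier.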
void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                              "shenandoah_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  if (ShenandoahLoadRefBarrier) {
    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl;
    _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                   "shenandoah_load_reference_barrier_slow",
                                                                   false, &lrb_code_gen_cl);
  }
}

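// Returns a printable name for known runtime call entries, used for debug
// printing of runtime calls.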
const char* ShenandoahBarrierSetC1::rtcall_name_for_address(address entry) {
  if (entry == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)) {
    return "ShenandoahRuntime::load_reference_barrier_native";
  }
  return NULL;
}