/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "utilities/macros.hpp"

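// PATCHED_ADDR is a placeholder displacement, used when the real field offset
// is not known until patching time; the patching code later rewrites it in
// the emitted instruction (see needs_patching in resolve_address below).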
#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

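// The usual C1 shorthand: '__' appends LIR instructions to the generator's
// LIR list, with file/line bookkeeping in debug builds.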
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

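// Computes the LIR address for an access: array accesses go through the
// array addressing helper, field accesses that still need patching use the
// PATCHED_ADDR placeholder, and everything else goes through
// generate_address. When resolve_in_register is set, the address is also
// materialized into a pointer register via leal, as required by the atomic
// operations further down.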
LIR_Opr BarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  DecoratorSet decorators = access.decorators();
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();
  LIRGenerator *gen = access.gen();

  LIR_Opr addr_opr;
  if (is_array) {
    addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
  } else if (needs_patching) {
    // We need to patch the offset in the instruction, so don't allow
    // generate_address to be smart about emitting -1. Otherwise the
    // patching code won't know how to find the instruction to patch.
    addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
  } else {
    addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = gen->new_pointer_register();
    __ leal(addr_opr, resolved_addr);
    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
    return resolved_addr;
  } else {
    return addr_opr;
  }
}

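// Entry points for in-heap accesses: resolve the address, record it on the
// access, and delegate to the *_resolved hook, which GC-specific subclasses
// override to wrap barriers around the plain memory operation.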
void BarrierSetC1::store_at(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  store_at_resolved(access, value);
}

void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  load_at_resolved(access, result);
}

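// Variant for accesses outside the heap (no IN_HEAP decorator); the caller
// is expected to have resolved the address on the access already.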
void BarrierSetC1::load(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(!in_heap, "consider using load_at");
  load_at_resolved(access, result);
}

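// The atomic entry points resolve the address into a register
// (resolve_in_register == true), since atomic instructions typically require
// a plain base-register address rather than a complex addressing mode.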
LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_xchg_at_resolved(access, value);
}

LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_add_at_resolved(access, value);
}

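// Default store: boolean values are masked to 0/1 first if requested. For a
// volatile (or always-atomic) store, a release barrier precedes the store
// and, unless the platform handles IRIW with a leading barrier on the load
// side, a full barrier follows it.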
void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  LIRGenerator* gen = access.gen();

  if (mask_boolean) {
    value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
  }

  if (is_volatile) {
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
  } else {
    __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  }
}

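// Default load, mirroring store_at_resolved: on platforms that support IRIW
// for non-multiple-copy-atomic CPUs, a full barrier precedes a volatile
// load, and an acquire barrier always follows one. IN_NATIVE loads are
// emitted with a wide move, and C1_MASK_BOOLEAN normalizes the loaded value
// to 0 or 1 afterwards.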
void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  LIRGenerator *gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (in_native) {
    __ move_wide(access.resolved_addr()->as_address_ptr(), result);
  } else if (is_volatile && !needs_patching) {
    gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
  } else {
    __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
  }

  if (is_volatile) {
    __ membar_acquire();
  }

  /* Normalize the boolean value returned by an unsafe operation, i.e. result = (result != 0) ? 1 : 0. */
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }
}

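// The atomic *_resolved defaults simply delegate to the platform-specific
// implementations in LIRGenerator. A GC-specific BarrierSetC1 subclass
// typically layers its barriers around these defaults; a minimal sketch
// (MyBarrierSetC1, pre_barrier and post_barrier are hypothetical, not
// HotSpot code):
//
//   void MyBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
//     pre_barrier(access);                             // e.g. an SATB enqueue
//     BarrierSetC1::store_at_resolved(access, value);  // the plain store above
//     post_barrier(access, value);                     // e.g. a card mark
//   }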
LIR_Opr BarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_xchg(access.type(), access.resolved_addr(), value);
}

LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_add(access.type(), access.resolved_addr(), value);
}

void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  LIRGenerator *gen = access.gen();

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();

  if (offset->is_constant()) {
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset,
      // so we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }

  // We don't need to generate the stub if the source object is an array.
  if (gen_pre_barrier && base.type()->is_array()) {
    gen_pre_barrier = false;
  }

  if (gen_pre_barrier) {
    // We still need to continue with the checks.
    if (base.is_constant()) {
      ciObject* src_con = base.get_jobject_constant();
      guarantee(src_con != NULL, "no source constant");

      if (src_con->is_null_object()) {
        // The constant src object is null, so we can skip
        // generating the code stub.
        gen_pre_barrier = false;
      } else {
        // Non-null constant source object. We still have to generate
        // the slow stub, but we don't need to generate the runtime
        // null object check.
        gen_source_check = false;
      }
    }
  }
  if (gen_pre_barrier && !PatchALot) {
    // Can the klass of the object be statically determined to be
    // a sub-class of Reference?
    ciType* type = base.value()->declared_type();
    if ((type != NULL) && type->is_loaded()) {
      if (type->is_subtype_of(gen->compilation()->env()->Reference_klass())) {
        gen_type_check = false;
      } else if (type->is_klass() &&
                 !gen->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
        // Not Reference and not Object klass.
        gen_pre_barrier = false;
      }
    }
  }

  if (gen_pre_barrier) {
    // We may need more than one runtime check here. Let's start with
    // the offset check.
    // Allocate a temporary register for the base and load it here;
    // otherwise the control flow below may confuse the register allocator.
    LIR_Opr base_reg = gen->new_register(T_OBJECT);
    __ move(base.result(), base_reg);
    if (gen_offset_check) {
      // if (offset != referent_offset) -> continue
      // If offset is an int then we can do the comparison with the
      // referent_offset constant; otherwise we need to move
      // referent_offset into a temporary register and generate
      // a reg-reg compare.

      LIR_Opr referent_off;

      if (offset->type() == T_INT) {
        referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
      } else {
        assert(offset->type() == T_LONG, "what else?");
        referent_off = gen->new_register(T_LONG);
        __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
      }
      __ cmp(lir_cond_notEqual, offset, referent_off);
      __ branch(lir_cond_notEqual, offset->type(), cont->label());
    }
    if (gen_source_check) {
      // The offset is a constant and equals referent_offset.
      // if (source == null) -> continue
      __ cmp(lir_cond_equal, base_reg, LIR_OprFact::oopConst(NULL));
      __ branch(lir_cond_equal, T_OBJECT, cont->label());
    }
    LIR_Opr src_klass = gen->new_register(T_METADATA);
    if (gen_type_check) {
      // We have determined that offset == referent_offset && src != null.
      // if (src->_klass->_reference_type == REF_NONE) -> continue
      __ move(new LIR_Address(base_reg, oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
      LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
      LIR_Opr reference_type = gen->new_register(T_INT);
      __ move(reference_type_addr, reference_type);
      __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
      __ branch(lir_cond_equal, T_INT, cont->label());
    }
  }
}

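// Object resolution is the identity by default; collectors that need to
// return a different pointer for an object (e.g. a forwarded copy) can
// override this.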
LIR_Opr BarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
  return obj;
}