/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "utilities/macros.hpp"

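// Placeholder displacement for accesses whose field offset is not known
// yet and must be patched in at runtime (see C1_NEEDS_PATCHING below).
// The #ifndef guard lets a platform supply its own value first.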
#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

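// Shorthand for appending ops to the current LIR list. In ASSERT builds
// each appended op also records the C++ source location that generated
// it, which helps attribute emitted LIR back to its generator code.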
#ifdef ASSERT
#define __ lir_generator->lir(__FILE__, __LINE__)->
#else
#define __ lir_generator->lir()->
#endif

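// Computes the LIR address for an access at base + offset. Array accesses
// get a scaled array-element address, accesses that need patching get the
// fixed PATCHED_ADDR displacement that is rewritten at runtime, and all
// other accesses get a regular address. If resolve_in_register is set
// (required by the atomic accesses below), the effective address is first
// materialized into a pointer register.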
LIR_Opr BarrierSetC1::resolve_address(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                      LIRItem& base, LIR_Opr offset, bool resolve_in_register) {
  bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  LIR_Opr addr;
  if (on_array) {
    addr = LIR_OprFact::address(lir_generator->emit_array_address(base.result(), offset, type));
  } else if (needs_patching) {
    // We need to patch the offset in the instruction, so don't allow
    // generate_address to be smart about the addressing mode; emit the
    // fixed PATCHED_ADDR displacement instead. Otherwise the patching
    // code won't know how to find the instruction to patch.
    addr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, type));
  } else {
    addr = LIR_OprFact::address(lir_generator->generate_address(base.result(), offset, 0, 0, type));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = lir_generator->new_pointer_register();
    __ leal(addr, resolved_addr);
    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, type));
    return resolved_addr;
  } else {
    return addr;
  }
}

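// Entry point for stores. Resolves the destination address and delegates
// to store_at_resolved, which a concrete barrier set can override to
// wrap the store with its GC barriers.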
void BarrierSetC1::store_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                            LIRItem& base, LIR_Opr offset, LIR_Opr value,
                            CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
  store_at_resolved(lir_generator, decorators, type, addr, base, offset, value, patch_info, store_emit_info);
}

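// Entry point for loads; the mirror image of store_at above.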
void BarrierSetC1::load_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                           LIRItem& base, LIR_Opr offset, LIR_Opr result,
                           CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
  load_at_resolved(lir_generator, decorators, type, addr, base, offset, result, patch_info, load_emit_info);
}

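// Entry point for compare-and-swap. The address is resolved into a
// register because the platform atomic instructions generally operate on
// a plain base address rather than a compound addressing mode.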
LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                        LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return atomic_cmpxchg_resolved(lir_generator, decorators, type, addr, base, offset, cmp_value, new_value);
}

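// Entry point for atomic exchange; address resolution mirrors
// atomic_cmpxchg_at above.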
LIR_Opr BarrierSetC1::atomic_xchg(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIRItem& offset, LIRItem& value) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return atomic_xchg_resolved(lir_generator, decorators, type, addr, base, offset, value);
}

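// Entry point for atomic add; address resolution mirrors
// atomic_cmpxchg_at above.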
LIR_Opr BarrierSetC1::atomic_add_at(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIRItem& offset, LIRItem& value) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return atomic_add_at_resolved(lir_generator, decorators, type, addr, base, offset, value);
}

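// Emits the raw store together with the required memory ordering: a
// release barrier before a volatile store, and a trailing full membar
// after it unless the platform instead fences before volatile loads
// (support_IRIW_for_not_multiple_copy_atomic_cpu). Volatile stores that
// do not need patching go through the platform volatile_field_store hook
// (which, e.g., keeps 64-bit stores atomic on 32-bit platforms).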
void BarrierSetC1::store_at_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                     LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                     CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (mask_boolean) {
    value = lir_generator->mask_boolean(base.result(), value, store_emit_info);
  }

  if (is_volatile) {  // is_volatile already implies os::is_MP()
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    lir_generator->volatile_field_store(value, addr->as_address_ptr(), store_emit_info);
  } else {
    __ store(value, addr->as_address_ptr(), store_emit_info, patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  }
}

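// Emits the raw load with the ordering that matches store_at_resolved:
// an optional leading membar on platforms that fence before volatile
// loads, an acquire barrier after a volatile load, and normalization of
// boolean results to 0 or 1 when C1_MASK_BOOLEAN is set.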
void BarrierSetC1::load_at_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                    LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    lir_generator->volatile_field_load(addr->as_address_ptr(), result, load_emit_info);
  } else {
    __ load(addr->as_address_ptr(), result, load_emit_info, patch_code);
  }

  if (is_volatile) {  // is_volatile already implies os::is_MP()
    __ membar_acquire();
  }

  // Normalize a boolean value returned by an unsafe operation:
  // result = (result != 0) ? 1 : 0.
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }
}

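// Default resolved compare-and-swap: no GC barriers, just the platform
// atomic operation. Concrete barrier sets override this (and the two
// *_resolved functions below) when the accessed value needs pre- or
// post-processing.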
LIR_Opr BarrierSetC1::atomic_cmpxchg_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                              LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  return lir_generator->atomic_cmpxchg(type, addr, cmp_value, new_value);
}

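// Default atomic exchange: no barriers, platform atomic_xchg only.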
LIR_Opr BarrierSetC1::atomic_xchg_resolved(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                           LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
  return lir_generator->atomic_xchg(type, addr, value);
}

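// Default atomic add: no barriers, platform atomic_add only.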
LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                             LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
  return lir_generator->atomic_add(type, addr, value);
}

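// Guards the pre-barrier for a potential Reference.referent load.
// Execution branches to 'cont' (bypassing the caller's barrier code) as
// soon as one of the runtime checks proves no barrier is needed; any
// check that can be decided at compile time is elided below.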
void BarrierSetC1::generate_referent_check(LIRGenerator* lir_generator, LIRItem& base, LIR_Opr offset, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If a collector that uses SATB (such as G1) is
  // enabled, then we need to record the value that is being
  // returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  if (offset->is_constant()) {
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset -
      // we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }

  // We don't need to generate the stub if the source object is an array.
  if (gen_pre_barrier && base.type()->is_array()) {
    gen_pre_barrier = false;
  }

  if (gen_pre_barrier) {
    // We still need to continue with the checks.
    if (base.is_constant()) {
      ciObject* src_con = base.get_jobject_constant();
      guarantee(src_con != NULL, "no source constant");

      if (src_con->is_null_object()) {
        // The constant src object is null, so we can skip
        // generating the code stub.
        gen_pre_barrier = false;
      } else {
        // Non-null constant source object. We still have to generate
        // the slow stub, but we don't need to generate the runtime
        // null object check.
        gen_source_check = false;
      }
    }
  }
  if (gen_pre_barrier && !PatchALot) {
    // Can the klass of the object be statically determined to be
    // a subclass of Reference?
    ciType* type = base.value()->declared_type();
    if ((type != NULL) && type->is_loaded()) {
      if (type->is_subtype_of(lir_generator->compilation()->env()->Reference_klass())) {
        gen_type_check = false;
      } else if (type->is_klass() &&
                 !lir_generator->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
        // Not a Reference klass and not the Object klass.
        gen_pre_barrier = false;
      }
    }
  }

  if (gen_pre_barrier) {
    // We may have to generate more than one runtime check here. Let's
    // start with the offset check.
    if (gen_offset_check) {
      // if (offset != referent_offset) -> continue
      // If offset is an int then we can do the comparison with the
      // referent_offset constant; otherwise we need to move
      // referent_offset into a temporary register and generate
      // a reg-reg compare.

      LIR_Opr referent_off;

      if (offset->type() == T_INT) {
        referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
      } else {
        assert(offset->type() == T_LONG, "what else?");
        referent_off = lir_generator->new_register(T_LONG);
        __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
      }
      __ cmp(lir_cond_notEqual, offset, referent_off);
      __ branch(lir_cond_notEqual, offset->type(), cont->label());
    }
    if (gen_source_check) {
      // offset is a constant and equals referent_offset;
      // if (source == null) -> continue
      __ cmp(lir_cond_equal, base.result(), LIR_OprFact::oopConst(NULL));
      __ branch(lir_cond_equal, T_OBJECT, cont->label());
    }
    if (gen_type_check) {
      // We have determined that offset == referent_offset && src != null.
      // if (src->_klass->_reference_type == REF_NONE) -> continue
      LIR_Opr src_klass = lir_generator->new_register(T_OBJECT);
      __ move(new LIR_Address(base.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
      LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
      LIR_Opr reference_type = lir_generator->new_register(T_INT);
      __ move(reference_type_addr, reference_type);
      __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
      __ branch(lir_cond_equal, T_INT, cont->label());
    }
  }
}