 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "utilities/macros.hpp"

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

#ifdef ASSERT
#define __ lir_generator->lir(__FILE__, __LINE__)->
#else
#define __ lir_generator->lir()->
#endif

LIR_Opr BarrierSetC1::resolve_address(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                      LIRItem& base, LIR_Opr offset, bool resolve_in_register) {
  bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  LIR_Opr addr;
  if (on_array) {
    addr = LIR_OprFact::address(lir_generator->emit_array_address(base.result(), offset, type));
  } else if (needs_patching) {
    // We need to patch the offset in the instruction, so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    addr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, type));
  } else {
    addr = LIR_OprFact::address(lir_generator->generate_address(base.result(), offset, 0, 0, type));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = lir_generator->new_pointer_register();
    __ leal(addr, resolved_addr);
    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, type));
    return resolved_addr;
  } else {
    return addr;
  }
}

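// store_at/load_at are the entry points LIRGenerator uses for ordinary field and
// array accesses: they resolve the address (leaving it as an LIR_Address, not a
// register) and delegate to the *_resolved hooks, which GC-specific barrier sets
// override to add their pre/post barriers.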
void BarrierSetC1::store_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                            LIRItem& base, LIR_Opr offset, LIR_Opr value,
                            CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
  store_at_resolved(lir_generator, decorators, type, addr, base, offset, value, patch_info, store_emit_info);
}

void BarrierSetC1::load_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                           LIRItem& base, LIR_Opr offset, LIR_Opr result,
                           CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
  load_at_resolved(lir_generator, decorators, type, addr, base, offset, result, patch_info, load_emit_info);
}

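// The atomic entry points load the base item, load the offset as a non-constant and
// resolve the address into a pointer register (resolve_in_register == true) before
// delegating, since the platform cmpxchg/xchg/add implementations expect a fully
// materialized address operand.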
LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                        LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return atomic_cmpxchg_resolved(lir_generator, decorators, type, addr, base, offset, cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIRItem& offset, LIRItem& value) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return atomic_xchg_resolved(lir_generator, decorators, type, addr, base, offset, value);
}

LIR_Opr BarrierSetC1::atomic_add_at(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIRItem& offset, LIRItem& value) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  base.load_item();
  offset.load_nonconstant();

  LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
  return atomic_add_at_resolved(lir_generator, decorators, type, addr, base, offset, value);
}

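// Default store without GC barriers. For a volatile (MO_SEQ_CST) store this emits
// membar_release before the store; the trailing full membar is only emitted on
// multiple-copy-atomic CPUs, because platforms with
// support_IRIW_for_not_multiple_copy_atomic_cpu fence before volatile loads instead
// (see load_at_resolved).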
void BarrierSetC1::store_at_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                     LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                     CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (mask_boolean) {
    value = lir_generator->mask_boolean(base.result(), value, store_emit_info);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    lir_generator->volatile_field_store(value, addr->as_address_ptr(), store_emit_info);
  } else {
    __ store(value, addr->as_address_ptr(), store_emit_info, patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  }
}

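// Default load without GC barriers. On IRIW-sensitive platforms a full membar is
// emitted before a volatile load; every volatile load is followed by membar_acquire.
// C1_MASK_BOOLEAN requests that the raw byte produced by an Unsafe boolean access be
// normalized to 0 or 1 afterwards.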
void BarrierSetC1::load_at_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                    LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    lir_generator->volatile_field_load(addr->as_address_ptr(), result, load_emit_info);
  } else {
    __ load(addr->as_address_ptr(), result, load_emit_info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }

  /* Normalize the boolean value returned by an unsafe operation, i.e. value != 0 ? value = true : value = false. */
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }
}

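// With no GC barriers to emit, the resolved atomic operations simply delegate to the
// platform-specific LIRGenerator helpers. Collectors that need barriers around atomic
// oop updates (e.g. G1's SATB pre-barrier) override these hooks.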
LIR_Opr BarrierSetC1::atomic_cmpxchg_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
                                              LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  return lir_generator->atomic_cmpxchg(type, addr, cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_resolved(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                           LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
  return lir_generator->atomic_xchg(type, addr, value);
}

LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
                                             LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
  return lir_generator->atomic_add(type, addr, value);
}

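// Guard emitted in front of loads that might read java.lang.ref.Reference.referent
// (typically Unsafe loads with a non-constant offset), so that an SATB collector can
// log a referent that escapes through such a load; cont is the label the generated
// checks branch to when the pre-barrier can be skipped.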
void BarrierSetC1::generate_referent_check(LIRGenerator* lir_generator, LIRItem& base, LIR_Opr offset, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  if (offset->is_constant()) {
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset -
      // we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }

 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "utilities/macros.hpp"

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

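// All per-access state (LIRGenerator, decorators, type, base item, offset operand,
// resolved address, CodeEmitInfo) travels in a LIRAccess object, so the entry points
// below take only the access plus the value/result operands.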
LIR_Opr BarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  DecoratorSet decorators = access.decorators();
  bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();
  LIRGenerator *gen = access.gen();

  LIR_Opr addr_opr;
  if (on_array) {
    addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
  } else if (needs_patching) {
    // We need to patch the offset in the instruction, so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
  } else {
    addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = gen->new_pointer_register();
    __ leal(addr_opr, resolved_addr);
    resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
    return resolved_addr;
  } else {
    return addr_opr;
  }
}

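// store_at/load_at resolve the address, cache it on the access via set_resolved_addr,
// and dispatch to the *_resolved hooks that GC-specific barrier sets override.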
void BarrierSetC1::store_at(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  store_at_resolved(access, value);
}

void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  load_at_resolved(access, result);
}

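// The atomic entry points call access.load_address() to load the base item and the
// offset (as a non-constant), then resolve the address into a pointer register before
// delegating to the *_resolved hooks.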
LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_cmpxchg_resolved(access, cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_xchg_resolved(access, value);
}

LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_add_at_resolved(access, value);
}

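// Barrier-free store with Java volatile semantics when MO_SEQ_CST (or
// AlwaysAtomicAccesses) applies: release fence before the store, trailing full fence
// unless the platform orders IRIW by fencing before volatile loads instead.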
void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  LIRGenerator* gen = access.gen();

  if (mask_boolean) {
    value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
  } else {
    __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  }
}

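// Barrier-free load: leading fence on IRIW-sensitive platforms, acquire fence after a
// volatile load, then 0/1 normalization of the result for masked boolean accesses.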
void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  LIRGenerator *gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
  } else {
    __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }

  /* Normalize the boolean value returned by an unsafe operation, i.e. value != 0 ? value = true : value = false. */
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }
}

LIR_Opr BarrierSetC1::atomic_cmpxchg_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_xchg(access.type(), access.resolved_addr(), value);
}

LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator *gen = access.gen();
  return gen->atomic_add(access.type(), access.resolved_addr(), value);
}

void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  LIRGenerator *gen = access.gen();

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();

  if (offset->is_constant()) {
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset -
      // we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }
