// NOTE(review): this span begins mid-function. The enclosing field-load
// routine's head — the declarations of 'type', 'bt', 'adr', 'adr_type',
// 'is_vol', 'must_assert_null', and 'field_klass', plus the branch whose
// else-arms appear below — is outside this view; claims here are limited
// to what the visible code shows.
175 type = TypeInstPtr::BOTTOM;
176 must_assert_null = true;
177 } else if (field->is_static_constant()) {
178 // This can happen if the constant oop is non-perm.
179 ciObject* con = field->constant_value().as_object();
180 // Do not "join" in the previous type; it doesn't add value,
181 // and may yield a vacuous result if the field is of interface type.
182 if (con->is_null_object()) {
183 type = TypePtr::NULL_PTR;
184 } else {
185 type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
186 }
187 assert(type != NULL, "field singleton type must be consistent");
188 } else {
189 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
190 }
191 } else {
192 type = Type::get_const_basic_type(bt);
193 }
// On CPUs that are not multiple-copy atomic, a volatile read must be
// preceded by a StoreLoad barrier so independent reads of independent
// writes (IRIW) stay consistent.
194 if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
195 insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
196 }
197 // Build the load.
198 //
// Volatile loads get acquire ordering; plain loads are unordered.
199 MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
// Atomic access is required for volatile fields, or for every field when
// the AlwaysAtomicAccesses flag is set.
200 bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
201 Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
202
// Push the loaded value: one stack slot for category-1 types, a pair for
// longs/doubles (type2size[bt] != 1).
203 // Adjust Java stack
204 if (type2size[bt] == 1)
205 push(ld);
206 else
207 push_pair(ld);
208
209 if (must_assert_null) {
210 // Do not take a trap here. It's possible that the program
211 // will never load the field's class, and will happily see
212 // null values in this field forever. Don't stumble into a
213 // trap for such a program, or we might get a long series
214 // of useless recompilations. (Or, we might load a class
215 // which should not be loaded.) If we ever see a non-null
218 // already have been loaded if we ever see a non-null value.)
219 // uncommon_trap(iter().get_field_signature_index());
220 if (PrintOpto && (Verbose || WizardMode)) {
221 method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
222 }
223 if (C->log() != NULL) {
224 C->log()->elem("assert_null reason='field' klass='%d'",
225 C->log()->identify(field->type()));
226 }
227 // If there is going to be a trap, put it at the next bytecode:
// Temporarily advance bci so a trap generated by null_assert is
// attributed to the following bytecode, then restore it.
228 set_bci(iter().next_bci());
229 null_assert(peek());
230 set_bci(iter().cur_bci()); // put it back
231 }
232
233 // If reference is volatile, prevent following memory ops from
234 // floating up past the volatile read. Also prevents commoning
235 // another volatile read.
236 if (field->is_volatile()) {
237 // Memory barrier includes bogus read of value to force load BEFORE membar
238 insert_mem_bar(Op_MemBarAcquire, ld);
239 }
240 }
241
// Parse a field store. Pops the value to be stored off the Java stack,
// computes the field address, emits a conservatively-releasing store, and
// brackets volatile stores with the required memory barriers.
// 'is_field' presumably distinguishes instance fields from statics (it
// gates the wrote-volatile/wrote-fields bookkeeping and the final-field
// handling) — TODO(review): confirm against the caller.
// NOTE(review): this span is truncated — the final/@Stable handling that
// begins at the trailing 'if' continues past this view.
242 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
243 bool is_vol = field->is_volatile();
244 // If reference is volatile, prevent following memory ops from
245 // floating down past the volatile write. Also prevents commoning
246 // another volatile read.
247 if (is_vol) insert_mem_bar(Op_MemBarRelease);
248
249 // Compute address and memory type.
250 int offset = field->offset_in_bytes();
251 const TypePtr* adr_type = C->alias_type(field)->adr_type();
252 Node* adr = basic_plus_adr(obj, obj, offset);
253 BasicType bt = field->layout_type();
254 // Value to be stored
// Category-1 values occupy one stack slot; longs/doubles occupy a pair.
255 Node* val = type2size[bt] == 1 ? pop() : pop_pair();
256 // Round doubles before storing
257 if (bt == T_DOUBLE) val = dstore_rounding(val);
258
259 // Conservatively release stores of object references.
260 const MemNode::MemOrd mo =
261 is_vol ?
262 // Volatile fields need releasing stores.
263 MemNode::release :
264 // Non-volatile fields also need releasing stores if they hold an
265 // object reference, because the object reference might point to
266 // a freshly created object.
267 StoreNode::release_if_reference(bt);
269 // Store the value.
270 Node* store;
271 if (bt == T_OBJECT) {
272 const TypeOopPtr* field_type;
// If the field's class has not been loaded, fall back to the widest
// oop type rather than guessing a klass.
273 if (!field->type()->is_loaded()) {
274 field_type = TypeInstPtr::BOTTOM;
275 } else {
276 field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
277 }
278 store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
279 } else {
280 bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
281 store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
282 }
283
284 // If reference is volatile, prevent following volatiles ops from
285 // floating up before the volatile write.
286 if (is_vol) {
287 // If not multiple copy atomic, we do the MemBarVolatile before the load.
288 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
289 insert_mem_bar(Op_MemBarVolatile); // Use fat membar
290 }
291 // Remember we wrote a volatile field.
292 // For not multiple copy atomic cpu (ppc64) a barrier should be issued
293 // in constructors which have such stores. See do_exits() in parse1.cpp.
294 if (is_field) {
295 set_wrote_volatile(true);
296 }
297 }
298
299 if (is_field) {
300 set_wrote_fields(true);
301 }
302
303 // If the field is final, the rules of Java say we are in <init> or <clinit>.
304 // Note the presence of writes to final non-static fields, so that we
305 // can insert a memory barrier later on to keep the writes from floating
306 // out of the constructor.
307 // Any method can write a @Stable field; insert memory barriers after those also.
308 if (is_field && (field->is_final() || field->is_stable())) {
309 if (field->is_final()) {
|
// NOTE(review): this span begins mid-function. The enclosing field-load
// routine's head — the declarations of 'type', 'bt', 'adr', 'adr_type',
// 'is_vol', 'must_assert_null', and 'field_klass' — is outside this view.
// Node opcodes here are spelled via the scoped 'Opcodes::' enum.
175 type = TypeInstPtr::BOTTOM;
176 must_assert_null = true;
177 } else if (field->is_static_constant()) {
178 // This can happen if the constant oop is non-perm.
179 ciObject* con = field->constant_value().as_object();
180 // Do not "join" in the previous type; it doesn't add value,
181 // and may yield a vacuous result if the field is of interface type.
182 if (con->is_null_object()) {
183 type = TypePtr::NULL_PTR;
184 } else {
185 type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
186 }
187 assert(type != NULL, "field singleton type must be consistent");
188 } else {
189 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
190 }
191 } else {
192 type = Type::get_const_basic_type(bt);
193 }
// On CPUs that are not multiple-copy atomic, a volatile read must be
// preceded by a StoreLoad barrier so independent reads of independent
// writes (IRIW) stay consistent.
194 if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
195 insert_mem_bar(Opcodes::Op_MemBarVolatile); // StoreLoad barrier
196 }
197 // Build the load.
198 //
// Volatile loads get acquire ordering; plain loads are unordered.
199 MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
// Atomic access is required for volatile fields, or for every field when
// the AlwaysAtomicAccesses flag is set.
200 bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
201 Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
202
// Push the loaded value: one stack slot for category-1 types, a pair for
// longs/doubles (type2size[bt] != 1).
203 // Adjust Java stack
204 if (type2size[bt] == 1)
205 push(ld);
206 else
207 push_pair(ld);
208
209 if (must_assert_null) {
210 // Do not take a trap here. It's possible that the program
211 // will never load the field's class, and will happily see
212 // null values in this field forever. Don't stumble into a
213 // trap for such a program, or we might get a long series
214 // of useless recompilations. (Or, we might load a class
215 // which should not be loaded.) If we ever see a non-null
218 // already have been loaded if we ever see a non-null value.)
219 // uncommon_trap(iter().get_field_signature_index());
220 if (PrintOpto && (Verbose || WizardMode)) {
221 method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
222 }
223 if (C->log() != NULL) {
224 C->log()->elem("assert_null reason='field' klass='%d'",
225 C->log()->identify(field->type()));
226 }
227 // If there is going to be a trap, put it at the next bytecode:
// Temporarily advance bci so a trap generated by null_assert is
// attributed to the following bytecode, then restore it.
228 set_bci(iter().next_bci());
229 null_assert(peek());
230 set_bci(iter().cur_bci()); // put it back
231 }
232
233 // If reference is volatile, prevent following memory ops from
234 // floating up past the volatile read. Also prevents commoning
235 // another volatile read.
236 if (field->is_volatile()) {
237 // Memory barrier includes bogus read of value to force load BEFORE membar
238 insert_mem_bar(Opcodes::Op_MemBarAcquire, ld);
239 }
240 }
241
// Parse a field store. Pops the value to be stored off the Java stack,
// computes the field address, emits a conservatively-releasing store, and
// brackets volatile stores with the required memory barriers.
// Node opcodes here are spelled via the scoped 'Opcodes::' enum.
// 'is_field' presumably distinguishes instance fields from statics (it
// gates the wrote-volatile/wrote-fields bookkeeping and the final-field
// handling) — TODO(review): confirm against the caller.
// NOTE(review): this span is truncated — the final/@Stable handling that
// begins at the trailing 'if' continues past this view.
242 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
243 bool is_vol = field->is_volatile();
244 // If reference is volatile, prevent following memory ops from
245 // floating down past the volatile write. Also prevents commoning
246 // another volatile read.
247 if (is_vol) insert_mem_bar(Opcodes::Op_MemBarRelease);
248
249 // Compute address and memory type.
250 int offset = field->offset_in_bytes();
251 const TypePtr* adr_type = C->alias_type(field)->adr_type();
252 Node* adr = basic_plus_adr(obj, obj, offset);
253 BasicType bt = field->layout_type();
254 // Value to be stored
// Category-1 values occupy one stack slot; longs/doubles occupy a pair.
255 Node* val = type2size[bt] == 1 ? pop() : pop_pair();
256 // Round doubles before storing
257 if (bt == T_DOUBLE) val = dstore_rounding(val);
258
259 // Conservatively release stores of object references.
260 const MemNode::MemOrd mo =
261 is_vol ?
262 // Volatile fields need releasing stores.
263 MemNode::release :
264 // Non-volatile fields also need releasing stores if they hold an
265 // object reference, because the object reference might point to
266 // a freshly created object.
267 StoreNode::release_if_reference(bt);
269 // Store the value.
270 Node* store;
271 if (bt == T_OBJECT) {
272 const TypeOopPtr* field_type;
// If the field's class has not been loaded, fall back to the widest
// oop type rather than guessing a klass.
273 if (!field->type()->is_loaded()) {
274 field_type = TypeInstPtr::BOTTOM;
275 } else {
276 field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
277 }
278 store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
279 } else {
280 bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
281 store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
282 }
283
284 // If reference is volatile, prevent following volatiles ops from
285 // floating up before the volatile write.
286 if (is_vol) {
287 // If not multiple copy atomic, we do the MemBarVolatile before the load.
288 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
289 insert_mem_bar(Opcodes::Op_MemBarVolatile); // Use fat membar
290 }
291 // Remember we wrote a volatile field.
292 // For not multiple copy atomic cpu (ppc64) a barrier should be issued
293 // in constructors which have such stores. See do_exits() in parse1.cpp.
294 if (is_field) {
295 set_wrote_volatile(true);
296 }
297 }
298
299 if (is_field) {
300 set_wrote_fields(true);
301 }
302
303 // If the field is final, the rules of Java say we are in <init> or <clinit>.
304 // Note the presence of writes to final non-static fields, so that we
305 // can insert a memory barrier later on to keep the writes from floating
306 // out of the constructor.
307 // Any method can write a @Stable field; insert memory barriers after those also.
308 if (is_field && (field->is_final() || field->is_stable())) {
309 if (field->is_final()) {
|