        return;
    } else {
      // final or stable non-static field
      // Treat final non-static fields of trusted classes (classes in
      // java.lang.invoke and sun.invoke packages and subpackages) as
      // compile-time constants.
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);
        if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
          // fall through to field load; the field is not yet initialized
        } else {
          if (push_constant(constant, true, false, stable_type))
            return;
        }
      }
    }
  }
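
  // Illustrative aside (standalone C++ sketch, not HotSpot code; all names
  // hypothetical) of the folding policy above: a final/@Stable field of a
  // constant receiver may be baked in as a compile-time constant, but a
  // @Stable field is only folded once a non-default (non-null/non-zero)
  // value has been observed, because the default value may still change:
  //
  //   struct StableSlot {
  //     long value  = 0;      // 0 means "not yet initialized"
  //     long cached = 0;
  //     bool folded = false;
  //     long read() {
  //       if (!folded && value != 0) {   // fold only a non-default value
  //         cached = value;
  //         folded = true;
  //       }
  //       return folded ? cached : value;  // else fall through to a real read
  //     }
  //   };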

  Node* leading_membar = NULL;
  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
    leading_membar = insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
  }
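
  // Illustrative aside (standalone C++11 sketch, not HotSpot code): the IRIW
  // ("independent reads of independent writes") shape this leading barrier is
  // for. Java volatile requires both readers to agree on a single order of
  // the two independent writes, which plain acquire loads alone do not
  // guarantee on non-multiple-copy-atomic hardware such as ppc64:
  //
  //   #include <atomic>
  //   std::atomic<int> x{0}, y{0};
  //   void writer1() { x.store(1, std::memory_order_seq_cst); }
  //   void writer2() { y.store(1, std::memory_order_seq_cst); }
  //   void reader1(int& r1, int& r2) {
  //     r1 = x.load(std::memory_order_seq_cst);
  //     r2 = y.load(std::memory_order_seq_cst);
  //   }
  //   void reader2(int& r3, int& r4) {
  //     r3 = y.load(std::memory_order_seq_cst);
  //     r4 = x.load(std::memory_order_seq_cst);
  //   }
  //   // Forbidden outcome under seq_cst: r1==1, r2==0, r3==1, r4==0.
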
  // Build the load.
  //
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read. Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
    Node* mb = insert_mem_bar(Op_MemBarAcquire, ld);
    mb->as_MemBar()->set_trailing_load();
  }
}
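
// Illustrative aside (standalone C++11 sketch, not HotSpot code; names are
// hypothetical): the MemBarAcquire emitted after a volatile load above pairs
// with the MemBarRelease emitted before a volatile store in do_put_xxx below,
// the same publish/observe pattern as release/acquire in C++:
//
//   #include <atomic>
//   struct Payload { int data = 0; };
//   Payload payload;
//   std::atomic<bool> ready{false};
//
//   void publisher() {
//     payload.data = 42;                              // ordinary field store
//     ready.store(true, std::memory_order_release);   // "volatile" store
//   }
//   void consumer() {
//     if (ready.load(std::memory_order_acquire)) {    // "volatile" load
//       int observed = payload.data;                  // guaranteed to be 42
//     }
//   }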

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  Node* leading_membar = NULL;
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write. Also prevents commoning
  // another volatile read.
  if (is_vol) {
    leading_membar = insert_mem_bar(Op_MemBarRelease);
  }

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE) val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);
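
  // Illustrative aside (standalone C++11 sketch, not HotSpot code): why even a
  // non-volatile reference store gets releasing semantics. Publishing a
  // freshly created object must not be reordered ahead of the stores that
  // initialized it, or another thread could observe a half-built object:
  //
  //   #include <atomic>
  //   struct Box { int value; explicit Box(int v) : value(v) {} };
  //   std::atomic<Box*> shared{nullptr};
  //
  //   void publish() {
  //     Box* b = new Box(42);                        // initializing stores
  //     shared.store(b, std::memory_order_release);  // reference store is a release
  //   }
  //   void observe() {
  //     Box* b = shared.load(std::memory_order_acquire);
  //     if (b != nullptr) { int v = b->value; }      // v is 42, never garbage
  //   }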

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // On non-multiple-copy-atomic CPUs the MemBarVolatile is issued before
    // each volatile load instead (see do_get_xxx), so it is not needed here.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      Node* mb = insert_mem_bar(Op_MemBarVolatile, store); // Use fat membar
      MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
    }
    // Remember we wrote a volatile field.
    // For non-multiple-copy-atomic CPUs (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }
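
  // Illustrative aside (standalone C++11 sketch, not HotSpot code): the fat
  // StoreLoad barrier paired with the leading MemBarRelease above rules out
  // the "store buffering" reordering between a volatile store and a later
  // volatile load, which release/acquire alone would still allow:
  //
  //   #include <atomic>
  //   std::atomic<int> a{0}, b{0};
  //   int r1, r2;
  //   void thread1() {
  //     a.store(1, std::memory_order_seq_cst);   // volatile store (+ trailing barrier)
  //     r1 = b.load(std::memory_order_seq_cst);  // later volatile load
  //   }
  //   void thread2() {
  //     b.store(1, std::memory_order_seq_cst);
  //     r2 = a.load(std::memory_order_seq_cst);
  //   }
  //   // Forbidden under seq_cst (and for Java volatile): r1 == 0 && r2 == 0.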

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    set_wrote_final(true);
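
    // Illustrative aside (standalone C++11 sketch, not HotSpot code; names are
    // hypothetical): the barrier that do_exits() emits for wrote_final acts
    // like a release fence at the end of the constructor, so final-field
    // stores cannot float past the point where the object becomes reachable:
    //
    //   #include <atomic>
    //   struct Holder {
    //     int final_field;
    //     explicit Holder(int v) : final_field(v) {
    //       std::atomic_thread_fence(std::memory_order_release);  // constructor-exit barrier
    //     }
    //   };
    //   std::atomic<Holder*> published{nullptr};
    //   void create() { published.store(new Holder(7), std::memory_order_relaxed); }
    //   // A thread that loads 'published' with acquire and sees a non-null
    //   // Holder* is then guaranteed to read final_field == 7.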
    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    if (C->eliminate_boxing() &&
        adr_type->isa_oopptr() && adr_type->is_oopptr()->is_ptr_to_boxed_value() &&