src/share/vm/opto/parse3.cpp

rev 7258 : 8064611: AARCH64: Changes to HotSpot shared code
Summary: Everything except cpu/ and os_cpu/.
Reviewed-by: kvn
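The new version below selects membar opcodes with the NOT_AARCH64 / AARCH64_ONLY macros. As a reading aid, here is a minimal sketch of how those macros are expected to expand, assuming the usual per-platform pattern in src/share/vm/utilities/macros.hpp (which this changeset extends for AArch64); the sketch is illustrative and not part of the webrev itself:

    #ifdef AARCH64
    // On an AArch64 build, AARCH64_ONLY(code) keeps its argument and
    // NOT_AARCH64(code) drops it.
    #define AARCH64_ONLY(code) code
    #define NOT_AARCH64(code)
    #else
    // On every other platform the selection is reversed.
    #define AARCH64_ONLY(code)
    #define NOT_AARCH64(code) code
    #endif

So insert_mem_bar(NOT_AARCH64(Op_MemBarRelease) AARCH64_ONLY(Op_MemBarCPUOrder)) compiles to insert_mem_bar(Op_MemBarCPUOrder) on AArch64 and to insert_mem_bar(Op_MemBarRelease) everywhere else, leaving non-AArch64 platforms unchanged.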


Old version (before the change):

 266     // If there is going to be a trap, put it at the next bytecode:
 267     set_bci(iter().next_bci());
 268     null_assert(peek());
 269     set_bci(iter().cur_bci()); // put it back
 270   }
 271 
 272   // If reference is volatile, prevent following memory ops from
 273   // floating up past the volatile read.  Also prevents commoning
 274   // another volatile read.
 275   if (field->is_volatile()) {
 276     // Memory barrier includes bogus read of value to force load BEFORE membar
 277     insert_mem_bar(Op_MemBarAcquire, ld);
 278   }
 279 }
 280 
 281 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 282   bool is_vol = field->is_volatile();
 283   // If reference is volatile, prevent following memory ops from
 284   // floating down past the volatile write.  Also prevents commoning
 285   // another volatile read.
 286   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 287 
 288   // Compute address and memory type.
 289   int offset = field->offset_in_bytes();
 290   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 291   Node* adr = basic_plus_adr(obj, obj, offset);
 292   BasicType bt = field->layout_type();
 293   // Value to be stored
 294   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 295   // Round doubles before storing
 296   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 297 
 298   // Conservatively release stores of object references.
 299   const MemNode::MemOrd mo =
 300     is_vol ?
 301     // Volatile fields need releasing stores.
 302     MemNode::release :
 303     // Non-volatile fields also need releasing stores if they hold an
 304     // object reference, because the object reference might point to
 305     // a freshly created object.
 306     StoreNode::release_if_reference(bt);


 308   // Store the value.
 309   Node* store;
 310   if (bt == T_OBJECT) {
 311     const TypeOopPtr* field_type;
 312     if (!field->type()->is_loaded()) {
 313       field_type = TypeInstPtr::BOTTOM;
 314     } else {
 315       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 316     }
 317     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 318   } else {
 319     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 320     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 321   }
 322 
 323   // If reference is volatile, prevent following volatile ops from
 324   // floating up before the volatile write.
 325   if (is_vol) {
 326     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 327     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 328       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 329     }
 330     // Remember we wrote a volatile field.
 331     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
 332     // in constructors which have such stores. See do_exits() in parse1.cpp.
 333     if (is_field) {
 334       set_wrote_volatile(true);
 335     }
 336   }
 337 
 338   if (is_field) {
 339     set_wrote_fields(true);
 340   }
 341 
 342   // If the field is final, the rules of Java say we are in <init> or <clinit>.
 343   // Note the presence of writes to final non-static fields, so that we
 344   // can insert a memory barrier later on to keep the writes from floating
 345   // out of the constructor.
 346   // Any method can write a @Stable field; insert memory barriers after those also.
 347   if (is_field && (field->is_final() || field->is_stable())) {
 348     if (field->is_final()) {




 266     // If there is going to be a trap, put it at the next bytecode:
 267     set_bci(iter().next_bci());
 268     null_assert(peek());
 269     set_bci(iter().cur_bci()); // put it back
 270   }
 271 
 272   // If reference is volatile, prevent following memory ops from
 273   // floating up past the volatile read.  Also prevents commoning
 274   // another volatile read.
 275   if (field->is_volatile()) {
 276     // Memory barrier includes bogus read of value to force load BEFORE membar
 277     insert_mem_bar(Op_MemBarAcquire, ld);
 278   }
 279 }
 280 
 281 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 282   bool is_vol = field->is_volatile();
 283   // If reference is volatile, prevent following memory ops from
 284   // floating down past the volatile write.  Also prevents commoning
 285   // another volatile read.
 286   // AArch64 uses store release (which does everything we need to keep
 287   // the machine in order) but we still need a compiler barrier here.
 288   if (is_vol) {
 289     insert_mem_bar(NOT_AARCH64(Op_MemBarRelease) AARCH64_ONLY(Op_MemBarCPUOrder));
 290   }
 291 
 292   // Compute address and memory type.
 293   int offset = field->offset_in_bytes();
 294   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 295   Node* adr = basic_plus_adr(obj, obj, offset);
 296   BasicType bt = field->layout_type();
 297   // Value to be stored
 298   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 299   // Round doubles before storing
 300   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 301 
 302   // Conservatively release stores of object references.
 303   const MemNode::MemOrd mo =
 304     is_vol ?
 305     // Volatile fields need releasing stores.
 306     MemNode::release :
 307     // Non-volatile fields also need releasing stores if they hold an
 308     // object reference, because the object reference might point to
 309     // a freshly created object.
 310     StoreNode::release_if_reference(bt);


 312   // Store the value.
 313   Node* store;
 314   if (bt == T_OBJECT) {
 315     const TypeOopPtr* field_type;
 316     if (!field->type()->is_loaded()) {
 317       field_type = TypeInstPtr::BOTTOM;
 318     } else {
 319       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 320     }
 321     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 322   } else {
 323     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 324     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 325   }
 326 
 327   // If reference is volatile, prevent following volatile ops from
 328   // floating up before the volatile write.
 329   if (is_vol) {
 330     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 331     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 332       insert_mem_bar(NOT_AARCH64(Op_MemBarVolatile) AARCH64_ONLY(Op_MemBarCPUOrder)); // Use fat membar
 333     }
 334     // Remember we wrote a volatile field.
 335     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
 336     // in constructors which have such stores. See do_exits() in parse1.cpp.
 337     if (is_field) {
 338       set_wrote_volatile(true);
 339     }
 340   }
 341 
 342   if (is_field) {
 343     set_wrote_fields(true);
 344   }
 345 
 346   // If the field is final, the rules of Java say we are in <init> or <clinit>.
 347   // Note the presence of writes to final non-static fields, so that we
 348   // can insert a memory barrier later on to keep the writes from floating
 349   // out of the constructor.
 350   // Any method can write a @Stable field; insert memory barriers after those also.
 351   if (is_field && (field->is_final() || field->is_stable())) {
 352     if (field->is_final()) {
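
Condensed from the two hunks above, the barriers C2 inserts around a volatile field store become the following (a sketch assuming the macro expansion shown earlier; the non-AArch64 branches are the pre-existing behaviour):

    // Before the store (entry of do_put_xxx):
    #ifdef AARCH64
      insert_mem_bar(Op_MemBarCPUOrder);  // store itself carries release
                                          // semantics, so only a compiler
                                          // ordering barrier is needed here
    #else
      insert_mem_bar(Op_MemBarRelease);
    #endif

    // ... the store is issued with MemNode::release ordering ...

    // After the store, when !support_IRIW_for_not_multiple_copy_atomic_cpu:
    #ifdef AARCH64
      insert_mem_bar(Op_MemBarCPUOrder);  // replaces the fat MemBarVolatile
    #else
      insert_mem_bar(Op_MemBarVolatile);  // fat membar
    #endif

The MemNode::release ordering on the store node itself is unchanged by this patch; per the new comment in do_put_xxx, the AArch64 port (whose cpu/ code is outside this webrev) relies on a store-release for the ordering, which is why the two full membars can be relaxed to compiler-only CPU-order barriers there.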