
src/share/vm/opto/parse3.cpp

Old version:

 163     if (con_type != NULL) {
 164       push_node(con_type->basic_type(), makecon(con_type));
 165       return;
 166     }
 167   }
 168 
 169   ciType* field_klass = field->type();
 170   bool is_vol = field->is_volatile();
 171 
 172   // Compute address and memory type.
 173   int offset = field->offset_in_bytes();
 174   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 175   Node *adr = basic_plus_adr(obj, obj, offset);
 176   BasicType bt = field->layout_type();
 177 
 178   // Build the resultant type of the load
 179   const Type *type;
 180 
 181   bool must_assert_null = false;
 182 
 183   if( bt == T_OBJECT ) {
 184     if (!field->type()->is_loaded()) {
 185       type = TypeInstPtr::BOTTOM;
 186       must_assert_null = true;
 187     } else if (field->is_constant() && field->is_static()) {
 188       // This can happen if the constant oop is non-perm.
 189       ciObject* con = field->constant_value().as_object();
 190       // Do not "join" in the previous type; it doesn't add value,
 191       // and may yield a vacuous result if the field is of interface type.
 192       type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 193       assert(type != NULL, "field singleton type must be consistent");
 194     } else {
 195       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 196     }
 197   } else {
 198     type = Type::get_const_basic_type(bt);
 199   }
 200   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
 201     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
 202   }
 203   // Build the load.
 204   //
 205   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
 206   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 207   Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
 208 
 209   // Adjust Java stack
 210   if (type2size[bt] == 1)
 211     push(ld);
 212   else
 213     push_pair(ld);
 214 
 215   if (must_assert_null) {
 216     // Do not take a trap here.  It's possible that the program
 217     // will never load the field's class, and will happily see
 218     // null values in this field forever.  Don't stumble into a
 219     // trap for such a program, or we might get a long series
 220     // of useless recompilations.  (Or, we might load a class
 221     // which should not be loaded.)  If we ever see a non-null
 222     // value, we will then trap and recompile.  (The trap will

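A note on what the compiler is doing in the get path above: a volatile field read is parsed as an acquire load (MemNode::acquire), needs_atomic_access forces an atomic 64-bit load when the field is volatile or AlwaysAtomicAccesses is set, and on CPUs that are not multiple copy atomic (e.g. ppc64) a full MemBarVolatile is issued before the load to keep independent reads of independent writes (IRIW) consistent. A minimal C++11 sketch of that ordering, assuming a seq_cst fence and an acquire load are fair stand-ins for MemBarVolatile and MemNode::acquire (illustration only, not HotSpot code; volatile_field_get is a hypothetical name):

#include <atomic>

// Sketch of the ordering C2 picks for a Java volatile field read.
long volatile_field_get(const std::atomic<long>& field,
                        bool support_IRIW_for_not_multiple_copy_atomic_cpu) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    // ~ insert_mem_bar(Op_MemBarVolatile): StoreLoad barrier before the load.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  // ~ make_load(..., MemNode::acquire, ...): later ops cannot float above it.
  return field.load(std::memory_order_acquire);
}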

 265   // Conservatively release stores of object references.
 266   const MemNode::MemOrd mo =
 267     is_vol ?
 268     // Volatile fields need releasing stores.
 269     MemNode::release :
 270     // Non-volatile fields also need releasing stores if they hold an
 271     // object reference, because the object reference might point to
 272     // a freshly created object.
 273     StoreNode::release_if_reference(bt);
 274 
 275   // Store the value.
 276   Node* store;
 277   if (bt == T_OBJECT) {
 278     const TypeOopPtr* field_type;
 279     if (!field->type()->is_loaded()) {
 280       field_type = TypeInstPtr::BOTTOM;
 281     } else {
 282       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 283     }
 284     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 285   } else {
 286     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 287     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 288   }
 289 
 290   // If reference is volatile, prevent following volatile ops from
 291   // floating up before the volatile write.
 292   if (is_vol) {
 293     // If support_IRIW_for_not_multiple_copy_atomic_cpu is set, the MemBarVolatile is issued before volatile loads instead of here.
 294     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 295       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 296     }
 297     // Remember we wrote a volatile field.
 298     // On a CPU that is not multiple copy atomic (e.g. ppc64), a barrier must be
 299     // issued in constructors that contain such stores. See do_exits() in parse1.cpp.
 300     if (is_field) {
 301       set_wrote_volatile(true);
 302     }
 303   }
 304 
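The put path is the mirror image: volatile stores are parsed with release semantics, and even non-volatile stores are released when they write an object reference (StoreNode::release_if_reference), because the stored reference may point to a freshly created object whose initializing stores must not drift past its publication. A minimal C++11 sketch of that publication concern, with hypothetical names Box and publish (illustration only, not HotSpot code):

#include <atomic>

struct Box { int payload; };
std::atomic<Box*> g_box{nullptr};

// Without release ordering, the write of b->payload could be reordered
// past the store of the pointer, and a reader could observe an
// uninitialized Box through g_box.
void publish() {
  Box* b = new Box{42};                       // initializing store
  g_box.store(b, std::memory_order_release);  // ~ release_if_reference(bt)
}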

New version:

 163     if (con_type != NULL) {
 164       push_node(con_type->basic_type(), makecon(con_type));
 165       return;
 166     }
 167   }
 168 
 169   ciType* field_klass = field->type();
 170   bool is_vol = field->is_volatile();
 171 
 172   // Compute address and memory type.
 173   int offset = field->offset_in_bytes();
 174   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 175   Node *adr = basic_plus_adr(obj, obj, offset);
 176   BasicType bt = field->layout_type();
 177 
 178   // Build the resultant type of the load
 179   const Type *type;
 180 
 181   bool must_assert_null = false;
 182 
 183   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 184     if (!field->type()->is_loaded()) {
 185       type = TypeInstPtr::BOTTOM;
 186       must_assert_null = true;
 187     } else if (field->is_constant() && field->is_static()) {
 188       // This can happen if the constant oop is non-perm.
 189       ciObject* con = field->constant_value().as_object();
 190       // Do not "join" in the previous type; it doesn't add value,
 191       // and may yield a vacuous result if the field is of interface type.
 192       type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 193       assert(type != NULL, "field singleton type must be consistent");
 194     } else {
 195       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 196     }
 197   } else {
 198     type = Type::get_const_basic_type(bt);
 199   }
 200   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
 201     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
 202   }
 203 
 204   if (type->isa_valuetypeptr()) {
 205     // Load value type from flattened field
 206     Node* vt = ValueTypeNode::make(_gvn, field_klass->as_value_klass(), map()->memory(), field->holder(), obj, offset);
 207     push_node(bt, vt);
 208     return;
 209   }
 210 
 211   // Build the load.
 212   //
 213   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
 214   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 215   Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
 216 
 217   // Adjust Java stack
 218   if (type2size[bt] == 1)
 219     push(ld);
 220   else
 221     push_pair(ld);
 222 
 223   if (must_assert_null) {
 224     // Do not take a trap here.  It's possible that the program
 225     // will never load the field's class, and will happily see
 226     // null values in this field forever.  Don't stumble into a
 227     // trap for such a program, or we might get a long series
 228     // of useless recompilations.  (Or, we might load a class
 229     // which should not be loaded.)  If we ever see a non-null
 230     // value, we will then trap and recompile.  (The trap will

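The T_VALUETYPE additions above are the substance of this change: a flattened value type field is laid out inline in its holder, so a get does not load an oop and dereference it; ValueTypeNode::make materializes the value from the holder's memory at the field offset and pushes the resulting node. In conceptual terms (hypothetical Point and load_flattened names; illustration only, not HotSpot code):

#include <cstdint>
#include <cstring>

struct Point { int x; int y; };   // stand-in for a value klass

// A flattened read copies the value's payload out of the holder at a
// fixed offset; no reference is created or followed.
Point load_flattened(const uint8_t* holder, int offset_in_bytes) {
  Point p;
  std::memcpy(&p, holder + offset_in_bytes, sizeof(Point));
  return p;
}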

 273   // Conservatively release stores of object references.
 274   const MemNode::MemOrd mo =
 275     is_vol ?
 276     // Volatile fields need releasing stores.
 277     MemNode::release :
 278     // Non-volatile fields also need releasing stores if they hold an
 279     // object reference, because the object reference might point to
 280     // a freshly created object.
 281     StoreNode::release_if_reference(bt);
 282 
 283   // Store the value.
 284   Node* store;
 285   if (bt == T_OBJECT) {
 286     const TypeOopPtr* field_type;
 287     if (!field->type()->is_loaded()) {
 288       field_type = TypeInstPtr::BOTTOM;
 289     } else {
 290       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 291     }
 292     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 293   } else if (bt == T_VALUETYPE) {
 294     // Store value type to flattened field
 295     val->as_ValueType()->store_to_field(this, field->holder(), obj, offset);
 296   } else {
 297     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 298     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 299   }
 300 
 301   // If reference is volatile, prevent following volatile ops from
 302   // floating up before the volatile write.
 303   if (is_vol) {
 304     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 305     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 306       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 307     }
 308     // Remember we wrote a volatile field.
 309     // On a CPU that is not multiple copy atomic (e.g. ppc64), a barrier must be
 310     // issued in constructors that contain such stores. See do_exits() in parse1.cpp.
 311     if (is_field) {
 312       set_wrote_volatile(true);
 313     }
 314   }
 315 
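Symmetrically, the new T_VALUETYPE branch in the put path calls ValueTypeNode::store_to_field, which writes the value's fields into the holder's payload at the field offset instead of storing a reference. Continuing the conceptual sketch above (hypothetical names; illustration only, not HotSpot code):

#include <cstdint>
#include <cstring>

struct Point { int x; int y; };   // stand-in for a value klass

// A flattened write copies the value's payload into the holder; no
// object reference is stored.
void store_flattened(uint8_t* holder, int offset_in_bytes, const Point& val) {
  std::memcpy(holder + offset_in_bytes, &val, sizeof(Point));
}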

