
src/share/vm/opto/parse3.cpp

rev 8961 : [mq]: diff-shenandoah.patch

Old version (before patch):

 145 }
 146 
 147 
 148 void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
 149   // Does this field have a constant value?  If so, just push the value.
 150   if (field->is_constant()) {
 151     // final or stable field
 152     const Type* con_type = Type::make_constant(field, obj);
 153     if (con_type != NULL) {
 154       push_node(con_type->basic_type(), makecon(con_type));
 155       return;
 156     }
 157   }
 158 
 159   ciType* field_klass = field->type();
 160   bool is_vol = field->is_volatile();
 161 
 162   // Compute address and memory type.
 163   int offset = field->offset_in_bytes();
 164   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 165   Node *adr = basic_plus_adr(obj, obj, offset);
 166   BasicType bt = field->layout_type();
 167 
 168   // Build the resultant type of the load
 169   const Type *type;
 170 
 171   bool must_assert_null = false;
 172 
 173   if( bt == T_OBJECT ) {
 174     if (!field->type()->is_loaded()) {
 175       type = TypeInstPtr::BOTTOM;
 176       must_assert_null = true;
 177     } else if (field->is_constant() && field->is_static()) {
 178       // This can happen if the constant oop is non-perm.
 179       ciObject* con = field->constant_value().as_object();
 180       // Do not "join" in the previous type; it doesn't add value,
 181       // and may yield a vacuous result if the field is of interface type.
 182       type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 183       assert(type != NULL, "field singleton type must be consistent");
 184     } else {
      [... lines 184-226 unchanged, omitted ...]
 227     null_assert(peek());
 228     set_bci(iter().cur_bci()); // put it back
 229   }
 230 
 231   // If reference is volatile, prevent following memory ops from
 232   // floating up past the volatile read.  Also prevents commoning
 233   // another volatile read.
 234   if (field->is_volatile()) {
 235     // Memory barrier includes bogus read of value to force load BEFORE membar
 236     insert_mem_bar(Op_MemBarAcquire, ld);
 237   }
 238 }
 239 
 240 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 241   bool is_vol = field->is_volatile();
 242   // If reference is volatile, prevent following memory ops from
 243   // floating down past the volatile write.  Also prevents commoning
 244   // another volatile read.
 245   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 246 
 247   // Compute address and memory type.
 248   int offset = field->offset_in_bytes();
 249   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 250   Node* adr = basic_plus_adr(obj, obj, offset);
 251   BasicType bt = field->layout_type();
 252   // Value to be stored
 253   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 254   // Round doubles before storing
 255   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 256 
 257   // Conservatively release stores of object references.
 258   const MemNode::MemOrd mo =
 259     is_vol ?
 260     // Volatile fields need releasing stores.
 261     MemNode::release :
 262     // Non-volatile fields also need releasing stores if they hold an
 263     // object reference, because the object reference might point to
 264     // a freshly created object.
 265     StoreNode::release_if_reference(bt);
 266 
 267   // Store the value.
 268   Node* store;
 269   if (bt == T_OBJECT) {
 270     const TypeOopPtr* field_type;
 271     if (!field->type()->is_loaded()) {
 272       field_type = TypeInstPtr::BOTTOM;
 273     } else {
 274       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 275     }
 276     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 277   } else {
 278     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 279     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 280   }
 281 
 282   // If reference is volatile, prevent following volatile ops from
 283   // floating up before the volatile write.
 284   if (is_vol) {
 285     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 286     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 287       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 288     }
 289     // Remember we wrote a volatile field.
 290     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
 291     // in constructors which have such stores. See do_exits() in parse1.cpp.
 292     if (is_field) {
 293       set_wrote_volatile(true);
 294     }
 295   }

New version (with Shenandoah barriers, rev 8961):

 145 }
 146 
 147 
 148 void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
 149   // Does this field have a constant value?  If so, just push the value.
 150   if (field->is_constant()) {
 151     // final or stable field
 152     const Type* con_type = Type::make_constant(field, obj);
 153     if (con_type != NULL) {
 154       push_node(con_type->basic_type(), makecon(con_type));
 155       return;
 156     }
 157   }
 158 
 159   ciType* field_klass = field->type();
 160   bool is_vol = field->is_volatile();
 161 
 162   // Compute address and memory type.
 163   int offset = field->offset_in_bytes();
 164   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 165 
 166   // Insert read barrier for Shenandoah.
 167   obj = shenandoah_read_barrier(obj);
 168 
 169   Node *adr = basic_plus_adr(obj, obj, offset);
 170   BasicType bt = field->layout_type();
 171 
 172   // Build the resultant type of the load
 173   const Type *type;
 174 
 175   bool must_assert_null = false;
 176 
 177   if( bt == T_OBJECT ) {
 178     if (!field->type()->is_loaded()) {
 179       type = TypeInstPtr::BOTTOM;
 180       must_assert_null = true;
 181     } else if (field->is_constant() && field->is_static()) {
 182       // This can happen if the constant oop is non-perm.
 183       ciObject* con = field->constant_value().as_object();
 184       // Do not "join" in the previous type; it doesn't add value,
 185       // and may yield a vacuous result if the field is of interface type.
 186       type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 187       assert(type != NULL, "field singleton type must be consistent");
 188     } else {
      [... lines 189-230 unchanged, omitted ...]
 231     null_assert(peek());
 232     set_bci(iter().cur_bci()); // put it back
 233   }
 234 
 235   // If reference is volatile, prevent following memory ops from
 236   // floating up past the volatile read.  Also prevents commoning
 237   // another volatile read.
 238   if (field->is_volatile()) {
 239     // Memory barrier includes bogus read of value to force load BEFORE membar
 240     insert_mem_bar(Op_MemBarAcquire, ld);
 241   }
 242 }
 243 
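
Reviewer note: the only functional change to do_get_xxx above is the shenandoah_read_barrier(obj) call at lines 166-167. In the Brooks-pointer scheme this patch is based on, every object carries a forwarding word immediately before it, pointing either to the object itself or to its relocated to-space copy, and the read barrier resolves that word before the field address is computed. A minimal conceptual sketch in standalone C++ (the layout and names are illustrative assumptions, not the actual HotSpot code):

    struct oopDesc;  // stand-in for a heap object (assumption for this sketch)

    // Brooks-style read barrier: load the forwarding word stored one
    // pointer-width before the object and use it as the object's current
    // address. An unmoved object forwards to itself, so this is a single
    // unconditional dependent load.
    static inline oopDesc* read_barrier_sketch(oopDesc* obj) {
      oopDesc** fwd_slot = reinterpret_cast<oopDesc**>(obj) - 1;
      return *fwd_slot;
    }

Because the forwarding word of an unmoved object is a self-pointer, the barrier is branch-free, which is what makes it cheap enough to emit before every field load as done here.
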
 244 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 245   bool is_vol = field->is_volatile();
 246   // If reference is volatile, prevent following memory ops from
 247   // floating down past the volatile write.  Also prevents commoning
 248   // another volatile read.
 249   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 250 
 251   // Insert write barrier for Shenandoah.
 252   obj = shenandoah_write_barrier(obj);
 253 
 254   // Compute address and memory type.
 255   int offset = field->offset_in_bytes();
 256   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 257   Node* adr = basic_plus_adr(obj, obj, offset);
 258   BasicType bt = field->layout_type();
 259   // Value to be stored
 260   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 261   // Round doubles before storing
 262   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 263 
 264   // Conservatively release stores of object references.
 265   const MemNode::MemOrd mo =
 266     is_vol ?
 267     // Volatile fields need releasing stores.
 268     MemNode::release :
 269     // Non-volatile fields also need releasing stores if they hold an
 270     // object reference, because the object reference might point to
 271     // a freshly created object.
 272     StoreNode::release_if_reference(bt);
 273 
 274   // Store the value.
 275   Node* store;
 276   if (bt == T_OBJECT) {
 277     const TypeOopPtr* field_type;
 278     if (!field->type()->is_loaded()) {
 279       field_type = TypeInstPtr::BOTTOM;
 280     } else {
 281       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 282     }
 283 
 284     val = shenandoah_read_barrier_nomem(val);
 285 
 286     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 287   } else {
 288     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 289     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 290   }
 291 
 292   // If reference is volatile, prevent following volatile ops from
 293   // floating up before the volatile write.
 294   if (is_vol) {
 295     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 296     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 297       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 298     }
 299     // Remember we wrote a volatile field.
 300     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
 301     // in constructors which have such stores. See do_exits() in parse1.cpp.
 302     if (is_field) {
 303       set_wrote_volatile(true);
 304     }
 305   }
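
Reviewer note on the two additions to do_put_xxx: shenandoah_write_barrier(obj) at line 252 must hand back a to-space copy of obj so that stores never mutate a from-space object, and shenandoah_read_barrier_nomem(val) at line 284 resolves the reference being stored so that only to-space pointers are written into the heap (the _nomem suffix presumably marks a read barrier without a memory edge, since only the resolved value matters here). A conceptual slow-path sketch continuing the C++ above; gc_is_evacuating, in_collection_set, and evacuate are assumed placeholders for the real runtime machinery:

    bool gc_is_evacuating();           // assumed: is an evacuation phase active?
    bool in_collection_set(oopDesc*);  // assumed: is obj in a region being evacuated?
    oopDesc* evacuate(oopDesc*);       // assumed: copy to to-space, install fwd pointer

    // Write barrier: resolve like the read barrier, then, if the object
    // sits in a region currently being evacuated, copy it first so the
    // returned address is always in to-space and safe to mutate.
    oopDesc* write_barrier_sketch(oopDesc* obj) {
      oopDesc* fwd = read_barrier_sketch(obj);
      if (gc_is_evacuating() && in_collection_set(fwd)) {
        fwd = evacuate(fwd);
      }
      return fwd;
    }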

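
Unrelated to the Shenandoah changes, but the hunk is a good reminder of why StoreNode::release_if_reference(bt) exists: even non-volatile reference stores get releasing semantics so that a thread observing the reference also observes the referent's initializing stores. The same idea in portable C++ (purely illustrative, nothing to do with HotSpot internals):

    #include <atomic>

    struct Point { int x, y; };

    std::atomic<Point*> shared{nullptr};

    void publish() {
      Point* p = new Point{1, 2};  // initializing stores
      // Releasing store of the reference: any thread whose acquire load
      // observes p is also guaranteed to observe x == 1 and y == 2.
      shared.store(p, std::memory_order_release);
    }

    Point* subscribe() {
      return shared.load(std::memory_order_acquire);
    }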
