src/hotspot/share/opto/parse3.cpp

 159   BasicType bt = field->layout_type();
 160 
 161   // Does this field have a constant value?  If so, just push the value.
 162   if (field->is_constant() &&
 163       // Keep consistent with types found by ciTypeFlow: for an
 164       // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
 165       // speculates the field is null. The code in the rest of this
 166       // method does the same. We must not bypass it and use a
 167       // non-null constant here.
 168       (bt != T_OBJECT || field->type()->is_loaded())) {
 169     // final or stable field
 170     Node* con = make_constant_from_field(field, obj);
 171     if (con != NULL) {
 172       push_node(field->layout_type(), con);
 173       return;
 174     }
 175   }
 176 
 177   ciType* field_klass = field->type();
 178   bool is_vol = field->is_volatile();
 179   // TODO change this when we support non-flattened value type fields that are non-static
 180   bool flattened = (bt == T_VALUETYPE) && !field->is_static();
 181 
 182   // Compute address and memory type.
 183   int offset = field->offset_in_bytes();
 184   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 185   Node *adr = basic_plus_adr(obj, obj, offset);
 186 
 187   // Build the resultant type of the load
 188   const Type *type;
 189 
 190   bool must_assert_null = false;
 191   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 192     if (!field->type()->is_loaded()) {
 193       type = TypeInstPtr::BOTTOM;
 194       must_assert_null = true;
 195     } else if (field->is_static_constant()) {
 196       // This can happen if the constant oop is non-perm.
 197       ciObject* con = field->constant_value().as_object();
 198       // Do not "join" in the previous type; it doesn't add value,
 199       // and may yield a vacuous result if the field is of interface type.
 200       if (con->is_null_object()) {


 218         }
 219         if (maybe_null) {
 220           type = type->is_valuetypeptr()->cast_to_ptr_type(TypePtr::BotPTR);
 221         }
 222       }
 223     }
 224   } else {
 225     type = Type::get_const_basic_type(bt);
 226   }
 227   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
 228     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
 229   }
 230 
 231   // Build the load.
 232   //
 233   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
 234   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 235   Node* ld = NULL;
 236   if (flattened) {
 237     // Load flattened value type
 238     ld = ValueTypeNode::make(_gvn, field_klass->as_value_klass(), map()->memory(), obj, obj, field->holder(), offset);
 239   } else {
 240     ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
 241   }
 242 
 243   // Adjust Java stack
 244   if (type2size[bt] == 1)
 245     push(ld);
 246   else
 247     push_pair(ld);
 248 
 249   if (must_assert_null) {
 250     // Do not take a trap here.  It's possible that the program
 251     // will never load the field's class, and will happily see
 252     // null values in this field forever.  Don't stumble into a
 253     // trap for such a program, or we might get a long series
 254     // of useless recompilations.  (Or, we might load a class
 255     // which should not be loaded.)  If we ever see a non-null
 256     // value, we will then trap and recompile.  (The trap will
 257     // not need to mention the class index, since the class will
 258     // already have been loaded if we ever see a non-null value.)


 264       C->log()->elem("assert_null reason='field' klass='%d'",
 265                      C->log()->identify(field->type()));
 266     }
 267     // If there is going to be a trap, put it at the next bytecode:
 268     set_bci(iter().next_bci());
 269     null_assert(peek());
 270     set_bci(iter().cur_bci()); // put it back
 271   }
 272 
 273   // If reference is volatile, prevent following memory ops from
 274   // floating up past the volatile read.  Also prevents commoning
 275   // another volatile read.
 276   if (field->is_volatile()) {
 277     // Memory barrier includes bogus read of value to force load BEFORE membar
 278     insert_mem_bar(Op_MemBarAcquire, ld);
 279   }
 280 }
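
The barrier shape that do_get_xxx establishes around a volatile field read can be pictured in portable C++. This is a hedged analogy using std::atomic fences, not HotSpot IR: the full fence stands in for Op_MemBarVolatile (the StoreLoad barrier issued up front on CPUs that are not multiple-copy atomic), and the acquire fence stands in for the trailing Op_MemBarAcquire.

    #include <atomic>

    std::atomic<int> field{0};

    int volatile_get() {
      // Op_MemBarVolatile analogue: full (StoreLoad) fence before the
      // load, issued when support_IRIW_for_not_multiple_copy_atomic_cpu
      // is set and the field is volatile.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      int v = field.load(std::memory_order_relaxed);
      // Op_MemBarAcquire analogue: later memory operations may not
      // float up past the volatile read.
      std::atomic_thread_fence(std::memory_order_acquire);
      return v;
    }
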
 281 
 282 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 283   bool is_vol = field->is_volatile();

 284   // If reference is volatile, prevent following memory ops from
 285   // floating down past the volatile write.  Also prevents commoning
 286   // another volatile read.
 287   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 288 
 289   // Compute address and memory type.
 290   int offset = field->offset_in_bytes();
 291   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 292   Node* adr = basic_plus_adr(obj, obj, offset);
 293   BasicType bt = field->layout_type();
 294   // Value to be stored
 295   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 296   // Round doubles before storing
 297   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 298 
 299   // Conservatively release stores of object references.
 300   const MemNode::MemOrd mo =
 301     is_vol ?
 302     // Volatile fields need releasing stores.
 303     MemNode::release :
 304     // Non-volatile fields also need releasing stores if they hold an
 305     // object reference, because the object reference might point to
 306     // a freshly created object.
 307     StoreNode::release_if_reference(bt);
 308 
 309   // Store the value.
 310   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 311     const TypeOopPtr* field_type;
 312     if (!field->type()->is_loaded()) {
 313       field_type = TypeInstPtr::BOTTOM;
 314     } else {
 315       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 316     }
 317     if (bt == T_VALUETYPE && !field->is_static()) {
 318       // Store flattened value type to non-static field

 319       val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset);
 320     } else {



 321       store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 322     }
 323   } else {
 324     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 325     store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 326   }
 327 
 328   // If reference is volatile, prevent following volatile ops from
 329   // floating up before the volatile write.
 330   if (is_vol) {
 331     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 332     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 333       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 334     }
 335     // Remember we wrote a volatile field.
 336     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
 337     // in constructors which have such stores. See do_exits() in parse1.cpp.
 338     if (is_field) {
 339       set_wrote_volatile(true);
 340     }
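
The store side is the mirror image. Again a hedged std::atomic sketch rather than HotSpot code: the release fence stands in for Op_MemBarRelease ahead of the write, and the trailing full fence for the Op_MemBarVolatile issued afterwards on multiple-copy-atomic CPUs.

    #include <atomic>

    std::atomic<int> field{0};

    void volatile_put(int v) {
      // Op_MemBarRelease analogue: earlier memory operations may not
      // float down past the volatile write.
      std::atomic_thread_fence(std::memory_order_release);
      field.store(v, std::memory_order_relaxed);
      // Op_MemBarVolatile analogue: issued after the store when
      // support_IRIW_for_not_multiple_copy_atomic_cpu is not set.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
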


 634   }
 635   guarantee(target_dvt_klass->is_valuetype(), "vunbox: Target DVT must be a value type");
 636 
 637   if (!target_vcc_klass->equals(source_type->klass()) || !source_type->klass_is_exact()) {
 638     Node* exact_obj = not_null_obj;
 639     Node* slow_ctl  = type_check_receiver(exact_obj, target_vcc_klass, 1.0, &exact_obj);
 640     {
 641       PreserveJVMState pjvms(this);
 642       set_control(slow_ctl);
 643       builtin_throw(Deoptimization::Reason_class_check);
 644     }
 645     replace_in_map(not_null_obj, exact_obj);
 646     not_null_obj = exact_obj;
 647   }
 648 
 649   // Remove object from the top of the stack
 650   pop();
 651 
 652   // Create a value type node with the corresponding type
 653   ciValueKlass* vk = target_dvt_klass->as_value_klass();
 654   Node* vt = ValueTypeNode::make(gvn(), vk, map()->memory(), not_null_obj, not_null_obj, target_vcc_klass, vk->first_field_offset());
 655 
 656   // Push the value type onto the stack
 657   push(vt);
 658 }
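
The guard in front of the unbox is an exact-klass check with an exceptional slow path. A minimal stand-alone sketch; OopDesc and vunbox_guard are illustrative stand-ins, not HotSpot types, and the parser's slow path is builtin_throw(Deoptimization::Reason_class_check) rather than a C++ throw:

    #include <typeinfo>

    struct OopDesc { const void* klass; };  // hypothetical object header

    // Stand-in for type_check_receiver(): fall through when the object
    // is exactly the value-capable class (VCC), otherwise slow path.
    void vunbox_guard(const OopDesc* obj, const void* target_vcc_klass) {
      if (obj->klass != target_vcc_klass) {
        throw std::bad_cast();  // class check failed
      }
      // Fast path: obj is known to be exactly the VCC, so its flattened
      // payload can be read starting at first_field_offset().
    }
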


 159   BasicType bt = field->layout_type();
 160 
 161   // Does this field have a constant value?  If so, just push the value.
 162   if (field->is_constant() &&
 163       // Keep consistent with types found by ciTypeFlow: for an
 164       // unloaded field type, ciTypeFlow::StateVector::do_getstatic()
 165       // speculates the field is null. The code in the rest of this
 166       // method does the same. We must not bypass it and use a
 167       // non-null constant here.
 168       (bt != T_OBJECT || field->type()->is_loaded())) {
 169     // final or stable field
 170     Node* con = make_constant_from_field(field, obj);
 171     if (con != NULL) {
 172       push_node(field->layout_type(), con);
 173       return;
 174     }
 175   }
 176 
 177   ciType* field_klass = field->type();
 178   bool is_vol = field->is_volatile();
 179   bool flattened = field->is_flattened();

 180 
 181   // Compute address and memory type.
 182   int offset = field->offset_in_bytes();
 183   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 184   Node *adr = basic_plus_adr(obj, obj, offset);
 185 
 186   // Build the resultant type of the load
 187   const Type *type;
 188 
 189   bool must_assert_null = false;
 190   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 191     if (!field->type()->is_loaded()) {
 192       type = TypeInstPtr::BOTTOM;
 193       must_assert_null = true;
 194     } else if (field->is_static_constant()) {
 195       // This can happen if the constant oop is non-perm.
 196       ciObject* con = field->constant_value().as_object();
 197       // Do not "join" in the previous type; it doesn't add value,
 198       // and may yield a vacuous result if the field is of interface type.
 199       if (con->is_null_object()) {


 217         }
 218         if (maybe_null) {
 219           type = type->is_valuetypeptr()->cast_to_ptr_type(TypePtr::BotPTR);
 220         }
 221       }
 222     }
 223   } else {
 224     type = Type::get_const_basic_type(bt);
 225   }
 226   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
 227     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
 228   }
 229 
 230   // Build the load.
 231   //
 232   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
 233   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 234   Node* ld = NULL;
 235   if (flattened) {
 236     // Load flattened value type
 237     ld = ValueTypeNode::make(this, field_klass->as_value_klass(), obj, obj, field->holder(), offset);
 238   } else {
 239     ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
 240   }
 241 
 242   // Adjust Java stack
 243   if (type2size[bt] == 1)
 244     push(ld);
 245   else
 246     push_pair(ld);
 247 
 248   if (must_assert_null) {
 249     // Do not take a trap here.  It's possible that the program
 250     // will never load the field's class, and will happily see
 251     // null values in this field forever.  Don't stumble into a
 252     // trap for such a program, or we might get a long series
 253     // of useless recompilations.  (Or, we might load a class
 254     // which should not be loaded.)  If we ever see a non-null
 255     // value, we will then trap and recompile.  (The trap will
 256     // not need to mention the class index, since the class will
 257     // already have been loaded if we ever see a non-null value.)


 263       C->log()->elem("assert_null reason='field' klass='%d'",
 264                      C->log()->identify(field->type()));
 265     }
 266     // If there is going to be a trap, put it at the next bytecode:
 267     set_bci(iter().next_bci());
 268     null_assert(peek());
 269     set_bci(iter().cur_bci()); // put it back
 270   }
 271 
 272   // If reference is volatile, prevent following memory ops from
 273   // floating up past the volatile read.  Also prevents commoning
 274   // another volatile read.
 275   if (field->is_volatile()) {
 276     // Memory barrier includes bogus read of value to force load BEFORE membar
 277     insert_mem_bar(Op_MemBarAcquire, ld);
 278   }
 279 }
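
In the updated version the flattened case is driven by field->is_flattened(), and ValueTypeNode::make(this, ...) reads the value's fields directly out of the holder instead of loading an oop. At the data level that amounts to a per-field copy from obj + offset. A minimal sketch with illustrative, non-HotSpot types:

    #include <cstddef>
    #include <cstring>

    struct Point  { int x, y; };              // stands in for a value klass
    struct Holder { long header; Point p; };  // Point is flattened inline

    // Analogue of the loads ValueTypeNode::make emits for each field;
    // 'offset' plays the role of field->offset_in_bytes().
    Point load_flattened(const Holder* h, std::size_t offset) {
      Point v;
      std::memcpy(&v, reinterpret_cast<const char*>(h) + offset, sizeof v);
      return v;
    }

A caller would pass offsetof(Holder, p); no reference is dereferenced, which is why the flattened path bypasses make_load entirely.
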
 280 
 281 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
 282   bool is_vol = field->is_volatile();
 283   bool is_flattened = field->is_flattened();
 284   // If reference is volatile, prevent following memory ops from
 285   // floating down past the volatile write.  Also prevents commoning
 286   // another volatile read.
 287   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
 288 
 289   // Compute address and memory type.
 290   int offset = field->offset_in_bytes();
 291   const TypePtr* adr_type = C->alias_type(field)->adr_type();
 292   Node* adr = basic_plus_adr(obj, obj, offset);
 293   BasicType bt = field->layout_type();
 294   // Value to be stored
 295   Node* val = type2size[bt] == 1 ? pop() : pop_pair();
 296   // Round doubles before storing
 297   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 298 
 299   // Conservatively release stores of object references.
 300   const MemNode::MemOrd mo =
 301     is_vol ?
 302     // Volatile fields need releasing stores.
 303     MemNode::release :
 304     // Non-volatile fields also need releasing stores if they hold an
 305     // object reference, because the object reference might point to
 306     // a freshly created object.
 307     StoreNode::release_if_reference(bt);
 308 
 309   // Store the value.
 310   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 311     const TypeOopPtr* field_type;
 312     if (!field->type()->is_loaded()) {
 313       field_type = TypeInstPtr::BOTTOM;
 314     } else {
 315       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 316     }
 317     if (is_flattened) {
 318       // Store flattened value type to a non-static field
 319       assert(bt == T_VALUETYPE, "flattening is only supported for value type fields");
 320       val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset);
 321     } else {
 322       if (bt == T_VALUETYPE) {
 323         field_type = field_type->cast_to_ptr_type(TypePtr::BotPTR)->is_oopptr();
 324       }
 325       store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 326     }
 327   } else {
 328     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 329     store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 330   }
 331 
 332   // If reference is volatile, prevent following volatile ops from
 333   // floating up before the volatile write.
 334   if (is_vol) {
 335     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 336     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 337       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 338     }
 339     // Remember we wrote a volatile field.
 340     // For not multiple copy atomic cpu (ppc64) a barrier should be issued
 341     // in constructors which have such stores. See do_exits() in parse1.cpp.
 342     if (is_field) {
 343       set_wrote_volatile(true);
 344     }
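
StoreNode::release_if_reference(bt) upgrades even non-volatile oop stores to releasing stores for safe publication: the stored reference may point at a freshly created object, and its field initializations must become visible no later than the reference itself. The same idea in portable C++:

    #include <atomic>

    struct Obj { int f; };

    std::atomic<Obj*> field{nullptr};

    void publish() {
      Obj* o = new Obj{42};
      // Releasing store: the initialization of o->f cannot be reordered
      // after the store of the reference, so any reader that observes
      // 'o' through 'field' also observes f == 42.
      field.store(o, std::memory_order_release);
    }
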


 638   }
 639   guarantee(target_dvt_klass->is_valuetype(), "vunbox: Target DVT must be a value type");
 640 
 641   if (!target_vcc_klass->equals(source_type->klass()) || !source_type->klass_is_exact()) {
 642     Node* exact_obj = not_null_obj;
 643     Node* slow_ctl  = type_check_receiver(exact_obj, target_vcc_klass, 1.0, &exact_obj);
 644     {
 645       PreserveJVMState pjvms(this);
 646       set_control(slow_ctl);
 647       builtin_throw(Deoptimization::Reason_class_check);
 648     }
 649     replace_in_map(not_null_obj, exact_obj);
 650     not_null_obj = exact_obj;
 651   }
 652 
 653   // Remove object from the top of the stack
 654   pop();
 655 
 656   // Create a value type node with the corresponding type
 657   ciValueKlass* vk = target_dvt_klass->as_value_klass();
 658   Node* vt = ValueTypeNode::make(this, vk, not_null_obj, not_null_obj, target_vcc_klass, vk->first_field_offset());
 659 
 660   // Push the value type onto the stack
 661   push(vt);
 662 }
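
Taken together, the vunbox path reads the derived value type's (DVT's) field block out of the value-capable class instance starting at vk->first_field_offset() and pushes it as a ValueTypeNode. A hedged sketch of that offset arithmetic with illustrative types (PointBox and PointValue are hypothetical, and C2 emits one load per field rather than a block copy):

    #include <cstddef>
    #include <cstring>

    struct PointValue { int x, y; };  // the DVT's field block

    struct PointBox {                 // the VCC instance on the heap
      long header;                    // stand-in for the object header
      PointValue fields;              // payload at first_field_offset()
    };

    PointValue vunbox_payload(const PointBox* box) {
      PointValue v;
      std::memcpy(&v, reinterpret_cast<const char*>(box) +
                      offsetof(PointBox, fields), sizeof v);
      return v;
    }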