src/hotspot/share/opto/parse3.cpp

 187   const Type *type;
 188 
 189   bool must_assert_null = false;
 190   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 191     if (!field->type()->is_loaded()) {
 192       type = TypeInstPtr::BOTTOM;
 193       must_assert_null = true;
 194     } else if (field->is_static_constant()) {
 195       // This can happen if the constant oop is non-perm.
 196       ciObject* con = field->constant_value().as_object();
 197       // Do not "join" in the previous type; it doesn't add value,
 198       // and may yield a vacuous result if the field is of interface type.
 199       if (con->is_null_object()) {
 200         type = TypePtr::NULL_PTR;
 201       } else {
 202         type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 203       }
 204       assert(type != NULL, "field singleton type must be consistent");
 205     } else {
 206       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 207       if (bt == T_VALUETYPE && !flattened) {
 208         // A non-flattened value type field may be NULL
 209         bool maybe_null = true;
 210         if (field->is_static()) {
 211           // Check if static field is already initialized
 212           ciInstance* mirror = field->holder()->java_mirror();
 213           ciObject* val = mirror->field_value(field).as_object();
 214           if (!val->is_null_object()) {
 215             maybe_null = false;
 216           }
 217         }
 218         if (maybe_null) {
 219           type = type->is_valuetypeptr()->cast_to_ptr_type(TypePtr::BotPTR);
 220         }
 221       }
 222     }
 223   } else {
 224     type = Type::get_const_basic_type(bt);
 225   }
 226   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
 227     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
 228   }
 229 
 230   // Build the load.
 231   //
 232   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
 233   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 234   Node* ld = NULL;
 235   if (flattened) {
 236     // Load flattened value type
 237     ld = ValueTypeNode::make(this, field_klass->as_value_klass(), obj, obj, field->holder(), offset);
 238   } else {
 239     ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
 240   }
 241 
 242   // Adjust Java stack
 243   if (type2size[bt] == 1)
 244     push(ld);
 245   else
 246     push_pair(ld);
 247 
 248   if (must_assert_null) {
 249     // Do not take a trap here.  It's possible that the program
 250     // will never load the field's class, and will happily see
 251     // null values in this field forever.  Don't stumble into a
 252     // trap for such a program, or we might get a long series
 253     // of useless recompilations.  (Or, we might load a class
 254     // which should not be loaded.)  If we ever see a non-null
 255     // value, we will then trap and recompile.  (The trap will
 256     // not need to mention the class index, since the class will
 257     // already have been loaded if we ever see a non-null value.)
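
The non-flattened T_VALUETYPE branch above boils down to a small nullability decision: the field can only be typed as never-null when it is a static field whose holder's java mirror already holds a non-null value; otherwise the pointer type is widened to allow null via cast_to_ptr_type(TypePtr::BotPTR). A minimal standalone sketch of that decision, in plain C++ rather than HotSpot API (the function name and parameters are illustrative only):

    // Illustrative model only: true means the load must keep a maybe-null
    // pointer type, false means it can be typed as never-null.
    static bool value_field_maybe_null(bool is_static, bool static_value_is_null) {
      if (!is_static) {
        return true;   // an instance field can always still hold null here
      }
      // A static field is known to be non-null only once the holder's mirror
      // already carries a non-null value for it.
      return static_value_is_null;
    }

The updated version of this hunk further down takes the opposite direction: instead of widening to maybe-null, it narrows the type with join_speculative(TypePtr::NOTNULL) once the static field is seen to be initialized.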


 302     // Volatile fields need releasing stores.
 303     MemNode::release :
 304     // Non-volatile fields also need releasing stores if they hold an
 305     // object reference, because the object reference might point to
 306     // a freshly created object.
 307     StoreNode::release_if_reference(bt);
 308 
 309   // Store the value.
 310   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 311     const TypeOopPtr* field_type;
 312     if (!field->type()->is_loaded()) {
 313       field_type = TypeInstPtr::BOTTOM;
 314     } else {
 315       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 316     }
 317     if (is_flattened) {
 318       // Store flattened value type to a non-static field
 319       assert(bt == T_VALUETYPE, "flattening is only supported for value type fields");
 320       val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset);
 321     } else {
 322       if (bt == T_VALUETYPE) {
 323         field_type = field_type->cast_to_ptr_type(TypePtr::BotPTR)->is_oopptr();
 324       }
 325       store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 326     }
 327   } else {
 328     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 329     store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 330   }
 331 
 332   // If reference is volatile, prevent following volatile ops from
 333   // floating up before the volatile write.
 334   if (is_vol) {
 335     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 336     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 337       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 338     }
 339     // Remember we wrote a volatile field.
 340   // For CPUs that are not multiple copy atomic (ppc64), a barrier should be issued
 341     // in constructors which have such stores. See do_exits() in parse1.cpp.
 342     if (is_field) {
 343       set_wrote_volatile(true);
 344     }
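
The store path above chooses StoreNode::release_if_reference(bt) for non-volatile fields: publishing a reference with releasing semantics guarantees that a thread which later reads that reference also sees the fields of the freshly created object it points to. A rough standalone analogy using C++11 atomics (illustrative only, not how the compiler emits the store; Box, g_field, writer and reader are made-up names):

    #include <atomic>

    struct Box { int payload; };

    static std::atomic<Box*> g_field{nullptr};

    void writer() {
      Box* b = new Box{42};                         // freshly created object
      g_field.store(b, std::memory_order_release);  // releasing store of the reference
    }

    int reader() {
      Box* b = g_field.load(std::memory_order_acquire);  // pairs with the release
      return b ? b->payload : -1;                        // payload is visible whenever b is
    }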


 636     guarantee(stopped(), "A ClassCastException must always be thrown on this path");
 637     return;
 638   }
 639   guarantee(target_dvt_klass->is_valuetype(), "vunbox: Target DVT must be a value type");
 640 
 641   if (!target_vcc_klass->equals(source_type->klass()) || !source_type->klass_is_exact()) {
 642     Node* exact_obj = not_null_obj;
 643     Node* slow_ctl  = type_check_receiver(exact_obj, target_vcc_klass, 1.0, &exact_obj);
 644     {
 645       PreserveJVMState pjvms(this);
 646       set_control(slow_ctl);
 647       builtin_throw(Deoptimization::Reason_class_check);
 648     }
 649     replace_in_map(not_null_obj, exact_obj);
 650     not_null_obj = exact_obj;
 651   }
 652 
 653   // Remove object from the top of the stack
 654   pop();
 655 
 656   // Create a value type node with the corresponding type
 657   ciValueKlass* vk = target_dvt_klass->as_value_klass();
 658   Node* vt = ValueTypeNode::make(this, vk, not_null_obj, not_null_obj, target_vcc_klass, vk->first_field_offset());
 659 
 660   // Push the value type onto the stack
 661   push(vt);
 662 }
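
Conceptually, the vunbox path above works on the already null-checked operand (not_null_obj), verifies that its exact class is the value-capable class (VCC), throws ClassCastException on a mismatch, and then materializes a flat value from the box's field contents starting at first_field_offset(). A simplified standalone picture in plain C++ (not HotSpot code; PointBox and PointValue are made-up types):

    struct PointBox   { int x; int y; };  // heap-allocated VCC instance (carries an object header in the VM)
    struct PointValue { int x; int y; };  // flattened value, no header

    static PointValue vunbox(const PointBox* box) {
      // The real code has already verified the exact class and would have
      // thrown ClassCastException on a mismatch.
      return PointValue{box->x, box->y};  // copy the flattened field contents
    }

The second copy of the file below is the updated version of the same three regions; the main differences are the renamed ValueTypeNode::make_from_flattened() factory and the tighter typing of initialized static value type fields in the load path.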


 187   const Type *type;
 188 
 189   bool must_assert_null = false;
 190   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 191     if (!field->type()->is_loaded()) {
 192       type = TypeInstPtr::BOTTOM;
 193       must_assert_null = true;
 194     } else if (field->is_static_constant()) {
 195       // This can happen if the constant oop is non-perm.
 196       ciObject* con = field->constant_value().as_object();
 197       // Do not "join" in the previous type; it doesn't add value,
 198       // and may yield a vacuous result if the field is of interface type.
 199       if (con->is_null_object()) {
 200         type = TypePtr::NULL_PTR;
 201       } else {
 202         type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
 203       }
 204       assert(type != NULL, "field singleton type must be consistent");
 205     } else {
 206       type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 207       if (bt == T_VALUETYPE && field->is_static()) {
 208         // Check if static value type field is already initialized
 209         assert(!flattened, "static fields should not be flattened");
 210         ciInstance* mirror = field->holder()->java_mirror();
 211         ciObject* val = mirror->field_value(field).as_object();
 212         if (!val->is_null_object()) {
 213           type = type->join_speculative(TypePtr::NOTNULL);
 214         }
 215       }
 216     }
 217   } else {
 218     type = Type::get_const_basic_type(bt);
 219   }
 220   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
 221     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
 222   }
 223 
 224   // Build the load.
 225   //
 226   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
 227   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 228   Node* ld = NULL;
 229   if (flattened) {
 230     // Load flattened value type
 231     ld = ValueTypeNode::make_from_flattened(this, field_klass->as_value_klass(), obj, obj, field->holder(), offset);
 232   } else {
 233     ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
 234   }
 235 
 236   // Adjust Java stack
 237   if (type2size[bt] == 1)
 238     push(ld);
 239   else
 240     push_pair(ld);
 241 
 242   if (must_assert_null) {
 243     // Do not take a trap here.  It's possible that the program
 244     // will never load the field's class, and will happily see
 245     // null values in this field forever.  Don't stumble into a
 246     // trap for such a program, or we might get a long series
 247     // of useless recompilations.  (Or, we might load a class
 248     // which should not be loaded.)  If we ever see a non-null
 249     // value, we will then trap and recompile.  (The trap will
 250     // not need to mention the class index, since the class will
 251     // already have been loaded if we ever see a non-null value.)
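
The must_assert_null comment above describes a deliberate trade-off: because the field's class is not loaded, the JIT types the loaded value as null and only deoptimizes if a non-null value ever shows up, instead of trapping eagerly for a class that may never be loaded. A minimal standalone sketch of that idea (plain C++; assert_null_or_deopt and uncommon_trap_and_recompile are hypothetical stand-ins, not HotSpot functions):

    #include <cstdlib>

    // Hypothetical stand-in for an uncommon trap: throw away the compiled
    // frame and let the interpreter and recompiler take over.
    static void uncommon_trap_and_recompile() { std::abort(); }

    static const void* assert_null_or_deopt(const void* loaded_value) {
      if (loaded_value != nullptr) {
        // By the time a non-null value appears, its class must have been
        // loaded, so the recompiled code can type this field precisely.
        uncommon_trap_and_recompile();
      }
      return nullptr;  // past this point the value is known to be null
    }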


 296     // Volatile fields need releasing stores.
 297     MemNode::release :
 298     // Non-volatile fields also need releasing stores if they hold an
 299     // object reference, because the object reference might point to
 300     // a freshly created object.
 301     StoreNode::release_if_reference(bt);
 302 
 303   // Store the value.
 304   if (bt == T_OBJECT || bt == T_VALUETYPE) {
 305     const TypeOopPtr* field_type;
 306     if (!field->type()->is_loaded()) {
 307       field_type = TypeInstPtr::BOTTOM;
 308     } else {
 309       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
 310     }
 311     if (is_flattened) {
 312       // Store flattened value type to a non-static field
 313       assert(bt == T_VALUETYPE, "flattening is only supported for value type fields");
 314       val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset);
 315     } else {
 316       store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
 317     }
 318   } else {
 319     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
 320     store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
 321   }
 322 
 323   // If reference is volatile, prevent following volatile ops from
 324   // floating up before the volatile write.
 325   if (is_vol) {
 326     // If not multiple copy atomic, we do the MemBarVolatile before the load.
 327     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
 328       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
 329     }
 330     // Remember we wrote a volatile field.
 331   // For CPUs that are not multiple copy atomic (ppc64), a barrier should be issued
 332     // in constructors which have such stores. See do_exits() in parse1.cpp.
 333     if (is_field) {
 334       set_wrote_volatile(true);
 335     }
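
Taken together with the load path earlier in this file, the barrier policy above is either/or: when support_IRIW_for_not_multiple_copy_atomic_cpu is set, the full MemBarVolatile is issued before volatile loads, otherwise it is issued after volatile stores. A tiny standalone summary of that choice (illustrative only, not HotSpot API; the enum and function are made up):

    enum WhereToFence { kAfterVolatileStore, kBeforeVolatileLoad };

    // Illustrative decision only: where the full MemBarVolatile ends up.
    static WhereToFence volatile_fence_placement(bool support_IRIW_for_not_multiple_copy_atomic_cpu) {
      return support_IRIW_for_not_multiple_copy_atomic_cpu ? kBeforeVolatileLoad
                                                           : kAfterVolatileStore;
    }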


 627     guarantee(stopped(), "A ClassCastException must always be thrown on this path");
 628     return;
 629   }
 630   guarantee(target_dvt_klass->is_valuetype(), "vunbox: Target DVT must be a value type");
 631 
 632   if (!target_vcc_klass->equals(source_type->klass()) || !source_type->klass_is_exact()) {
 633     Node* exact_obj = not_null_obj;
 634     Node* slow_ctl  = type_check_receiver(exact_obj, target_vcc_klass, 1.0, &exact_obj);
 635     {
 636       PreserveJVMState pjvms(this);
 637       set_control(slow_ctl);
 638       builtin_throw(Deoptimization::Reason_class_check);
 639     }
 640     replace_in_map(not_null_obj, exact_obj);
 641     not_null_obj = exact_obj;
 642   }
 643 
 644   // Remove object from the top of the stack
 645   pop();
 646 
 647   // Create a value type node with the corresponding type and push it onto the stack
 648   ciValueKlass* vk = target_dvt_klass->as_value_klass();
 649   ValueTypeNode* vt = ValueTypeNode::make_from_flattened(this, vk, not_null_obj, not_null_obj, target_vcc_klass, vk->first_field_offset());
 650   push(vt);
 651 }