< prev index next >

src/hotspot/share/opto/graphKit.cpp

Print this page




1374                           MemNode::MemOrd mo,
1375                           LoadNode::ControlDependency control_dependency,
1376                           bool require_atomic_access,
1377                           bool unaligned,
1378                           bool mismatched) {
1379   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1380   const TypePtr* adr_type = NULL; // debug-mode-only argument
1381   debug_only(adr_type = C->get_adr_type(adr_idx));
1382   Node* mem = memory(adr_idx);
1383   Node* ld;
1384   if (require_atomic_access && bt == T_LONG) {
1385     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1386   } else if (require_atomic_access && bt == T_DOUBLE) {
1387     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1388   } else {
1389     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1390   }
1391   ld = _gvn.transform(ld);
1392   if (bt == T_VALUETYPE) {
1393     // Loading a non-flattened value type from memory requires a null check.
1394     ld = ValueTypeNode::make(this, ld, true /* null check */);
1395   } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1396     // Improve graph before escape analysis and boxing elimination.
1397     record_for_igvn(ld);
1398   }
1399   return ld;
1400 }
1401 
1402 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1403                                 int adr_idx,
1404                                 MemNode::MemOrd mo,
1405                                 bool require_atomic_access,
1406                                 bool unaligned,
1407                                 bool mismatched) {
1408   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1409   const TypePtr* adr_type = NULL;
1410   debug_only(adr_type = C->get_adr_type(adr_idx));
1411   Node *mem = memory(adr_idx);
1412   Node* st;
1413   if (require_atomic_access && bt == T_LONG) {
1414     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);


1698     // through and exceptional paths, so replace the projections for
1699     // the fall through path.
1700     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1701     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1702   }
1703 
1704   // Capture the return value, if any.
1705   Node* ret;
1706   if (call->method() == NULL ||
1707       call->method()->return_type()->basic_type() == T_VOID) {
1708     ret = top();
1709   } else {
1710     if (!call->tf()->returns_value_type_as_fields()) {
1711       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1712     } else {
1713       // Return of multiple values (value type fields): we create a
1714       // ValueType node, each field is a projection from the call.
1715       const TypeTuple* range_sig = call->tf()->range_sig();
1716       const Type* t = range_sig->field_at(TypeFunc::Parms);
1717       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1718       ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1719       Node* ctl = control();
1720       ret = ValueTypeNode::make(_gvn, ctl, merged_memory(), call, vk, TypeFunc::Parms+1, false);
1721       set_control(ctl);
1722     }
1723   }
1724 
1725   return ret;
1726 }
1727 
1728 //--------------------set_predefined_input_for_runtime_call--------------------
1729 // Reading and setting the memory state is way conservative here.
1730 // The real problem is that I am not doing real Type analysis on memory,
1731 // so I cannot distinguish card mark stores from other stores.  Across a GC
1732 // point the Store Barrier and the card mark memory has to agree.  I cannot
1733 // have a card mark store and its barrier split across the GC point from
1734 // either above or below.  Here I get that to happen by reading ALL of memory.
1735 // A better answer would be to separate out card marks from other memory.
1736 // For now, return the input memory state, so that it can be reused
1737 // after the call, if this call has restricted memory effects.
1738 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1739   // Set fixed predefined input arguments
1740   Node* memory = reset_memory();


3661     if (!vk->flatten_array()) {
3662       // Non-flattened value type arrays need to be initialized with default value type oops
3663       initialize_value_type_array(javaoop, length, elem_klass->as_value_klass(), nargs);
3664       InitializeNode* init = alloc->initialization();
3665       init->set_complete_with_arraycopy();
3666     }
3667   }
3668 
3669   return javaoop;
3670 }
3671 
3672 void GraphKit::initialize_value_type_array(Node* array, Node* length, ciValueKlass* vk, int nargs) { // Fill 'array' with default value type oops
3673   // Check for zero length
3674   Node* null_ctl = top();
3675   null_check_common(length, T_INT, false, &null_ctl, false); // null_ctl becomes the length == 0 path
3676   if (stopped()) {
3677     set_control(null_ctl); // Always zero
3678     return;
3679   }
3680 
3681   // Prepare for merging control and IO
3682   RegionNode* res_ctl = new RegionNode(3); // req 1: zero-length path, req 2: loop exit
3683   res_ctl->init_req(1, null_ctl);
3684   gvn().set_type(res_ctl, Type::CONTROL);
3685   record_for_igvn(res_ctl);
3686   Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO); // merges I/O state from both incoming paths




3687   gvn().set_type(res_io, Type::ABIO);

3688   record_for_igvn(res_io);

3689 
3690   // Emit an ideal-graph loop that stores the default value type oop into every element.
3691   SafePointNode* loop_map = NULL;
3692   {
3693     PreserveJVMState pjvms(this);
3694     // Create default value type and store it to memory
3695     Node* oop = ValueTypeNode::make_default(gvn(), vk);
3696     oop = oop->as_ValueType()->allocate(this)->get_oop();
3697 
3698     length = SubI(length, intcon(1)); // 'length' now holds the last element index
3699     add_predicate(nargs);
3700     RegionNode* loop = new RegionNode(3); // req 1: loop entry, req 2: backedge
3701     loop->init_req(1, control());
3702     gvn().set_type(loop, Type::CONTROL);
3703     record_for_igvn(loop);
3704 
3705     Node* index = new PhiNode(loop, TypeInt::INT); // induction variable, starts at 0
3706     index->init_req(1, intcon(0));
3707     gvn().set_type(index, TypeInt::INT);
3708     record_for_igvn(index);
3709 
3710     // Loop-carried memory Phi over ALL slices (TypePtr::BOTTOM) -- presumably needed because the stores span multiple alias classes; TODO confirm
3711     PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3712     mem->init_req(1, reset_memory());
3713     gvn().set_type(mem, Type::MEMORY);


3714     record_for_igvn(mem);


3715     set_control(loop);
3716     set_all_memory(mem);
3717     // Initialize array element
3718     Node* adr = array_element_address(array, index, T_OBJECT);
3719     const TypeOopPtr* elemtype = TypeValueTypePtr::make(TypePtr::NotNull, vk);
3720     Node* store = store_oop_to_array(control(), array, adr, TypeAryPtr::OOPS, oop, elemtype, T_OBJECT, MemNode::release); // NOTE(review): 'store' is unused
3721 


3722     IfNode* iff = create_and_map_if(control(), Bool(CmpI(index, length), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN); // continue while index < last index


3723     loop->init_req(2, IfTrue(iff));
3724     mem->init_req(2, merged_memory());
3725     index->init_req(2, AddI(index, intcon(1)));

3726 

3727     res_ctl->init_req(2, IfFalse(iff));
3728     res_io->set_req(2, i_o());
3729     loop_map = stop(); // capture the loop-exit JVM state before pjvms restores the old one
3730   }
3731   // Set merged control, IO and memory
3732   set_control(res_ctl);
3733   set_i_o(res_io);
3734   merge_memory(loop_map->merged_memory(), res_ctl, 2); // merge loop-exit memory in as path 2
3735 
3736   // Transform new memory Phis.
3737   for (MergeMemStream mms(merged_memory()); mms.next_non_empty();) {
3738     Node* phi = mms.memory();
3739     if (phi->is_Phi() && phi->in(0) == res_ctl) {
3740       mms.set_memory(gvn().transform(phi));
3741     }
3742   }
3743 }
3744 
3745 // The following "Ideal_foo" functions are placed here because they recognize
3746 // the graph shapes created by the functions immediately above.
3747 
3748 //---------------------------Ideal_allocation----------------------------------
3749 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3750 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3751   if (ptr == NULL) {     // reduce dumb test in callers
3752     return NULL;
3753   }
3754   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3755     ptr = ptr->in(1);
3756     if (ptr == NULL) return NULL;
3757   }
3758   // Return NULL for allocations with several casts:
3759   //   j.l.reflect.Array.newInstance(jobject, jint)
3760   //   Object.clone()
3761   // to keep more precise type from last cast.
3762   if (ptr->is_Proj()) {


4547   set_memory(st, TypeAryPtr::BYTES);
4548 }
4549 
4550 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) { // Try to fold a load of 'field' to a constant node; NULL if not possible
4551   if (!field->is_constant()) {
4552     return NULL; // Field not marked as constant.
4553   }
4554   ciInstance* holder = NULL;
4555   if (!field->is_static()) {
4556     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop(); // instance fields also need a constant holder oop
4557     if (const_oop != NULL && const_oop->is_instance()) {
4558       holder = const_oop->as_instance();
4559     }
4560   }
4561   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4562                                                         /*is_unsigned_load=*/false);
4563   if (con_type != NULL) { // NULL means the field value could not be constant-folded
4564     Node* con = makecon(con_type); // materialize the constant in the graph
4565     if (field->layout_type() == T_VALUETYPE) {
4566       // Load value type from constant oop
4567       con = ValueTypeNode::make(this, con);
4568     }
4569     return con;
4570   }
4571   return NULL;
4572 }
4573 
4574 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { // Returns 'ary' re-typed with the stable property set
4575   // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4576   // assumption of CCP analysis.
4577   return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4578 }


1374                           MemNode::MemOrd mo,
1375                           LoadNode::ControlDependency control_dependency,
1376                           bool require_atomic_access,
1377                           bool unaligned,
1378                           bool mismatched) {
1379   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1380   const TypePtr* adr_type = NULL; // debug-mode-only argument
1381   debug_only(adr_type = C->get_adr_type(adr_idx));
1382   Node* mem = memory(adr_idx);
1383   Node* ld;
1384   if (require_atomic_access && bt == T_LONG) {
1385     ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1386   } else if (require_atomic_access && bt == T_DOUBLE) {
1387     ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched);
1388   } else {
1389     ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched);
1390   }
1391   ld = _gvn.transform(ld);
1392   if (bt == T_VALUETYPE) {
1393     // Loading a non-flattened value type from memory requires a null check.
1394     ld = ValueTypeNode::make_from_oop(this, ld, true /* null check */);
1395   } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1396     // Improve graph before escape analysis and boxing elimination.
1397     record_for_igvn(ld);
1398   }
1399   return ld;
1400 }
1401 
1402 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1403                                 int adr_idx,
1404                                 MemNode::MemOrd mo,
1405                                 bool require_atomic_access,
1406                                 bool unaligned,
1407                                 bool mismatched) {
1408   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1409   const TypePtr* adr_type = NULL;
1410   debug_only(adr_type = C->get_adr_type(adr_idx));
1411   Node *mem = memory(adr_idx);
1412   Node* st;
1413   if (require_atomic_access && bt == T_LONG) {
1414     st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo);


1698     // through and exceptional paths, so replace the projections for
1699     // the fall through path.
1700     set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1701     set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1702   }
1703 
1704   // Capture the return value, if any.
1705   Node* ret;
1706   if (call->method() == NULL ||
1707       call->method()->return_type()->basic_type() == T_VOID) {
1708     ret = top();
1709   } else {
1710     if (!call->tf()->returns_value_type_as_fields()) {
1711       ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1712     } else {
1713       // Return of multiple values (value type fields): we create a
1714       // ValueType node, each field is a projection from the call.
1715       const TypeTuple* range_sig = call->tf()->range_sig();
1716       const Type* t = range_sig->field_at(TypeFunc::Parms);
1717       assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1718       ciValueKlass* vk = t->is_valuetypeptr()->value_klass();
1719       Node* ctl = control();
1720       ret = ValueTypeNode::make_from_multi(_gvn, ctl, merged_memory(), call, vk, TypeFunc::Parms+1, false);
1721       set_control(ctl);
1722     }
1723   }
1724 
1725   return ret;
1726 }
1727 
1728 //--------------------set_predefined_input_for_runtime_call--------------------
1729 // Reading and setting the memory state is way conservative here.
1730 // The real problem is that I am not doing real Type analysis on memory,
1731 // so I cannot distinguish card mark stores from other stores.  Across a GC
1732 // point the Store Barrier and the card mark memory has to agree.  I cannot
1733 // have a card mark store and its barrier split across the GC point from
1734 // either above or below.  Here I get that to happen by reading ALL of memory.
1735 // A better answer would be to separate out card marks from other memory.
1736 // For now, return the input memory state, so that it can be reused
1737 // after the call, if this call has restricted memory effects.
1738 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call) {
1739   // Set fixed predefined input arguments
1740   Node* memory = reset_memory();


3661     if (!vk->flatten_array()) {
3662       // Non-flattened value type arrays need to be initialized with default value type oops
3663       initialize_value_type_array(javaoop, length, elem_klass->as_value_klass(), nargs);
3664       InitializeNode* init = alloc->initialization();
3665       init->set_complete_with_arraycopy();
3666     }
3667   }
3668 
3669   return javaoop;
3670 }
3671 
3672 void GraphKit::initialize_value_type_array(Node* array, Node* length, ciValueKlass* vk, int nargs) { // Fill 'array' with default value type oops
3673   // Check for zero length
3674   Node* null_ctl = top();
3675   null_check_common(length, T_INT, false, &null_ctl, false); // null_ctl becomes the length == 0 path
3676   if (stopped()) {
3677     set_control(null_ctl); // Always zero
3678     return;
3679   }
3680 

3681   RegionNode* res_ctl = new RegionNode(3); // req 1: zero-length path, req 2: loop exit

3682   gvn().set_type(res_ctl, Type::CONTROL);
3683   record_for_igvn(res_ctl);
3684 
3685   // Length is zero: don't execute initialization loop
3686   res_ctl->init_req(1, null_ctl);
3687   PhiNode* res_io  = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3688   PhiNode* res_mem = PhiNode::make(res_ctl, merged_memory(), Type::MEMORY, TypePtr::BOTTOM); // merges ALL memory slices
3689   gvn().set_type(res_io, Type::ABIO);
3690   gvn().set_type(res_mem, Type::MEMORY);
3691   record_for_igvn(res_io);
3692   record_for_igvn(res_mem);
3693 
3694   // Length is non-zero: execute a loop that initializes the array with the default value type




3695   Node* oop = ValueTypeNode::make_default(gvn(), vk);
3696   oop = oop->as_ValueType()->allocate(this)->get_oop(); // allocate once; the same default oop is stored into every element
3697 

3698   add_predicate(nargs);
3699   RegionNode* loop = new RegionNode(3); // req 1: loop entry, req 2: backedge
3700   loop->init_req(1, control());
3701   PhiNode* index = PhiNode::make(loop, intcon(0), TypeInt::INT); // induction variable, starts at 0
3702   PhiNode* mem   = PhiNode::make(loop, reset_memory(), Type::MEMORY, TypePtr::BOTTOM); // loop-carried memory over ALL slices
3703 
3704   gvn().set_type(loop, Type::CONTROL);

3705   gvn().set_type(index, TypeInt::INT);





3706   gvn().set_type(mem, Type::MEMORY);
3707   record_for_igvn(loop);
3708   record_for_igvn(index);
3709   record_for_igvn(mem);
3710 
3711   // Loop body: initialize array element at 'index'
3712   set_control(loop);
3713   set_all_memory(mem);

3714   Node* adr = array_element_address(array, index, T_OBJECT);
3715   const TypeOopPtr* elemtype = TypeValueTypePtr::make(TypePtr::NotNull, vk);
3716   store_oop_to_array(control(), array, adr, TypeAryPtr::OOPS, oop, elemtype, T_VALUETYPE, MemNode::release);
3717 
3718   // Check if we need to execute another loop iteration
3719   length = SubI(length, intcon(1)); // 'length' now holds the last element index
3720   IfNode* iff = create_and_map_if(control(), Bool(CmpI(index, length), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN); // continue while index < last index
3721 
3722   // Continue with next iteration
3723   loop->init_req(2, IfTrue(iff));

3724   index->init_req(2, AddI(index, intcon(1)));
3725   mem->init_req(2, merged_memory());
3726 
3727   // Exit loop
3728   res_ctl->init_req(2, IfFalse(iff));
3729   res_io->set_req(2, i_o());
3730   res_mem->set_req(2, reset_memory());
3731 
3732   // Set merged control, IO and memory
3733   set_control(res_ctl);
3734   set_i_o(res_io);
3735   set_all_memory(res_mem);








3736 }
3737 
3738 // The following "Ideal_foo" functions are placed here because they recognize
3739 // the graph shapes created by the functions immediately above.
3740 
3741 //---------------------------Ideal_allocation----------------------------------
3742 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3743 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3744   if (ptr == NULL) {     // reduce dumb test in callers
3745     return NULL;
3746   }
3747   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3748     ptr = ptr->in(1);
3749     if (ptr == NULL) return NULL;
3750   }
3751   // Return NULL for allocations with several casts:
3752   //   j.l.reflect.Array.newInstance(jobject, jint)
3753   //   Object.clone()
3754   // to keep more precise type from last cast.
3755   if (ptr->is_Proj()) {


4540   set_memory(st, TypeAryPtr::BYTES);
4541 }
4542 
4543 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) { // Try to fold a load of 'field' to a constant node; NULL if not possible
4544   if (!field->is_constant()) {
4545     return NULL; // Field not marked as constant.
4546   }
4547   ciInstance* holder = NULL;
4548   if (!field->is_static()) {
4549     ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop(); // instance fields also need a constant holder oop
4550     if (const_oop != NULL && const_oop->is_instance()) {
4551       holder = const_oop->as_instance();
4552     }
4553   }
4554   const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4555                                                         /*is_unsigned_load=*/false);
4556   if (con_type != NULL) { // NULL means the field value could not be constant-folded
4557     Node* con = makecon(con_type); // materialize the constant in the graph
4558     if (field->layout_type() == T_VALUETYPE) {
4559       // Load value type from constant oop
4560       con = ValueTypeNode::make_from_oop(this, con);
4561     }
4562     return con;
4563   }
4564   return NULL;
4565 }
4566 
4567 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) { // Returns 'ary' re-typed with the stable property set
4568   // Reify the property as a CastPP node in Ideal graph to comply with monotonicity
4569   // assumption of CCP analysis.
4570   return _gvn.transform(new CastPPNode(ary, ary_type->cast_to_stable(true)));
4571 }
< prev index next >