
src/share/vm/opto/graphKit.cpp


1670       if (!field->type()->is_loaded()) {
1671         val_type = TypeInstPtr::BOTTOM;
1672       } else {
1673         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1674       }
1675     }
1676   } else if (adr_type->isa_aryptr()) {
1677     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1678   }
1679   if (val_type == NULL) {
1680     val_type = TypeInstPtr::BOTTOM;
1681   }
1682   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1683 }
1684 
1685 
1686 //-------------------------array_element_address-------------------------
1687 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1688                                       const TypeInt* sizetype, Node* ctrl) {
1689   uint shift  = exact_log2(type2aelembytes(elembt));
1690   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1691 
1692   // short-circuit a common case (saves lots of confusing waste motion)
1693   jint idx_con = find_int_con(idx, -1);
1694   if (idx_con >= 0) {
1695     intptr_t offset = header + ((intptr_t)idx_con << shift);
1696     return basic_plus_adr(ary, offset);
1697   }
1698 
1699   // must be correct type for alignment purposes
1700   Node* base  = basic_plus_adr(ary, header);
1701   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1702   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1703   return basic_plus_adr(ary, base, scale);
1704 }
1705 
1706 //-------------------------load_array_element-------------------------
1707 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1708   const Type* elemtype = arytype->elem();
1709   BasicType elembt = elemtype->array_element_basic_type();

1710   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1711   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1712   return ld;
1713 }
1714 
1715 //-------------------------set_arguments_for_java_call-------------------------
1716 // Arguments (pre-popped from the stack) are taken from the JVMS.
1717 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1718   // Add the call arguments:
1719   uint nargs = call->method()->arg_size();
1720   for (uint i = 0, idx = 0; i < nargs; i++) {
1721     Node* arg = argument(i);
1722     if (ValueTypePassFieldsAsArgs) {
1723       if (arg->is_ValueType()) {
1724         ValueTypeNode* vt = arg->as_ValueType();
1725         // We don't pass value type arguments by reference but instead
1726         // pass each field of the value type
1727         idx += vt->set_arguments_for_java_call(call, idx + TypeFunc::Parms, *this);
1728       } else {
1729         call->init_req(idx + TypeFunc::Parms, arg);


3500   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3501   if (!tklass)  tklass = TypeKlassPtr::OBJECT;
3502   const TypeOopPtr* oop_type = tklass->as_instance_type();
3503 
3504   // Now generate allocation code
3505 
3506   // The entire memory state is needed for slow path of the allocation
3507   // since GC and deoptimization can happen.
3508   Node *mem = reset_memory();
3509   set_all_memory(mem); // Create new memory state
3510 
3511   AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3512                                          control(), mem, i_o(),
3513                                          size, klass_node,
3514                                          initial_slow_test);
3515 
3516   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3517 }
3518 
3519 //-------------------------------new_array-------------------------------------
3520 // helper for both newarray and anewarray
3521 // The 'length' parameter is (obviously) the length of the array.
3522 // See comments on new_instance for the meaning of the other arguments.
3523 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
3524                           Node* length,         // number of array elements
3525                           int   nargs,          // number of arguments to push back for uncommon trap
3526                           Node* *return_size_val,
3527                           bool deoptimize_on_exception) {
3528   jint  layout_con = Klass::_lh_neutral_value;
3529   Node* layout_val = get_layout_helper(klass_node, layout_con);
3530   int   layout_is_con = (layout_val == NULL);
3531 
3532   if (!layout_is_con && !StressReflectiveCode &&
3533       !too_many_traps(Deoptimization::Reason_class_check)) {
3534     // This is a reflective array creation site.
3535     // Optimistically assume that it is a subtype of Object[],
3536     // so that we can fold up all the address arithmetic.
3537     layout_con = Klass::array_layout_helper(T_OBJECT);
3538     Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3539     Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3540     { BuildCutout unless(this, bol_lh, PROB_MAX);


3561   }
3562 
3563   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3564   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3565 
3566   // --- Size Computation ---
3567   // array_size = round_to_heap(array_header + (length << elem_shift));
3568   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3569   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3570   // The rounding mask is strength-reduced, if possible.
3571   int round_mask = MinObjAlignmentInBytes - 1;
3572   Node* header_size = NULL;
3573   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3574   // (T_BYTE has the weakest alignment and size restrictions...)
3575   if (layout_is_con) {
3576     int       hsize  = Klass::layout_helper_header_size(layout_con);
3577     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3578     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3579     if ((round_mask & ~right_n_bits(eshift)) == 0)
3580       round_mask = 0;  // strength-reduce it if it goes away completely
3581     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");

3582     assert(header_size_min <= hsize, "generic minimum is smallest");
3583     header_size_min = hsize;
3584     header_size = intcon(hsize + round_mask);
3585   } else {
3586     Node* hss   = intcon(Klass::_lh_header_size_shift);
3587     Node* hsm   = intcon(Klass::_lh_header_size_mask);
3588     Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
3589     hsize       = _gvn.transform( new AndINode(hsize, hsm) );
3590     Node* mask  = intcon(round_mask);
3591     header_size = _gvn.transform( new AddINode(hsize, mask) );
3592   }
3593 
3594   Node* elem_shift = NULL;
3595   if (layout_is_con) {
3596     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3597     if (eshift != 0)
3598       elem_shift = intcon(eshift);
3599   } else {
3600     // There is no need to mask or shift this value.
3601     // The semantics of LShiftINode include an implicit mask to 0x1F.


3645   // places, one where the length is sharply limited, and the other
3646   // after a successful allocation.
3647   Node* abody = lengthx;
3648   if (elem_shift != NULL)
3649     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
3650   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
3651   if (round_mask != 0) {
3652     Node* mask = MakeConX(~round_mask);
3653     size       = _gvn.transform( new AndXNode(size, mask) );
3654   }
3655   // else if round_mask == 0, the size computation is self-rounding
3656 
3657   if (return_size_val != NULL) {
3658     // This is the size
3659     (*return_size_val) = size;
3660   }
3661 
3662   // Now generate allocation code
3663 
3664   // The entire memory state is needed for slow path of the allocation
3665   // since GC and deoptimization can happened.
3666   Node *mem = reset_memory();
3667   set_all_memory(mem); // Create new memory state
3668 
3669   if (initial_slow_test->is_Bool()) {
3670     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3671     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3672   }
3673 
3674   // Create the AllocateArrayNode and its result projections
3675   AllocateArrayNode* alloc
3676     = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3677                             control(), mem, i_o(),
3678                             size, klass_node,
3679                             initial_slow_test,
3680                             length);
3681 
3682   // Cast to correct type.  Note that the klass_node may be constant or not,
3683   // and in the latter case the actual array type will be inexact also.
3684   // (This happens via a non-constant argument to inline_native_newArray.)
3685   // In any case, the value of klass_node provides the desired array type.
3686   const TypeInt* length_type = _gvn.find_int_type(length);
3687   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3688   if (ary_type->isa_aryptr() && length_type != NULL) {
3689     // Try to get a better type than POS for the size
3690     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3691   }
3692 
3693   Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3694 
3695   // Cast length on remaining path to be as narrow as possible
3696   if (map()->find_edge(length) >= 0) {
3697     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3698     if (ccast != length) {
3699       _gvn.set_type_bottom(ccast);
3700       record_for_igvn(ccast);
3701       replace_in_map(length, ccast);
3702     }
3703   }
3704 
3705   return javaoop;
3706 }
3707 
3708 // The following "Ideal_foo" functions are placed here because they recognize
3709 // the graph shapes created by the functions immediately above.
3710 
3711 //---------------------------Ideal_allocation----------------------------------
3712 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3713 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3714   if (ptr == NULL) {     // reduce dumb test in callers
3715     return NULL;
3716   }
3717   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3718     ptr = ptr->in(1);
3719     if (ptr == NULL) return NULL;
3720   }
3721   // Return NULL for allocations with several casts:
3722   //   j.l.reflect.Array.newInstance(jobject, jint)
3723   //   Object.clone()
3724   // to keep more precise type from last cast.
3725   if (ptr->is_Proj()) {




1670       if (!field->type()->is_loaded()) {
1671         val_type = TypeInstPtr::BOTTOM;
1672       } else {
1673         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1674       }
1675     }
1676   } else if (adr_type->isa_aryptr()) {
1677     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1678   }
1679   if (val_type == NULL) {
1680     val_type = TypeInstPtr::BOTTOM;
1681   }
1682   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1683 }
1684 
1685 
1686 //-------------------------array_element_address-------------------------
1687 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1688                                       const TypeInt* sizetype, Node* ctrl) {
1689   uint shift  = exact_log2(type2aelembytes(elembt));
1690   ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
1691   if (arytype_klass->is_value_array_klass()) {
1692     ciValueArrayKlass* vak = arytype_klass->as_value_array_klass();
1693     shift = vak->log2_element_size();
1694   }
1695   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1696 
1697   // short-circuit a common case (saves lots of confusing waste motion)
1698   jint idx_con = find_int_con(idx, -1);
1699   if (idx_con >= 0) {
1700     intptr_t offset = header + ((intptr_t)idx_con << shift);
1701     return basic_plus_adr(ary, offset);
1702   }
1703 
1704   // must be correct type for alignment purposes
1705   Node* base  = basic_plus_adr(ary, header);
1706   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1707   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1708   return basic_plus_adr(ary, base, scale);
1709 }
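Note on the address arithmetic above: array_element_address() computes base_offset_in_bytes(elembt) plus the index scaled by the element size, and the new code takes the scale from the value array klass's log2_element_size() when the array holds flattened value types. Below is a minimal standalone sketch of that arithmetic; the header size and shift are illustrative assumptions, not values HotSpot guarantees on any platform.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t header = 16;  // assumed arrayOopDesc::base_offset_in_bytes(elembt)
      const unsigned shift  = 3;   // assumed log2 element size; a value array klass
                                   // would supply its own log2_element_size() here
      for (intptr_t idx = 0; idx < 4; idx++) {
        intptr_t offset = header + (idx << shift);  // the constant-index fast path above
        printf("element %ld -> byte offset %ld from the array base\n",
               (long)idx, (long)offset);
      }
      return 0;
    }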
1710 
1711 //-------------------------load_array_element-------------------------
1712 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1713   const Type* elemtype = arytype->elem();
1714   BasicType elembt = elemtype->array_element_basic_type();
1715   assert(elembt != T_VALUETYPE, "value types are not supported by this method");
1716   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1717   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1718   return ld;
1719 }
1720 
1721 //-------------------------set_arguments_for_java_call-------------------------
1722 // Arguments (pre-popped from the stack) are taken from the JVMS.
1723 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1724   // Add the call arguments:
1725   uint nargs = call->method()->arg_size();
1726   for (uint i = 0, idx = 0; i < nargs; i++) {
1727     Node* arg = argument(i);
1728     if (ValueTypePassFieldsAsArgs) {
1729       if (arg->is_ValueType()) {
1730         ValueTypeNode* vt = arg->as_ValueType();
1731         // We don't pass value type arguments by reference but instead
1732         // pass each field of the value type
1733         idx += vt->set_arguments_for_java_call(call, idx + TypeFunc::Parms, *this);
1734       } else {
1735         call->init_req(idx + TypeFunc::Parms, arg);
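When ValueTypePassFieldsAsArgs is set, the loop above scalarizes a value type argument: instead of wiring a single oop edge into the call, it wires one edge per field and advances idx by however many edges were added. As a loose analogy only (assumed struct and function names, not HotSpot code), the difference is that between passing an aggregate by reference and passing its fields individually:

    #include <cstdio>

    struct Point { int x; int y; };              // stand-in for a two-field value type

    void call_by_reference(const Point* p) {     // one argument slot, like the 'else' branch
      printf("by reference: (%d, %d)\n", p->x, p->y);
    }

    void call_by_fields(int x, int y) {          // one slot per field, like the ValueType branch
      printf("by fields:    (%d, %d)\n", x, y);
    }

    int main() {
      Point p = {1, 2};
      call_by_reference(&p);
      call_by_fields(p.x, p.y);
      return 0;
    }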


3506   const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3507   if (!tklass)  tklass = TypeKlassPtr::OBJECT;
3508   const TypeOopPtr* oop_type = tklass->as_instance_type();
3509 
3510   // Now generate allocation code
3511 
3512   // The entire memory state is needed for slow path of the allocation
3513   // since GC and deoptimization can happen.
3514   Node *mem = reset_memory();
3515   set_all_memory(mem); // Create new memory state
3516 
3517   AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3518                                          control(), mem, i_o(),
3519                                          size, klass_node,
3520                                          initial_slow_test);
3521 
3522   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3523 }
3524 
3525 //-------------------------------new_array-------------------------------------
3526 // helper for newarray, anewarray and vnewarray
3527 // The 'length' parameter is (obviously) the length of the array.
3528 // See comments on new_instance for the meaning of the other arguments.
3529 Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
3530                           Node* length,         // number of array elements
3531                           int   nargs,          // number of arguments to push back for uncommon trap
3532                           Node* *return_size_val,
3533                           bool deoptimize_on_exception) {
3534   jint  layout_con = Klass::_lh_neutral_value;
3535   Node* layout_val = get_layout_helper(klass_node, layout_con);
3536   int   layout_is_con = (layout_val == NULL);
3537 
3538   if (!layout_is_con && !StressReflectiveCode &&
3539       !too_many_traps(Deoptimization::Reason_class_check)) {
3540     // This is a reflective array creation site.
3541     // Optimistically assume that it is a subtype of Object[],
3542     // so that we can fold up all the address arithmetic.
3543     layout_con = Klass::array_layout_helper(T_OBJECT);
3544     Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3545     Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3546     { BuildCutout unless(this, bol_lh, PROB_MAX);


3567   }
3568 
3569   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3570   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3571 
3572   // --- Size Computation ---
3573   // array_size = round_to_heap(array_header + (length << elem_shift));
3574   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3575   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3576   // The rounding mask is strength-reduced, if possible.
3577   int round_mask = MinObjAlignmentInBytes - 1;
3578   Node* header_size = NULL;
3579   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3580   // (T_BYTE has the weakest alignment and size restrictions...)
3581   if (layout_is_con) {
3582     int       hsize  = Klass::layout_helper_header_size(layout_con);
3583     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3584     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3585     if ((round_mask & ~right_n_bits(eshift)) == 0)
3586       round_mask = 0;  // strength-reduce it if it goes away completely
3587     // TODO re-enable assert
3588   //  assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3589     assert(header_size_min <= hsize, "generic minimum is smallest");
3590     header_size_min = hsize;
3591     header_size = intcon(hsize + round_mask);
3592   } else {
3593     Node* hss   = intcon(Klass::_lh_header_size_shift);
3594     Node* hsm   = intcon(Klass::_lh_header_size_mask);
3595     Node* hsize = _gvn.transform( new URShiftINode(layout_val, hss) );
3596     hsize       = _gvn.transform( new AndINode(hsize, hsm) );
3597     Node* mask  = intcon(round_mask);
3598     header_size = _gvn.transform( new AddINode(hsize, mask) );
3599   }
3600 
3601   Node* elem_shift = NULL;
3602   if (layout_is_con) {
3603     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3604     if (eshift != 0)
3605       elem_shift = intcon(eshift);
3606   } else {
3607     // There is no need to mask or shift this value.
3608     // The semantics of LShiftINode include an implicit mask to 0x1F.
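Note on the size computation above: the comment gives array_size = round_to_heap(array_header + (length << elem_shift)), and the code implements round_to(x, MinObjAlignmentInBytes) by folding round_mask into the header size and later clearing the low bits with ~round_mask. A standalone check of that identity follows; the alignment, header size, and element shift are assumptions picked for illustration.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t align      = 8;          // assumed MinObjAlignmentInBytes
      const intptr_t round_mask = align - 1;
      const intptr_t header     = 16;         // assumed array header size
      const int      elem_shift = 0;          // e.g. a byte array
      for (intptr_t length = 0; length < 5; length++) {
        intptr_t unrounded = header + (length << elem_shift);
        // (header + round_mask + body) & ~round_mask == round_to(header + body, align)
        intptr_t size = (header + round_mask + (length << elem_shift)) & ~round_mask;
        printf("length %ld: %ld bytes unrounded -> %ld bytes allocated\n",
               (long)length, (long)unrounded, (long)size);
      }
      return 0;
    }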


3652   // places, one where the length is sharply limited, and the other
3653   // after a successful allocation.
3654   Node* abody = lengthx;
3655   if (elem_shift != NULL)
3656     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
3657   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
3658   if (round_mask != 0) {
3659     Node* mask = MakeConX(~round_mask);
3660     size       = _gvn.transform( new AndXNode(size, mask) );
3661   }
3662   // else if round_mask == 0, the size computation is self-rounding
3663 
3664   if (return_size_val != NULL) {
3665     // This is the size
3666     (*return_size_val) = size;
3667   }
3668 
3669   // Now generate allocation code
3670 
3671   // The entire memory state is needed for slow path of the allocation
3672   // since GC and deoptimization can happen.
3673   Node *mem = reset_memory();
3674   set_all_memory(mem); // Create new memory state
3675 
3676   if (initial_slow_test->is_Bool()) {
3677     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3678     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3679   }
3680 
3681   // Create the AllocateArrayNode and its result projections
3682   AllocateArrayNode* alloc
3683     = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3684                             control(), mem, i_o(),
3685                             size, klass_node,
3686                             initial_slow_test,
3687                             length);
3688 
3689   // Cast to correct type.  Note that the klass_node may be constant or not,
3690   // and in the latter case the actual array type will be inexact also.
3691   // (This happens via a non-constant argument to inline_native_newArray.)
3692   // In any case, the value of klass_node provides the desired array type.
3693   const TypeInt* length_type = _gvn.find_int_type(length);
3694   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3695   if (ary_type->isa_aryptr() && length_type != NULL) {
3696     // Try to get a better type than POS for the size
3697     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3698   }
3699 
3700   Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3701 
3702   // Cast length on remaining path to be as narrow as possible
3703   if (map()->find_edge(length) >= 0) {
3704     Node* ccast = alloc->make_ideal_length(ary_type, &_gvn);
3705     if (ccast != length) {
3706       _gvn.set_type_bottom(ccast);
3707       record_for_igvn(ccast);
3708       replace_in_map(length, ccast);
3709     }
3710   }
3711 
3712   const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
3713   ciKlass* elem_klass = ary_ptr != NULL ? ary_ptr->klass()->as_array_klass()->element_klass() : NULL;
3714   //if (layout_is_con && Klass::layout_helper_element_type(layout_con) == T_VALUETYPE) {
3715   if (elem_klass != NULL && elem_klass->is_valuetype()) {
3716     ciValueKlass* vk = elem_klass->as_value_klass();
3717     if (vk->flatten_array()) {
3718       // TODO
3719     } else {
3720       // TODO explain this and add asserts
3721       initialize_value_type_array(javaoop, length, elem_klass->as_value_klass(), nargs);
3722       InitializeNode* init = alloc->initialization();
3723       init->set_complete_with_arraycopy();
3724     }
3725   }
3726 
3727   return javaoop;
3728 }
3729 
3730 void GraphKit::initialize_value_type_array(Node* array, Node* length, ciValueKlass* vk, int nargs) {
3731   // Check for zero length
3732   Node* null_ctl = top();
3733   null_check_common(length, T_INT, false, &null_ctl, false);
3734   if (stopped()) {
3735     set_control(null_ctl); // Always zero
3736     return;
3737   }
3738 
3739   // Prepare for merging control and IO
3740   RegionNode* res_ctl = new RegionNode(3);
3741   res_ctl->init_req(1, null_ctl);
3742   gvn().set_type(res_ctl, Type::CONTROL);
3743   record_for_igvn(res_ctl);
3744   Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3745   gvn().set_type(res_io, Type::ABIO);
3746   record_for_igvn(res_io);
3747 
3748   // TODO comment
3749   SafePointNode* loop_map = NULL;
3750   {
3751     PreserveJVMState pjvms(this);
3752     // Create default value type and store it to memory
3753     Node* oop = ValueTypeNode::make_default(gvn(), vk);
3754     oop = oop->as_ValueType()->store_to_memory(this);
3755 
3756     length = SubI(length, intcon(1));
3757     add_predicate(nargs);
3758     RegionNode* loop = new RegionNode(3);
3759     loop->init_req(1, control());
3760     gvn().set_type(loop, Type::CONTROL);
3761     record_for_igvn(loop);
3762 
3763     Node* index = new PhiNode(loop, TypeInt::INT);
3764     index->init_req(1, intcon(0));
3765     gvn().set_type(index, TypeInt::INT);
3766     record_for_igvn(index);
3767 
3768     // TODO explain why we need to capture all memory
3769     PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3770     mem->init_req(1, reset_memory());
3771     gvn().set_type(mem, Type::MEMORY);
3772     record_for_igvn(mem);
3773     set_control(loop);
3774     set_all_memory(mem);
3775     // Initialize array element
3776     Node* adr = array_element_address(array, index, T_OBJECT);
3777     const TypeOopPtr* elemtype = TypeValueTypePtr::make(TypePtr::NotNull, vk); // ary_type->is_aryptr()->elem()->make_oopptr();
3778     Node* store = store_oop_to_array(control(), array, adr, TypeAryPtr::OOPS, oop, elemtype, T_OBJECT, MemNode::release);
3779 
3780     IfNode* iff = create_and_map_if(control(), Bool(CmpI(index, length), BoolTest::lt), PROB_FAIR, COUNT_UNKNOWN);
3781     loop->init_req(2, IfTrue(iff));
3782     mem->init_req(2, merged_memory());
3783     index->init_req(2, AddI(index, intcon(1)));
3784 
3785     res_ctl->init_req(2, IfFalse(iff));
3786     res_io->set_req(2, i_o());
3787     loop_map = stop();
3788   }
3789   // Set merged control, IO and memory
3790   set_control(res_ctl);
3791   set_i_o(res_io);
3792   merge_memory(loop_map->merged_memory(), res_ctl, 2);
3793 }
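Note on initialize_value_type_array() above: it guards against a zero length and then emits a loop that stores the buffered default value oop into every element of the (non-flattened) value type array, merging control, I/O, and memory with the zero-length path afterwards. A rough behavioral sketch in plain C++ follows; the DefaultValue type and array shape are assumptions, and the real code builds Region/Phi/store nodes rather than executing a loop.

    #include <cstdio>
    #include <vector>

    struct DefaultValue { int field = 0; };              // stand-in for vk's default instance

    int main() {
      int length = 4;
      std::vector<DefaultValue*> array(length, nullptr); // freshly allocated value type array
      static DefaultValue the_default;                   // ValueTypeNode::make_default analogue
      if (length > 0) {                                  // the zero-length check before the loop
        for (int i = 0; i < length; i++) {
          array[i] = &the_default;                       // store_oop_to_array analogue
        }
      }
      printf("filled %d element(s) with the default value\n", length);
      return 0;
    }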
3794 
3795 // The following "Ideal_foo" functions are placed here because they recognize
3796 // the graph shapes created by the functions immediately above.
3797 
3798 //---------------------------Ideal_allocation----------------------------------
3799 // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode.
3800 AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) {
3801   if (ptr == NULL) {     // reduce dumb test in callers
3802     return NULL;
3803   }
3804   if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast
3805     ptr = ptr->in(1);
3806     if (ptr == NULL) return NULL;
3807   }
3808   // Return NULL for allocations with several casts:
3809   //   j.l.reflect.Array.newInstance(jobject, jint)
3810   //   Object.clone()
3811   // to keep more precise type from last cast.
3812   if (ptr->is_Proj()) {

