src/share/vm/opto/graphKit.cpp

1641       // known field.  This code is a copy of the do_put_xxx logic.
1642       ciField* field = at->field();
1643       if (!field->type()->is_loaded()) {
1644         val_type = TypeInstPtr::BOTTOM;
1645       } else {
1646         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1647       }
1648     }
1649   } else if (adr_type->isa_aryptr()) {
1650     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1651   }
1652   if (val_type == NULL) {
1653     val_type = TypeInstPtr::BOTTOM;
1654   }
1655   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1656 }
1657 
1658 
1659 //-------------------------array_element_address-------------------------
1660 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1661                                       const TypeInt* sizetype) {
1662   uint shift  = exact_log2(type2aelembytes(elembt));
1663   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1664 
1665   // short-circuit a common case (saves lots of confusing waste motion)
1666   jint idx_con = find_int_con(idx, -1);
1667   if (idx_con >= 0) {
1668     intptr_t offset = header + ((intptr_t)idx_con << shift);
1669     return basic_plus_adr(ary, offset);
1670   }
1671 
1672   // must be correct type for alignment purposes
1673   Node* base  = basic_plus_adr(ary, header);
1674   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype);
1675   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1676   return basic_plus_adr(ary, base, scale);
1677 }
1678 
1679 //-------------------------load_array_element-------------------------
1680 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1681   const Type* elemtype = arytype->elem();
1682   BasicType elembt = elemtype->array_element_basic_type();
1683   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1684   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1685   return ld;
1686 }
1687 
1688 //-------------------------set_arguments_for_java_call-------------------------
1689 // Arguments (pre-popped from the stack) are taken from the JVMS.
1690 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1691   // Add the call arguments:
1692   uint nargs = call->method()->arg_size();
1693   for (uint i = 0; i < nargs; i++) {
1694     Node* arg = argument(i);


3490     layout_val = NULL;
3491     layout_is_con = true;
3492   }
3493 
3494   // Generate the initial go-slow test.  Make sure we do not overflow
3495   // if length is huge (near 2Gig) or negative!  We do not need
3496   // exact double-words here, just a close approximation of needed
3497   // double-words.  We can't add any offset or rounding bits, lest we
3498   // take a size of -1 bytes and make it positive.  Use an unsigned
3499   // compare, so negative sizes look hugely positive.
3500   int fast_size_limit = FastAllocateSizeLimit;
3501   if (layout_is_con) {
3502     assert(!StressReflectiveCode, "stress mode does not use these paths");
3503     // Increase the size limit if we have exact knowledge of array type.
3504     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3505     fast_size_limit <<= (LogBytesPerLong - log2_esize);
3506   }
3507 
3508   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3509   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
3510   if (initial_slow_test->is_Bool()) {
3511     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3512     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3513   }
3514 
3515   // --- Size Computation ---
3516   // array_size = round_to_heap(array_header + (length << elem_shift));
3517   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3518   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3519   // The rounding mask is strength-reduced, if possible.
3520   int round_mask = MinObjAlignmentInBytes - 1;
3521   Node* header_size = NULL;
3522   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3523   // (T_BYTE has the weakest alignment and size restrictions...)
3524   if (layout_is_con) {
3525     int       hsize  = Klass::layout_helper_header_size(layout_con);
3526     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3527     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3528     if ((round_mask & ~right_n_bits(eshift)) == 0)
3529       round_mask = 0;  // strength-reduce it if it goes away completely
3530     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3531     assert(header_size_min <= hsize, "generic minimum is smallest");
3532     header_size_min = hsize;
3533     header_size = intcon(hsize + round_mask);


3539     Node* mask  = intcon(round_mask);
3540     header_size = _gvn.transform( new AddINode(hsize, mask) );
3541   }
3542 
3543   Node* elem_shift = NULL;
3544   if (layout_is_con) {
3545     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3546     if (eshift != 0)
3547       elem_shift = intcon(eshift);
3548   } else {
3549     // There is no need to mask or shift this value.
3550     // The semantics of LShiftINode include an implicit mask to 0x1F.
3551     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3552     elem_shift = layout_val;
3553   }
3554 
3555   // Transition to native address size for all offset calculations:
3556   Node* lengthx = ConvI2X(length);
3557   Node* headerx = ConvI2X(header_size);
3558 #ifdef _LP64
3559   { const TypeLong* tllen = _gvn.find_long_type(lengthx);
3560     if (tllen != NULL && tllen->_lo < 0) {
3561       // Add a manual constraint to a positive range.  Cf. array_element_address.
3562       jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
3563       if (size_max > tllen->_hi)  size_max = tllen->_hi;
3564       const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
3565       lengthx = _gvn.transform( new ConvI2LNode(length, tlcon));

3566     }
3567   }
3568 #endif
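       // In this version the positive-range constraint lives only in the type
       // given to the replacement ConvI2L node ([0, size_max]); nothing ties the
       // conversion to a control edge, so it is free to float during later
       // optimizations.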
3569 
3570   // Combine header size (plus rounding) and body size.  Then round down.
3571   // This computation cannot overflow, because it is used only in two
3572   // places, one where the length is sharply limited, and the other
3573   // after a successful allocation.
3574   Node* abody = lengthx;
3575   if (elem_shift != NULL)
3576     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
3577   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
3578   if (round_mask != 0) {
3579     Node* mask = MakeConX(~round_mask);
3580     size       = _gvn.transform( new AndXNode(size, mask) );
3581   }
3582   // else if round_mask == 0, the size computation is self-rounding
3583 
3584   if (return_size_val != NULL) {
3585     // This is the size
3586     (*return_size_val) = size;
3587   }
3588 
3589   // Now generate allocation code
3590 
3591   // The entire memory state is needed for the slow path of the allocation
3592   // since GC and deoptimization can happen.
3593   Node *mem = reset_memory();
3594   set_all_memory(mem); // Create new memory state





3595 
3596   // Create the AllocateArrayNode and its result projections
3597   AllocateArrayNode* alloc
3598     = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3599                             control(), mem, i_o(),
3600                             size, klass_node,
3601                             initial_slow_test,
3602                             length);
3603 
3604   // Cast to correct type.  Note that the klass_node may be constant or not,
3605   // and in the latter case the actual array type will be inexact also.
3606   // (This happens via a non-constant argument to inline_native_newArray.)
3607   // In any case, the value of klass_node provides the desired array type.
3608   const TypeInt* length_type = _gvn.find_int_type(length);
3609   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3610   if (ary_type->isa_aryptr() && length_type != NULL) {
3611     // Try to get a better type than POS for the size
3612     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3613   }
3614 




1641       // known field.  This code is a copy of the do_put_xxx logic.
1642       ciField* field = at->field();
1643       if (!field->type()->is_loaded()) {
1644         val_type = TypeInstPtr::BOTTOM;
1645       } else {
1646         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1647       }
1648     }
1649   } else if (adr_type->isa_aryptr()) {
1650     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1651   }
1652   if (val_type == NULL) {
1653     val_type = TypeInstPtr::BOTTOM;
1654   }
1655   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
1656 }
1657 
1658 
1659 //-------------------------array_element_address-------------------------
1660 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1661                                       const TypeInt* sizetype, Node* ctrl) {
1662   uint shift  = exact_log2(type2aelembytes(elembt));
1663   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1664 
1665   // short-circuit a common case (saves lots of confusing waste motion)
1666   jint idx_con = find_int_con(idx, -1);
1667   if (idx_con >= 0) {
1668     intptr_t offset = header + ((intptr_t)idx_con << shift);
1669     return basic_plus_adr(ary, offset);
1670   }
1671 
1672   // must be correct type for alignment purposes
1673   Node* base  = basic_plus_adr(ary, header);
1674   idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1675   Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1676   return basic_plus_adr(ary, base, scale);
1677 }
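       // The new ctrl argument is handed straight to Compile::conv_I2X_index so
       // that the index's I2X conversion can be pinned below a dominating range
       // check rather than floating above it.  A hedged usage sketch (the caller
       // and the implied default value of ctrl are assumptions, not shown in
       // this hunk):
       //   Node* adr = array_element_address(ary, idx, T_INT, sizetype, control());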
1678 
1679 //-------------------------load_array_element-------------------------
1680 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1681   const Type* elemtype = arytype->elem();
1682   BasicType elembt = elemtype->array_element_basic_type();
1683   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1684   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1685   return ld;
1686 }
1687 
1688 //-------------------------set_arguments_for_java_call-------------------------
1689 // Arguments (pre-popped from the stack) are taken from the JVMS.
1690 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1691   // Add the call arguments:
1692   uint nargs = call->method()->arg_size();
1693   for (uint i = 0; i < nargs; i++) {
1694     Node* arg = argument(i);


3490     layout_val = NULL;
3491     layout_is_con = true;
3492   }
3493 
3494   // Generate the initial go-slow test.  Make sure we do not overflow
3495   // if length is huge (near 2Gig) or negative!  We do not need
3496   // exact double-words here, just a close approximation of needed
3497   // double-words.  We can't add any offset or rounding bits, lest we
3498   // take a size of -1 bytes and make it positive.  Use an unsigned
3499   // compare, so negative sizes look hugely positive.
3500   int fast_size_limit = FastAllocateSizeLimit;
3501   if (layout_is_con) {
3502     assert(!StressReflectiveCode, "stress mode does not use these paths");
3503     // Increase the size limit if we have exact knowledge of array type.
3504     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3505     fast_size_limit <<= (LogBytesPerLong - log2_esize);
3506   }
3507 
3508   Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3509   Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
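       // A quick illustration of the unsigned-compare trick described above:
       // a negative length such as -1 is reinterpreted by CmpU as 0xFFFFFFFF,
       // which is larger than any fast_size_limit, so BoolTest::gt routes the
       // allocation to the slow path instead of letting the size computation
       // see a negative value.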




3510 
3511   // --- Size Computation ---
3512   // array_size = round_to_heap(array_header + (length << elem_shift));
3513   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3514   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3515   // The rounding mask is strength-reduced, if possible.
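       // Worked instance of the formula above (example values assumed, not
       // taken from this code): with MinObjAlignmentInBytes == 8, an int[]
       // has elem_shift == 2, and assuming a 16-byte array_header, a length
       // of 5 gives
       //   round_to(16 + (5 << 2), 8) == ((36 + 7) & ~7) == 40 bytes.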
3516   int round_mask = MinObjAlignmentInBytes - 1;
3517   Node* header_size = NULL;
3518   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3519   // (T_BYTE has the weakest alignment and size restrictions...)
3520   if (layout_is_con) {
3521     int       hsize  = Klass::layout_helper_header_size(layout_con);
3522     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3523     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3524     if ((round_mask & ~right_n_bits(eshift)) == 0)
3525       round_mask = 0;  // strength-reduce it if it goes away completely
3526     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3527     assert(header_size_min <= hsize, "generic minimum is smallest");
3528     header_size_min = hsize;
3529     header_size = intcon(hsize + round_mask);


3535     Node* mask  = intcon(round_mask);
3536     header_size = _gvn.transform( new AddINode(hsize, mask) );
3537   }
3538 
3539   Node* elem_shift = NULL;
3540   if (layout_is_con) {
3541     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3542     if (eshift != 0)
3543       elem_shift = intcon(eshift);
3544   } else {
3545     // There is no need to mask or shift this value.
3546     // The semantics of LShiftINode include an implicit mask to 0x1F.
3547     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3548     elem_shift = layout_val;
3549   }
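       // (Illustration:) using layout_val directly as the shift count is safe
       // because LShiftINode only honors the low five bits of its count, so a
       // count of (0x20 | 3), for example, shifts by 3; no explicit AndI with
       // 0x1F is needed.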
3550 
3551   // Transition to native address size for all offset calculations:
3552   Node* lengthx = ConvI2X(length);
3553   Node* headerx = ConvI2X(header_size);
3554 #ifdef _LP64
3555   { const TypeInt* tilen = _gvn.find_int_type(length);
3556     if (tilen != NULL && tilen->_lo < 0) {
3557       // Add a manual constraint to a positive range.  Cf. array_element_address.
3558       jint size_max = fast_size_limit;
3559       if (size_max > tilen->_hi)  size_max = tilen->_hi;
3560       const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);
3561 
3562       // Only do a narrow I2L conversion if the range check passed.
3563       IfNode* iff = new IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
3564       _gvn.transform(iff);
3565       RegionNode* region = new RegionNode(3);
3566       _gvn.set_type(region, Type::CONTROL);
3567       lengthx = new PhiNode(region, TypeLong::LONG);
3568       _gvn.set_type(lengthx, TypeLong::LONG);
3569 
3570       // Range check passed. Use ConvI2L node with narrow type.
3571       Node* passed = IfFalse(iff);
3572       region->init_req(1, passed);
3573       // Make I2L conversion control dependent to prevent it from
3574       // floating above the range check during loop optimizations.
3575       lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));
3576 
3577       // Range check failed. Use ConvI2L with wide type because length may be invalid.
3578       region->init_req(2, IfTrue(iff));
3579       lengthx->init_req(2, ConvI2X(length));
3580 
3581       set_control(region);
3582       record_for_igvn(region);
3583       record_for_igvn(lengthx);
3584     }
3585   }
3586 #endif
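       // Sketch of the subgraph built above (descriptive only):
       //
       //              If(initial_slow_test)
       //              /                   \
       //    IfFalse: range check passed   IfTrue: go slow
       //         |                             |
       //  constrained_convI2L(length)     ConvI2X(length)
       //  type [0, size_max], pinned      wide type, length
       //  below the check                 may be invalid
       //         \                            /
       //          Region(3) ---> Phi(long) == lengthx
       //
       // Note that initial_slow_test is consumed here as a genuine Bool; it is
       // only flattened to an int value (via CMoveI) further down, just before
       // the AllocateArrayNode is created.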
3587 
3588   // Combine header size (plus rounding) and body size.  Then round down.
3589   // This computation cannot overflow, because it is used only in two
3590   // places, one where the length is sharply limited, and the other
3591   // after a successful allocation.
3592   Node* abody = lengthx;
3593   if (elem_shift != NULL)
3594     abody     = _gvn.transform( new LShiftXNode(lengthx, elem_shift) );
3595   Node* size  = _gvn.transform( new AddXNode(headerx, abody) );
3596   if (round_mask != 0) {
3597     Node* mask = MakeConX(~round_mask);
3598     size       = _gvn.transform( new AndXNode(size, mask) );
3599   }
3600   // else if round_mask == 0, the size computation is self-rounding
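       // Together with the "+ round_mask" folded into header_size above, this
       // AndX completes the round_to() from the size-computation comment:
       //   size == ((header + round_mask) + body) & ~round_mask
       //        == round_to(header + body, MinObjAlignmentInBytes)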
3601 
3602   if (return_size_val != NULL) {
3603     // This is the size
3604     (*return_size_val) = size;
3605   }
3606 
3607   // Now generate allocation code
3608 
3609   // The entire memory state is needed for the slow path of the allocation
3610   // since GC and deoptimization can happen.
3611   Node *mem = reset_memory();
3612   set_all_memory(mem); // Create new memory state
3613 
3614   if (initial_slow_test->is_Bool()) {
3615     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3616     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3617   }
3618 
3619   // Create the AllocateArrayNode and its result projections
3620   AllocateArrayNode* alloc
3621     = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3622                             control(), mem, i_o(),
3623                             size, klass_node,
3624                             initial_slow_test,
3625                             length);
3626 
3627   // Cast to correct type.  Note that the klass_node may be constant or not,
3628   // and in the latter case the actual array type will be inexact also.
3629   // (This happens via a non-constant argument to inline_native_newArray.)
3630   // In any case, the value of klass_node provides the desired array type.
3631   const TypeInt* length_type = _gvn.find_int_type(length);
3632   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3633   if (ary_type->isa_aryptr() && length_type != NULL) {
3634     // Try to get a better type than POS for the size
3635     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3636   }
3637 

