
src/share/vm/opto/graphKit.cpp

--- old/src/share/vm/opto/graphKit.cpp

1628       // known field.  This code is a copy of the do_put_xxx logic.
1629       ciField* field = at->field();
1630       if (!field->type()->is_loaded()) {
1631         val_type = TypeInstPtr::BOTTOM;
1632       } else {
1633         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1634       }
1635     }
1636   } else if (adr_type->isa_aryptr()) {
1637     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1638   }
1639   if (val_type == NULL) {
1640     val_type = TypeInstPtr::BOTTOM;
1641   }
1642   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
1643 }
1644 
1645 
1646 //-------------------------array_element_address-------------------------
1647 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1648                                       const TypeInt* sizetype) {
1649   uint shift  = exact_log2(type2aelembytes(elembt));
1650   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1651 
1652   // short-circuit a common case (saves lots of confusing waste motion)
1653   jint idx_con = find_int_con(idx, -1);
1654   if (idx_con >= 0) {
1655     intptr_t offset = header + ((intptr_t)idx_con << shift);
1656     return basic_plus_adr(ary, offset);
1657   }
1658 
1659   // must be correct type for alignment purposes
1660   Node* base  = basic_plus_adr(ary, header);
1661 #ifdef _LP64
1662   // The scaled index operand to AddP must be a clean 64-bit value.
1663   // Java allows a 32-bit int to be incremented to a negative
1664   // value, which appears in a 64-bit register as a large
1665   // positive number.  Using that large positive number as an
1666   // operand in pointer arithmetic has bad consequences.
1667   // On the other hand, 32-bit overflow is rare, and the possibility
1668   // can often be excluded, if we annotate the ConvI2L node with
1669   // a type assertion that its value is known to be a small positive
1670   // number.  (The prior range check has ensured this.)
1671   // This assertion is used by ConvI2LNode::Ideal.
1672   int index_max = max_jint - 1;  // array size is max_jint, index is one less
1673   if (sizetype != NULL)  index_max = sizetype->_hi - 1;
1674   const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
1675   idx = _gvn.transform( new (C) ConvI2LNode(idx, lidxtype) );
1676 #endif
1677   Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1678   return basic_plus_adr(ary, base, scale);
1679 }
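
A minimal standalone sketch of the hazard the comment above describes (plain C++, not HotSpot code): a negative 32-bit index that ends up zero-extended rather than sign-extended turns into a huge positive scaled offset, which is exactly what the narrow [0, index_max] type on the ConvI2L rules out.

#include <cassert>
#include <cstdint>

int main() {
  int32_t idx = -1;                                  // e.g. an int that wrapped negative
  uint64_t zero_extended = (uint64_t)(uint32_t)idx;  // 0x00000000FFFFFFFF
  int64_t  sign_extended = (int64_t)idx;             // 0xFFFFFFFFFFFFFFFF, i.e. -1
  assert(zero_extended == 0xFFFFFFFFull);            // scaled into an AddP, this lands gigabytes past the base
  assert(sign_extended == -1);                       // what ConvI2L with a sane type models
  // The [0, index_max] assertion lets the optimizer assume neither case occurs,
  // because the preceding range check has already excluded it.
  return 0;
}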
1680 
1681 //-------------------------load_array_element-------------------------
1682 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1683   const Type* elemtype = arytype->elem();
1684   BasicType elembt = elemtype->array_element_basic_type();
1685   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1686   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1687   return ld;
1688 }
1689 
1690 //-------------------------set_arguments_for_java_call-------------------------
1691 // Arguments (pre-popped from the stack) are taken from the JVMS.
1692 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1693   // Add the call arguments:
1694   uint nargs = call->method()->arg_size();
1695   for (uint i = 0; i < nargs; i++) {


3474     layout_val = NULL;
3475     layout_is_con = true;
3476   }
3477 
3478   // Generate the initial go-slow test.  Make sure we do not overflow
3479   // if length is huge (near 2Gig) or negative!  We do not need
3480   // exact double-words here, just a close approximation of needed
3481   // double-words.  We can't add any offset or rounding bits, lest we
3482   // take a size of -1 bytes and make it positive.  Use an unsigned
3483   // compare, so negative sizes look hugely positive.
3484   int fast_size_limit = FastAllocateSizeLimit;
3485   if (layout_is_con) {
3486     assert(!StressReflectiveCode, "stress mode does not use these paths");
3487     // Increase the size limit if we have exact knowledge of array type.
3488     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3489     fast_size_limit <<= (LogBytesPerLong - log2_esize);
3490   }
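
For illustration only (the flag default of 100000 is an assumption, not something stated in this webrev): FastAllocateSizeLimit is expressed in double-words, while length counts elements, so the limit is rescaled by the element size before the unsigned compare that follows.

#include <cassert>

int main() {
  const int FastAllocateSizeLimit = 100000;  // assumed default, in double-words
  const int LogBytesPerLong = 3;
  // byte[] : log2_esize == 0, eight elements fit per double-word
  assert((FastAllocateSizeLimit << (LogBytesPerLong - 0)) == 800000);
  // long[] : log2_esize == 3, one element per double-word, limit unchanged
  assert((FastAllocateSizeLimit << (LogBytesPerLong - 3)) == 100000);
  return 0;
}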
3491 
3492   Node* initial_slow_cmp  = _gvn.transform( new (C) CmpUNode( length, intcon( fast_size_limit ) ) );
3493   Node* initial_slow_test = _gvn.transform( new (C) BoolNode( initial_slow_cmp, BoolTest::gt ) );
3494   if (initial_slow_test->is_Bool()) {
3495     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3496     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3497   }
3498 
3499   // --- Size Computation ---
3500   // array_size = round_to_heap(array_header + (length << elem_shift));
3501   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3502   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3503   // The rounding mask is strength-reduced, if possible.
3504   int round_mask = MinObjAlignmentInBytes - 1;
3505   Node* header_size = NULL;
3506   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3507   // (T_BYTE has the weakest alignment and size restrictions...)
3508   if (layout_is_con) {
3509     int       hsize  = Klass::layout_helper_header_size(layout_con);
3510     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3511     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3512     if ((round_mask & ~right_n_bits(eshift)) == 0)
3513       round_mask = 0;  // strength-reduce it if it goes away completely
3514     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3515     assert(header_size_min <= hsize, "generic minimum is smallest");
3516     header_size_min = hsize;
3517     header_size = intcon(hsize + round_mask);


3523     Node* mask  = intcon(round_mask);
3524     header_size = _gvn.transform( new(C) AddINode(hsize, mask) );
3525   }
3526 
3527   Node* elem_shift = NULL;
3528   if (layout_is_con) {
3529     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3530     if (eshift != 0)
3531       elem_shift = intcon(eshift);
3532   } else {
3533     // There is no need to mask or shift this value.
3534     // The semantics of LShiftINode include an implicit mask to 0x1F.
3535     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3536     elem_shift = layout_val;
3537   }
3538 
3539   // Transition to native address size for all offset calculations:
3540   Node* lengthx = ConvI2X(length);
3541   Node* headerx = ConvI2X(header_size);
3542 #ifdef _LP64
3543   { const TypeLong* tllen = _gvn.find_long_type(lengthx);
3544     if (tllen != NULL && tllen->_lo < 0) {
3545       // Add a manual constraint to a positive range.  Cf. array_element_address.
3546       jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
3547       if (size_max > tllen->_hi)  size_max = tllen->_hi;
3548       const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
3549       lengthx = _gvn.transform( new (C) ConvI2LNode(length, tlcon));
3550     }
3551   }
3552 #endif
3553 
3554   // Combine header size (plus rounding) and body size.  Then round down.
3555   // This computation cannot overflow, because it is used only in two
3556   // places, one where the length is sharply limited, and the other
3557   // after a successful allocation.
3558   Node* abody = lengthx;
3559   if (elem_shift != NULL)
3560     abody     = _gvn.transform( new(C) LShiftXNode(lengthx, elem_shift) );
3561   Node* size  = _gvn.transform( new(C) AddXNode(headerx, abody) );
3562   if (round_mask != 0) {
3563     Node* mask = MakeConX(~round_mask);
3564     size       = _gvn.transform( new(C) AndXNode(size, mask) );
3565   }
3566   // else if round_mask == 0, the size computation is self-rounding
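
A worked example of the size formula above (a standalone sketch with assumed layout values: a 16-byte array header, 8-byte heap alignment so round_mask == 7, and byte elements so there is no element shift):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t header_plus_mask = 16 + 7;   // header_size already carries round_mask
  const int64_t length = 5;                  // element count for a byte[] (shift 0)
  int64_t size = (header_plus_mask + length) & ~(int64_t)7;
  assert(size == 24);                        // round_to(16 + 5, 8)
  // For long[] (eshift == 3) every element contribution is already 8-aligned,
  // so (round_mask & ~right_n_bits(eshift)) == 0 and the mask is dropped above.
  return 0;
}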
3567 
3568   if (return_size_val != NULL) {
3569     // This is the size
3570     (*return_size_val) = size;
3571   }
3572 
3573   // Now generate allocation code
3574 
3575   // The entire memory state is needed for the slow path of the allocation
3576   // since GC and deoptimization can happen.
3577   Node *mem = reset_memory();
3578   set_all_memory(mem); // Create new memory state
3579 
3580   // Create the AllocateArrayNode and its result projections
3581   AllocateArrayNode* alloc
3582     = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3583                                 control(), mem, i_o(),
3584                                 size, klass_node,
3585                                 initial_slow_test,
3586                                 length);
3587 
3588   // Cast to correct type.  Note that the klass_node may be constant or not,
3589   // and in the latter case the actual array type will be inexact also.
3590   // (This happens via a non-constant argument to inline_native_newArray.)
3591   // In any case, the value of klass_node provides the desired array type.
3592   const TypeInt* length_type = _gvn.find_int_type(length);
3593   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3594   if (ary_type->isa_aryptr() && length_type != NULL) {
3595     // Try to get a better type than POS for the size
3596     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3597   }
3598 

+++ new/src/share/vm/opto/graphKit.cpp

1628       // known field.  This code is a copy of the do_put_xxx logic.
1629       ciField* field = at->field();
1630       if (!field->type()->is_loaded()) {
1631         val_type = TypeInstPtr::BOTTOM;
1632       } else {
1633         val_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
1634       }
1635     }
1636   } else if (adr_type->isa_aryptr()) {
1637     val_type = adr_type->is_aryptr()->elem()->make_oopptr();
1638   }
1639   if (val_type == NULL) {
1640     val_type = TypeInstPtr::BOTTOM;
1641   }
1642   return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
1643 }
1644 
1645 
1646 //-------------------------array_element_address-------------------------
1647 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1648                                       const TypeInt* sizetype, Node* ctrl) {
1649   uint shift  = exact_log2(type2aelembytes(elembt));
1650   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1651 
1652   // short-circuit a common case (saves lots of confusing waste motion)
1653   jint idx_con = find_int_con(idx, -1);
1654   if (idx_con >= 0) {
1655     intptr_t offset = header + ((intptr_t)idx_con << shift);
1656     return basic_plus_adr(ary, offset);
1657   }
1658 
1659   // must be correct type for alignment purposes
1660   Node* base  = basic_plus_adr(ary, header);
1661 #ifdef _LP64
1662   // The scaled index operand to AddP must be a clean 64-bit value.
1663   // Java allows a 32-bit int to be incremented to a negative
1664   // value, which appears in a 64-bit register as a large
1665   // positive number.  Using that large positive number as an
1666   // operand in pointer arithmetic has bad consequences.
1667   // On the other hand, 32-bit overflow is rare, and the possibility
1668   // can often be excluded, if we annotate the ConvI2L node with
1669   // a type assertion that its value is known to be a small positive
1670   // number.  (The prior range check has ensured this.)
1671   // This assertion is used by ConvI2LNode::Ideal.
1672   int index_max = max_jint - 1;  // array size is max_jint, index is one less
1673   if (sizetype != NULL) index_max = sizetype->_hi - 1;
1674   const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
1675   idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
1676 #endif
1677   Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
1678   return basic_plus_adr(ary, base, scale);
1679 }
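
A worked example of the address arithmetic this function emits (a sketch with assumed values: a 16-byte header and shift of 3 for a long[], rather than quantities read from arrayOopDesc or type2aelembytes). The constrained_convI2L call above keeps the conversion typed [0, index_max] and pinned below the range check that established that bound.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t header = 16;     // assumed base_offset_in_bytes(T_LONG)
  const uint32_t shift  = 3;      // log2(sizeof(jlong))
  int64_t idx = 5;                // already range checked, so within [0, index_max]
  int64_t offset = header + (idx << shift);
  assert(offset == 56);           // element 5 starts 56 bytes past the array oop
  return 0;
}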
1680 
1681 //-------------------------load_array_element-------------------------
1682 Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
1683   const Type* elemtype = arytype->elem();
1684   BasicType elembt = elemtype->array_element_basic_type();
1685   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1686   Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1687   return ld;
1688 }
1689 
1690 //-------------------------set_arguments_for_java_call-------------------------
1691 // Arguments (pre-popped from the stack) are taken from the JVMS.
1692 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1693   // Add the call arguments:
1694   uint nargs = call->method()->arg_size();
1695   for (uint i = 0; i < nargs; i++) {


3474     layout_val = NULL;
3475     layout_is_con = true;
3476   }
3477 
3478   // Generate the initial go-slow test.  Make sure we do not overflow
3479   // if length is huge (near 2Gig) or negative!  We do not need
3480   // exact double-words here, just a close approximation of needed
3481   // double-words.  We can't add any offset or rounding bits, lest we
3482   // take a size of -1 bytes and make it positive.  Use an unsigned
3483   // compare, so negative sizes look hugely positive.
3484   int fast_size_limit = FastAllocateSizeLimit;
3485   if (layout_is_con) {
3486     assert(!StressReflectiveCode, "stress mode does not use these paths");
3487     // Increase the size limit if we have exact knowledge of array type.
3488     int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3489     fast_size_limit <<= (LogBytesPerLong - log2_esize);
3490   }
3491 
3492   Node* initial_slow_cmp  = _gvn.transform( new (C) CmpUNode( length, intcon( fast_size_limit ) ) );
3493   Node* initial_slow_test = _gvn.transform( new (C) BoolNode( initial_slow_cmp, BoolTest::gt ) );
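
The unsigned compare above is what keeps a bogus negative length out of the fast path. A standalone sketch (with assumed header, alignment, and limit values, not HotSpot code) shows why the raw length must be tested: once header and rounding bits are added, a negative size can come out positive.

#include <cassert>
#include <cstdint>

int main() {
  int32_t length = -1;                                     // invalid array length
  // Size-style arithmetic hides the problem: -1 element "rounds" to a positive size.
  int64_t bytes = (16 + (int64_t)length * 8 + 7) & ~(int64_t)7;  // * 8 stands in for << eshift
  assert(bytes == 8);
  // The unsigned view of the raw length does not: 0xFFFFFFFF exceeds any limit.
  uint32_t fast_size_limit = 100000 << 3;                  // assumed scaled limit
  assert((uint32_t)length > fast_size_limit);              // CmpU + BoolTest::gt -> go slow
  return 0;
}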




3494 
3495   // --- Size Computation ---
3496   // array_size = round_to_heap(array_header + (length << elem_shift));
3497   // where round_to_heap(x) == round_to(x, MinObjAlignmentInBytes)
3498   // and round_to(x, y) == ((x + y-1) & ~(y-1))
3499   // The rounding mask is strength-reduced, if possible.
3500   int round_mask = MinObjAlignmentInBytes - 1;
3501   Node* header_size = NULL;
3502   int   header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3503   // (T_BYTE has the weakest alignment and size restrictions...)
3504   if (layout_is_con) {
3505     int       hsize  = Klass::layout_helper_header_size(layout_con);
3506     int       eshift = Klass::layout_helper_log2_element_size(layout_con);
3507     BasicType etype  = Klass::layout_helper_element_type(layout_con);
3508     if ((round_mask & ~right_n_bits(eshift)) == 0)
3509       round_mask = 0;  // strength-reduce it if it goes away completely
3510     assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3511     assert(header_size_min <= hsize, "generic minimum is smallest");
3512     header_size_min = hsize;
3513     header_size = intcon(hsize + round_mask);


3519     Node* mask  = intcon(round_mask);
3520     header_size = _gvn.transform( new(C) AddINode(hsize, mask) );
3521   }
3522 
3523   Node* elem_shift = NULL;
3524   if (layout_is_con) {
3525     int eshift = Klass::layout_helper_log2_element_size(layout_con);
3526     if (eshift != 0)
3527       elem_shift = intcon(eshift);
3528   } else {
3529     // There is no need to mask or shift this value.
3530     // The semantics of LShiftINode include an implicit mask to 0x1F.
3531     assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3532     elem_shift = layout_val;
3533   }
3534 
3535   // Transition to native address size for all offset calculations:
3536   Node* lengthx = ConvI2X(length);
3537   Node* headerx = ConvI2X(header_size);
3538 #ifdef _LP64
3539   { const TypeInt* tilen = _gvn.find_int_type(length);
3540     if (tilen != NULL && tilen->_lo < 0) {
3541       // Add a manual constraint to a positive range.  Cf. array_element_address.
3542       jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
3543       if (size_max > tilen->_hi)  size_max = tilen->_hi;
3544       const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);
3545 
3546       // Only do a narrow I2L conversion if the range check passed.
3547       IfNode* iff = new (C) IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
3548       _gvn.transform(iff);
3549       RegionNode* region = new (C) RegionNode(3);
3550       _gvn.set_type(region, Type::CONTROL);
3551       lengthx = new (C) PhiNode(region, TypeLong::LONG);
3552       _gvn.set_type(lengthx, TypeLong::LONG);
3553 
3554       // Range check passed. Use ConvI2L node with narrow type.
3555       Node* passed = IfFalse(iff);
3556       region->init_req(1, passed);
3557       // Make I2L conversion control dependent to prevent it from
3558       // floating above the range check during loop optimizations.
3559       lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));
3560 
3561       // Range check failed. Use ConvI2L with wide type because length may be invalid.
3562       region->init_req(2, IfTrue(iff));
3563       lengthx->init_req(2, ConvI2X(length));
3564 
3565       set_control(region);
3566       record_for_igvn(region);
3567       record_for_igvn(lengthx);
3568     }
3569   }
3570 #endif
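
Why the conversion is kept below the range check (a plain C++ sketch of the two Phi inputs built above; widen_length is a hypothetical stand-in, not ideal-graph code): the narrow [0, size_max] type on the ConvI2L is only true on the range-check-passed path, so letting it float above the If would license folding that is wrong when length is negative.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the two inputs merged by the Phi above.
int64_t widen_length(int32_t length, bool range_check_passed) {
  if (range_check_passed) {
    // Narrow path: the asserted range [0, size_max] really holds here.
    assert(length >= 0);
    return (int64_t)length;
  }
  // Slow path: length may be negative, so only the plain widening is sound.
  return (int64_t)length;
}

int main() {
  assert(widen_length(5, true)   == 5);    // fast path keeps the narrow type
  assert(widen_length(-1, false) == -1);   // slow path must not claim the narrow type
  return 0;
}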
3571 
3572   // Combine header size (plus rounding) and body size.  Then round down.
3573   // This computation cannot overflow, because it is used only in two
3574   // places, one where the length is sharply limited, and the other
3575   // after a successful allocation.
3576   Node* abody = lengthx;
3577   if (elem_shift != NULL)
3578     abody     = _gvn.transform( new(C) LShiftXNode(lengthx, elem_shift) );
3579   Node* size  = _gvn.transform( new(C) AddXNode(headerx, abody) );
3580   if (round_mask != 0) {
3581     Node* mask = MakeConX(~round_mask);
3582     size       = _gvn.transform( new(C) AndXNode(size, mask) );
3583   }
3584   // else if round_mask == 0, the size computation is self-rounding
3585 
3586   if (return_size_val != NULL) {
3587     // This is the size
3588     (*return_size_val) = size;
3589   }
3590 
3591   // Now generate allocation code
3592 
3593   // The entire memory state is needed for the slow path of the allocation
3594   // since GC and deoptimization can happen.
3595   Node *mem = reset_memory();
3596   set_all_memory(mem); // Create new memory state
3597 
3598   if (initial_slow_test->is_Bool()) {
3599     // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3600     initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3601   }
3602 
3603   // Create the AllocateArrayNode and its result projections
3604   AllocateArrayNode* alloc
3605     = new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3606                                 control(), mem, i_o(),
3607                                 size, klass_node,
3608                                 initial_slow_test,
3609                                 length);
3610 
3611   // Cast to correct type.  Note that the klass_node may be constant or not,
3612   // and in the latter case the actual array type will be inexact also.
3613   // (This happens via a non-constant argument to inline_native_newArray.)
3614   // In any case, the value of klass_node provides the desired array type.
3615   const TypeInt* length_type = _gvn.find_int_type(length);
3616   const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3617   if (ary_type->isa_aryptr() && length_type != NULL) {
3618     // Try to get a better type than POS for the size
3619     ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3620   }
3621 

