
src/hotspot/share/c1/c1_LIRGenerator.cpp

rev 49948 : 8201593: Print array length in ArrayIndexOutOfBoundsException.
Reviewed-by: dholmes, mdoerr, smonteith
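
This change threads the array operand into the C1 range-check slow path: array_range_check and the aaload/aastore paths now construct RangeCheckStub with the array in addition to the failing index, so the exception path can include the array length in the ArrayIndexOutOfBoundsException message, while the NIO limit check drops to an index-only stub. Below is a minimal, self-contained C++ sketch of the underlying idea; the names (bounds_check_load, report_out_of_bounds) are illustrative only and are not HotSpot APIs.

// Sketch only: models why the slow path wants the array operand, not just the index.
#include <cstdio>
#include <cstdlib>
#include <vector>

// Slow path: with the array in hand, the message can state both the index and
// the length, mirroring the JDK-style "Index 5 out of bounds for length 3".
static void report_out_of_bounds(int index, const std::vector<int>& array) {
  std::fprintf(stderr, "Index %d out of bounds for length %zu\n",
               index, array.size());
  std::exit(1);
}

// Fast path: a single unsigned compare covers both index < 0 and
// index >= length, the same trick the LIR code uses with
// lir_cond_aboveEqual / lir_cond_belowEqual against the length field.
static int bounds_check_load(const std::vector<int>& array, int index) {
  if (static_cast<unsigned>(index) >= array.size()) {
    report_out_of_bounds(index, array);   // slow path now knows the length
  }
  return array[static_cast<size_t>(index)];
}

int main() {
  std::vector<int> a = {1, 2, 3};
  std::printf("%d\n", bounds_check_load(a, 1));  // prints 1
  bounds_check_load(a, 5);  // error message includes the array length
  return 0;
}
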


 463   return state_for(x, x->exception_state());
 464 }
 465 
 466 
 467 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
 468   /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
 469    * is active and the class hasn't yet been resolved we need to emit a patch that resolves
 470    * the class. */
 471   if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
 472     assert(info != NULL, "info must be set if class is not loaded");
 473     __ klass2reg_patch(NULL, r, info);
 474   } else {
 475     // no patching needed
 476     __ metadata2reg(obj->constant_encoding(), r);
 477   }
 478 }
 479 
 480 
 481 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
 482                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
 483   CodeStub* stub = new RangeCheckStub(range_check_info, index);
 484   if (index->is_constant()) {
 485     cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
 486                 index->as_jint(), null_check_info);
 487     __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
 488   } else {
 489     cmp_reg_mem(lir_cond_aboveEqual, index, array,
 490                 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
 491     __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
 492   }
 493 }
 494 
 495 
 496 void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
 497   CodeStub* stub = new RangeCheckStub(info, index, true);
 498   if (index->is_constant()) {
 499     cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
 500     __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
 501   } else {
 502     cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
 503                 java_nio_Buffer::limit_offset(), T_INT, info);
 504     __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
 505   }
 506   __ move(index, result);
 507 }
 508 
 509 
 510 
 511 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
 512   LIR_Opr result_op = result;
 513   LIR_Opr left_op   = left;
 514   LIR_Opr right_op  = right;
 515 
 516   if (TwoOperandLIRForm && left_op != result_op) {
 517     assert(right_op != result_op, "malformed");


1575   if (needs_store_check || x->check_boolean()) {
1576     value.load_item();
1577   } else {
1578     value.load_for_store(x->elt_type());
1579   }
1580 
1581   set_no_result(x);
1582 
1583   // the CodeEmitInfo must be duplicated for each different
1584   // LIR-instruction because spilling can occur anywhere between two
1585   // instructions and so the debug information must be different
1586   CodeEmitInfo* range_check_info = state_for(x);
1587   CodeEmitInfo* null_check_info = NULL;
1588   if (x->needs_null_check()) {
1589     null_check_info = new CodeEmitInfo(range_check_info);
1590   }
1591 
1592   if (GenerateRangeChecks && needs_range_check) {
1593     if (use_length) {
1594       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1595       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1596     } else {
1597       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1598       // range_check also does the null check
1599       null_check_info = NULL;
1600     }
1601   }
1602 
1603   if (GenerateArrayStoreCheck && needs_store_check) {
1604     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1605     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1606   }
1607 
1608   DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
1609   if (x->check_boolean()) {
1610     decorators |= C1_MASK_BOOLEAN;
1611   }
1612 
1613   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1614                   NULL, null_check_info);
1615 }


1739                  info ? new CodeEmitInfo(info) : NULL, info);
1740 }
1741 
1742 
1743 //------------------------java.nio.Buffer.checkIndex------------------------
1744 
1745 // int java.nio.Buffer.checkIndex(int)
1746 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1747   // NOTE: by the time we are in checkIndex() we are guaranteed that
1748   // the buffer is non-null (because checkIndex is package-private and
1749   // only called from within other methods in the buffer).
1750   assert(x->number_of_arguments() == 2, "wrong type");
1751   LIRItem buf  (x->argument_at(0), this);
1752   LIRItem index(x->argument_at(1), this);
1753   buf.load_item();
1754   index.load_item();
1755 
1756   LIR_Opr result = rlock_result(x);
1757   if (GenerateRangeChecks) {
1758     CodeEmitInfo* info = state_for(x);
1759     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1760     if (index.result()->is_constant()) {
1761       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1762       __ branch(lir_cond_belowEqual, T_INT, stub);
1763     } else {
1764       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1765                   java_nio_Buffer::limit_offset(), T_INT, info);
1766       __ branch(lir_cond_aboveEqual, T_INT, stub);
1767     }
1768     __ move(index.result(), result);
1769   } else {
1770     // Just load the index into the result register
1771     __ move(index.result(), result);
1772   }
1773 }
1774 
1775 
1776 //------------------------array access--------------------------------------
1777 
1778 
1779 void LIRGenerator::do_ArrayLength(ArrayLength* x) {


1820   }
1821 
1822   CodeEmitInfo* range_check_info = state_for(x);
1823   CodeEmitInfo* null_check_info = NULL;
1824   if (x->needs_null_check()) {
1825     NullCheck* nc = x->explicit_null_check();
1826     if (nc != NULL) {
1827       null_check_info = state_for(nc);
1828     } else {
1829       null_check_info = range_check_info;
1830     }
1831     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1832       LIR_Opr obj = new_register(T_OBJECT);
1833       __ move(LIR_OprFact::oopConst(NULL), obj);
1834       __ null_check(obj, new CodeEmitInfo(null_check_info));
1835     }
1836   }
1837 
1838   if (GenerateRangeChecks && needs_range_check) {
1839     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1840       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1841     } else if (use_length) {
1842       // TODO: use a (modified) version of array_range_check that does not require a
1843       //       constant length to be loaded to a register
1844       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1845       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1846     } else {
1847       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1848       // The range check performs the null check, so clear it out for the load
1849       null_check_info = NULL;
1850     }
1851   }
1852 
1853   DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
1854 
1855   LIR_Opr result = rlock_result(x, x->elt_type());
1856   access_load_at(decorators, x->elt_type(),
1857                  array, index.result(), result,
1858                  NULL, null_check_info);
1859 }
1860 
1861 
1862 void LIRGenerator::do_NullCheck(NullCheck* x) {
1863   if (x->can_trap()) {
1864     LIRItem value(x->obj(), this);
1865     value.load_item();




 463   return state_for(x, x->exception_state());
 464 }
 465 
 466 
 467 void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
 468   /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
 469    * is active and the class hasn't yet been resolved we need to emit a patch that resolves
 470    * the class. */
 471   if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
 472     assert(info != NULL, "info must be set if class is not loaded");
 473     __ klass2reg_patch(NULL, r, info);
 474   } else {
 475     // no patching needed
 476     __ metadata2reg(obj->constant_encoding(), r);
 477   }
 478 }
 479 
 480 
 481 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
 482                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
 483   CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
 484   if (index->is_constant()) {
 485     cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
 486                 index->as_jint(), null_check_info);
 487     __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
 488   } else {
 489     cmp_reg_mem(lir_cond_aboveEqual, index, array,
 490                 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
 491     __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
 492   }
 493 }
 494 
 495 
 496 void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
 497   CodeStub* stub = new RangeCheckStub(info, index);
 498   if (index->is_constant()) {
 499     cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
 500     __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
 501   } else {
 502     cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
 503                 java_nio_Buffer::limit_offset(), T_INT, info);
 504     __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
 505   }
 506   __ move(index, result);
 507 }
 508 
 509 
 510 
 511 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
 512   LIR_Opr result_op = result;
 513   LIR_Opr left_op   = left;
 514   LIR_Opr right_op  = right;
 515 
 516   if (TwoOperandLIRForm && left_op != result_op) {
 517     assert(right_op != result_op, "malformed");


1575   if (needs_store_check || x->check_boolean()) {
1576     value.load_item();
1577   } else {
1578     value.load_for_store(x->elt_type());
1579   }
1580 
1581   set_no_result(x);
1582 
1583   // the CodeEmitInfo must be duplicated for each different
1584   // LIR-instruction because spilling can occur anywhere between two
1585   // instructions and so the debug information must be different
1586   CodeEmitInfo* range_check_info = state_for(x);
1587   CodeEmitInfo* null_check_info = NULL;
1588   if (x->needs_null_check()) {
1589     null_check_info = new CodeEmitInfo(range_check_info);
1590   }
1591 
1592   if (GenerateRangeChecks && needs_range_check) {
1593     if (use_length) {
1594       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1595       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1596     } else {
1597       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1598       // range_check also does the null check
1599       null_check_info = NULL;
1600     }
1601   }
1602 
1603   if (GenerateArrayStoreCheck && needs_store_check) {
1604     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1605     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1606   }
1607 
1608   DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
1609   if (x->check_boolean()) {
1610     decorators |= C1_MASK_BOOLEAN;
1611   }
1612 
1613   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1614                   NULL, null_check_info);
1615 }


1739                  info ? new CodeEmitInfo(info) : NULL, info);
1740 }
1741 
1742 
1743 //------------------------java.nio.Buffer.checkIndex------------------------
1744 
1745 // int java.nio.Buffer.checkIndex(int)
1746 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1747   // NOTE: by the time we are in checkIndex() we are guaranteed that
1748   // the buffer is non-null (because checkIndex is package-private and
1749   // only called from within other methods in the buffer).
1750   assert(x->number_of_arguments() == 2, "wrong type");
1751   LIRItem buf  (x->argument_at(0), this);
1752   LIRItem index(x->argument_at(1), this);
1753   buf.load_item();
1754   index.load_item();
1755 
1756   LIR_Opr result = rlock_result(x);
1757   if (GenerateRangeChecks) {
1758     CodeEmitInfo* info = state_for(x);
1759     CodeStub* stub = new RangeCheckStub(info, index.result());
1760     if (index.result()->is_constant()) {
1761       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1762       __ branch(lir_cond_belowEqual, T_INT, stub);
1763     } else {
1764       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1765                   java_nio_Buffer::limit_offset(), T_INT, info);
1766       __ branch(lir_cond_aboveEqual, T_INT, stub);
1767     }
1768     __ move(index.result(), result);
1769   } else {
1770     // Just load the index into the result register
1771     __ move(index.result(), result);
1772   }
1773 }
1774 
1775 
1776 //------------------------array access--------------------------------------
1777 
1778 
1779 void LIRGenerator::do_ArrayLength(ArrayLength* x) {


1820   }
1821 
1822   CodeEmitInfo* range_check_info = state_for(x);
1823   CodeEmitInfo* null_check_info = NULL;
1824   if (x->needs_null_check()) {
1825     NullCheck* nc = x->explicit_null_check();
1826     if (nc != NULL) {
1827       null_check_info = state_for(nc);
1828     } else {
1829       null_check_info = range_check_info;
1830     }
1831     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1832       LIR_Opr obj = new_register(T_OBJECT);
1833       __ move(LIR_OprFact::oopConst(NULL), obj);
1834       __ null_check(obj, new CodeEmitInfo(null_check_info));
1835     }
1836   }
1837 
1838   if (GenerateRangeChecks && needs_range_check) {
1839     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1840       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1841     } else if (use_length) {
1842       // TODO: use a (modified) version of array_range_check that does not require a
1843       //       constant length to be loaded to a register
1844       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1845       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1846     } else {
1847       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1848       // The range check performs the null check, so clear it out for the load
1849       null_check_info = NULL;
1850     }
1851   }
1852 
1853   DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
1854 
1855   LIR_Opr result = rlock_result(x, x->elt_type());
1856   access_load_at(decorators, x->elt_type(),
1857                  array, index.result(), result,
1858                  NULL, null_check_info);
1859 }
1860 
1861 
1862 void LIRGenerator::do_NullCheck(NullCheck* x) {
1863   if (x->can_trap()) {
1864     LIRItem value(x->obj(), this);
1865     value.load_item();

