CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}

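// Emit an array bounds check: branch to the RangeCheckStub (which throws
// ArrayIndexOutOfBoundsException) when index >= array.length. The unsigned
// conditions (above/below) also catch negative indices. Reading the length
// field doubles as the implicit null check, hence the separate null_check_info.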
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}

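// Range check for java.nio.Buffer accesses: branch to the RangeCheckStub
// (the 'true' flag selects IndexOutOfBoundsException rather than
// ArrayIndexOutOfBoundsException) when index >= buffer.limit, then copy
// the validated index into result.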
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


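// Common code for arithmetic bytecodes. On two-address architectures
// (TwoOperandLIRForm, e.g. x86) the result operand must be the same as the
// left operand, so the left value is first moved into the result register.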
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }
  // ... (remainder of arithmetic_op and intervening functions elided) ...
}

//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}

//------------------------array access--------------------------------------


void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  // ... (body elided) ...
}


void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  // ... (operand setup elided: LIRItems for array, index, and optional length;
  //      use_length and needs_range_check flags; range_check_info state) ...
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
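    // StressLoopInvariantCodeMotion: emit a null check on a constant NULL
    // that always fails, forcing the deoptimize-on-exception path to be
    // exercised.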
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

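  // Emit the load; if null_check_info is still set, the implicit null check
  // is folded into this memory access.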
  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}

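// Emit an explicit null check. If the check cannot trap (the value is known
// to be non-null), no code is emitted.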
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}