src/share/vm/c1/c1_LIRGenerator.cpp

rev 4136 : 7153771: array bound check elimination for c1
Summary: when possible, optimize out array bound checks, inserting predicates when needed.
Reviewed-by:

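For orientation before the hunks: the change teaches C1 to prove array bound checks redundant and, where the proof rests on a runtime assumption, to emit a predicate that is checked once; a failing predicate branches to the new PredicateFailedStub (see do_RangeCheckPredicate below) and deoptimizes rather than throwing. The sketch below is plain C++, not HotSpot code, and every name in it is invented for illustration; it only shows the intended effect on a simple counted loop.

// Illustration only: what bound check elimination with a hoisted predicate
// amounts to, written as ordinary C++ (invented names, not part of this change).
#include <cstdio>
#include <stdexcept>

// "Before": the index is checked on every iteration, as the interpreter and
// unoptimized compiled code would do for a Java array access.
static int sum_checked(const int* a, int len, int lo, int hi) {
  int s = 0;
  for (int i = lo; i < hi; i++) {
    if (i < 0 || i >= len) throw std::out_of_range("index");  // per-iteration check
    s += a[i];
  }
  return s;
}

// "After": one hoisted (possibly conservative) predicate covers the whole range
// and the loop body runs unchecked. In the JIT the failing predicate jumps to
// PredicateFailedStub and deoptimizes instead of throwing here.
static int sum_predicated(const int* a, int len, int lo, int hi) {
  if (lo < 0 || hi > len) throw std::out_of_range("range");  // the predicate
  int s = 0;
  for (int i = lo; i < hi; i++) {
    s += a[i];  // no per-iteration check
  }
  return s;
}

int main() {
  int a[] = {1, 2, 3, 4};
  std::printf("%d %d\n", sum_checked(a, 4, 0, 4), sum_predicated(a, 4, 0, 4));
  return 0;
}
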
*** 401,410 **** --- 401,414 ----
  
  CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
    assert(state != NULL, "state must be defined");
  
+ #ifndef PRODUCT
+   state->verify();
+ #endif
+ 
    ValueStack* s = state;
    for_each_state(s) {
      if (s->kind() == ValueStack::EmptyExceptionState) {
        assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
        continue;
*** 451,461 ****
          }
        }
      }
    }
  
!   return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
  }
  
  
  CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
    return state_for(x, x->exception_state());
--- 455,465 ----
          }
        }
      }
    }
  
!   return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
  }
  
  
  CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
    return state_for(x, x->exception_state());
*** 1790,1804 ****
      tty->print_cr("   ###class not loaded at load_%s bci %d",
                    x->is_static() ? "static" : "field", x->printable_bci());
    }
  #endif
  
    if (x->needs_null_check() &&
        (needs_patching ||
!        MacroAssembler::needs_explicit_null_check(x->offset()))) {
      // emit an explicit null check because the offset is too large
!     __ null_check(object.result(), new CodeEmitInfo(info));
    }
  
    LIR_Opr reg = rlock_result(x, field_type);
    LIR_Address* address;
    if (needs_patching) {
--- 1794,1815 ----
      tty->print_cr("   ###class not loaded at load_%s bci %d",
                    x->is_static() ? "static" : "field", x->printable_bci());
    }
  #endif
  
+   bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
    if (x->needs_null_check() &&
        (needs_patching ||
!        MacroAssembler::needs_explicit_null_check(x->offset()) ||
!        stress_deopt)) {
!     LIR_Opr obj = object.result();
!     if (stress_deopt) {
!       obj = new_register(T_OBJECT);
!       __ move(LIR_OprFact::oopConst(NULL), obj);
!     }
      // emit an explicit null check because the offset is too large
!     __ null_check(obj, new CodeEmitInfo(info));
    }
  
    LIR_Opr reg = rlock_result(x, field_type);
    LIR_Address* address;
    if (needs_patching) {
*** 1859,1868 **** --- 1870,1881 ----
  
  //------------------------array access--------------------------------------
  
  
  void LIRGenerator::do_ArrayLength(ArrayLength* x) {
+   if (x->use_count() == 0 && !x->can_trap()) return;
+ 
    LIRItem array(x->array(), this);
    array.load_item();
    LIR_Opr reg = rlock_result(x);
  
    CodeEmitInfo* info = NULL;
*** 1871,1899 ****
      if (nc == NULL) {
        info = state_for(x);
      } else {
        info = state_for(nc);
      }
    }
    __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
  }
  
  
  void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
    bool use_length = x->length() != NULL;
    LIRItem array(x->array(), this);
    LIRItem index(x->index(), this);
    LIRItem length(this);
!   bool needs_range_check = true;
  
!   if (use_length) {
!     needs_range_check = x->compute_needs_range_check();
!     if (needs_range_check) {
        length.set_instruction(x->length());
        length.load_item();
      }
-   }
  
    array.load_item();
    if (index.is_constant() && can_inline_as_constant(x->index())) {
      // let it be a constant
      index.dont_load_item();
--- 1884,1914 ----
      if (nc == NULL) {
        info = state_for(x);
      } else {
        info = state_for(nc);
      }
+     if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
+       LIR_Opr obj = new_register(T_OBJECT);
+       __ move(LIR_OprFact::oopConst(NULL), obj);
+       __ null_check(obj, new CodeEmitInfo(info));
+     }
    }
    __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
  }
  
  
  void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
    bool use_length = x->length() != NULL;
    LIRItem array(x->array(), this);
    LIRItem index(x->index(), this);
    LIRItem length(this);
!   bool needs_range_check = x->compute_needs_range_check();
  
!   if (use_length && needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  
    array.load_item();
    if (index.is_constant() && can_inline_as_constant(x->index())) {
      // let it be a constant
      index.dont_load_item();
*** 1908,1924 ****
      if (nc != NULL) {
        null_check_info = state_for(nc);
      } else {
        null_check_info = range_check_info;
      }
    }
  
    // emit array address setup early so it schedules better
    LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
  
    if (GenerateRangeChecks && needs_range_check) {
!     if (use_length) {
        // TODO: use a (modified) version of array_range_check that does not require a
        // constant length to be loaded to a register
        __ cmp(lir_cond_belowEqual, length.result(), index.result());
        __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
      } else {
--- 1923,1946 ----
      if (nc != NULL) {
        null_check_info = state_for(nc);
      } else {
        null_check_info = range_check_info;
      }
+     if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
+       LIR_Opr obj = new_register(T_OBJECT);
+       __ move(LIR_OprFact::oopConst(NULL), obj);
+       __ null_check(obj, new CodeEmitInfo(null_check_info));
+     }
    }
  
    // emit array address setup early so it schedules better
    LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
  
    if (GenerateRangeChecks && needs_range_check) {
!     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
!       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
!     } else if (use_length) {
        // TODO: use a (modified) version of array_range_check that does not require a
        // constant length to be loaded to a register
        __ cmp(lir_cond_belowEqual, length.result(), index.result());
        __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
      } else {
*** 2632,2652 ****
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);
  
!     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
  
      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }
  
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
!   CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
    increment_invocation_counter(info);
  }
  
  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
--- 2654,2674 ----
    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);
  
!     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
  
      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }
  
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
!   CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
    increment_invocation_counter(info);
  }
  
  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
*** 3100,3109 **** --- 3122,3221 ----
      } else {
        __ move(result, rlock_result(x));
      }
    }
  
+ void LIRGenerator::do_Assert(Assert *x) {
+ #ifndef PRODUCT
+   ValueTag tag = x->x()->type()->tag();
+   If::Condition cond = x->cond();
+ 
+   LIRItem xitem(x->x(), this);
+   LIRItem yitem(x->y(), this);
+   LIRItem* xin = &xitem;
+   LIRItem* yin = &yitem;
+ 
+   assert(tag == intTag, "Only integer assertions are valid!");
+ 
+   xin->load_item();
+   yin->dont_load_item();
+ 
+   set_no_result(x);
+ 
+   LIR_Opr left = xin->result();
+   LIR_Opr right = yin->result();
+ 
+   __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
+ 
+ #endif
+ }
+ 
+ 
+ void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
+ 
+ 
+   Instruction *a = x->x();
+   Instruction *b = x->y();
+   if (!a || StressRangeCheckElimination) {
+     assert(!b || StressRangeCheckElimination, "B must also be null");
+ 
+     CodeEmitInfo *info = state_for(x, x->state());
+     CodeStub* stub = new PredicateFailedStub(info);
+ 
+     __ jump(stub);
+   } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
+     int a_int = a->type()->as_IntConstant()->value();
+     int b_int = b->type()->as_IntConstant()->value();
+ 
+     bool ok = false;
+ 
+     switch(x->cond()) {
+       case Instruction::eql: ok = (a_int == b_int); break;
+       case Instruction::neq: ok = (a_int != b_int); break;
+       case Instruction::lss: ok = (a_int < b_int); break;
+       case Instruction::leq: ok = (a_int <= b_int); break;
+       case Instruction::gtr: ok = (a_int > b_int); break;
+       case Instruction::geq: ok = (a_int >= b_int); break;
+       case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
+       case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
+       default: ShouldNotReachHere();
+     }
+ 
+     if (ok) {
+ 
+       CodeEmitInfo *info = state_for(x, x->state());
+       CodeStub* stub = new PredicateFailedStub(info);
+ 
+       __ jump(stub);
+     }
+   } else {
+ 
+     ValueTag tag = x->x()->type()->tag();
+     If::Condition cond = x->cond();
+     LIRItem xitem(x->x(), this);
+     LIRItem yitem(x->y(), this);
+     LIRItem* xin = &xitem;
+     LIRItem* yin = &yitem;
+ 
+     assert(tag == intTag, "Only integer deoptimizations are valid!");
+ 
+     xin->load_item();
+     yin->dont_load_item();
+     set_no_result(x);
+ 
+     LIR_Opr left = xin->result();
+     LIR_Opr right = yin->result();
+ 
+     CodeEmitInfo *info = state_for(x, x->state());
+     CodeStub* stub = new PredicateFailedStub(info);
+ 
+     __ cmp(lir_cond(cond), left, right);
+     __ branch(lir_cond(cond), right->type(), stub);
+   }
+ }
+ 
+ 
  LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
    LIRItemList args(1);
    LIRItem value(arg1, this);
    args.append(&value);
    BasicTypeList signature;
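
One detail worth calling out in do_RangeCheckPredicate above: the Instruction::aeq and Instruction::beq cases evaluate as unsigned comparisons. Casting a signed index to unsigned is the usual way a two-sided bound check (index >= 0 and index < length) collapses into a single compare, because a negative index wraps to a huge unsigned value. The sketch below is plain C++ for illustration only; it assumes a non-negative length and is not HotSpot code.

#include <cassert>

// Standalone illustration of the unsigned-compare trick behind conditions
// like aeq/beq: one comparison covers both the lower and the upper bound.
static bool in_bounds(int index, int length) {
  // Assumes length >= 0 (always true for array lengths).
  return (unsigned int)index < (unsigned int)length;
}

int main() {
  assert(in_bounds(0, 4));
  assert(in_bounds(3, 4));
  assert(!in_bounds(4, 4));   // index == length is out of bounds
  assert(!in_bounds(-1, 4));  // negative index fails the unsigned compare
  return 0;
}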