src/cpu/x86/vm/c1_LinearScan_x86.cpp

Print this page
rev 4136 : 7153771: array bound check elimination for c1
Summary: when possible, optimize out array bounds checks, inserting predicates when needed.
Reviewed-by:


 658 void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
 659   LIR_Opr left  = op2->in_opr1();
 660   if (!left->is_float_kind()) {
 661     return;
 662   }
 663   if (left->is_xmm_register()) {
 664     return;
 665   }
 666 
 667   LIR_Opr right = op2->in_opr2();
 668   LIR_Opr res   = op2->result_opr();
 669   LIR_Opr new_left  = left;  // new operands relative to the actual fpu stack top
 670   LIR_Opr new_right = right;
 671   LIR_Opr new_res   = res;
 672 
 673   assert(!left->is_xmm_register() && !right->is_xmm_register() && !res->is_xmm_register(), "not for xmm registers");
 674 
 675   switch (op2->code()) {
 676     case lir_cmp:
 677     case lir_cmp_fd2i:
 678     case lir_ucmp_fd2i: {

 679       assert(left->is_fpu_register(), "invalid LIR");
 680       assert(right->is_fpu_register(), "invalid LIR");
 681 
 682       // the left-hand side must be on top of stack.
 683       // the right-hand side is never popped, even if is_last_use is set
 684       insert_exchange(left);
 685       new_left = to_fpu_stack_top(left);
 686       new_right = to_fpu_stack(right);
 687       pop_if_last_use(op2, left);
 688       break;
 689     }
 690 
 691     case lir_mul_strictfp:
 692     case lir_div_strictfp: {
 693       assert(op2->tmp1_opr()->is_fpu_register(), "strict operations need temporary fpu stack slot");
 694       insert_free_if_dead(op2->tmp1_opr());
 695       assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
 696       // fall-through: continue with the normal handling of lir_mul and lir_div
 697     }
 698     case lir_add:




 658 void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
 659   LIR_Opr left  = op2->in_opr1();
 660   if (!left->is_float_kind()) {
 661     return;
 662   }
 663   if (left->is_xmm_register()) {
 664     return;
 665   }
 666 
 667   LIR_Opr right = op2->in_opr2();
 668   LIR_Opr res   = op2->result_opr();
 669   LIR_Opr new_left  = left;  // new operands relative to the actual fpu stack top
 670   LIR_Opr new_right = right;
 671   LIR_Opr new_res   = res;
 672 
 673   assert(!left->is_xmm_register() && !right->is_xmm_register() && !res->is_xmm_register(), "not for xmm registers");
 674 
 675   switch (op2->code()) {
 676     case lir_cmp:
 677     case lir_cmp_fd2i:
 678     case lir_ucmp_fd2i:
 679     case lir_assert: {
 680       assert(left->is_fpu_register(), "invalid LIR");
 681       assert(right->is_fpu_register(), "invalid LIR");
 682 
 683       // the left-hand side must be on top of stack.
 684       // the right-hand side is never popped, even if is_last_use is set
 685       insert_exchange(left);
 686       new_left = to_fpu_stack_top(left);
 687       new_right = to_fpu_stack(right);
 688       pop_if_last_use(op2, left);
 689       break;
 690     }
 691 
 692     case lir_mul_strictfp:
 693     case lir_div_strictfp: {
 694       assert(op2->tmp1_opr()->is_fpu_register(), "strict operations need temporary fpu stack slot");
 695       insert_free_if_dead(op2->tmp1_opr());
 696       assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
 697       // fall-through: continue with the normal handling of lir_mul and lir_div
 698     }
 699     case lir_add: