
src/cpu/aarch64/vm/templateTable_aarch64.cpp

rev 13098 : 8182161: aarch64: combine andr+cbnz into tbnz when possible
Summary: Combine andr+cbnz into tbnz when possible to save one instruction
Reviewed-by: aph
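
This revision's change is a peephole over bit tests: where the generated interpreter code previously masked a register with andr and branched on the result with cbnz, a single tbnz can test the bit and branch directly, saving one instruction. A minimal sketch of the pattern, with illustrative register, mask, and label names (none taken from the patch itself):

    // Before: isolate one bit, then branch if the result is non-zero.
    __ andr(rscratch1, r2, 0x20);    // mask bit 5 (illustrative mask)
    __ cbnz(rscratch1, L_bit_set);   // two instructions in total
    // After: tbnz tests a single bit and branches in one instruction.
    __ tbnz(r2, 5, L_bit_set);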


 229   switch (bc) {
 230   case Bytecodes::_fast_aputfield:
 231   case Bytecodes::_fast_bputfield:
 232   case Bytecodes::_fast_zputfield:
 233   case Bytecodes::_fast_cputfield:
 234   case Bytecodes::_fast_dputfield:
 235   case Bytecodes::_fast_fputfield:
 236   case Bytecodes::_fast_iputfield:
 237   case Bytecodes::_fast_lputfield:
 238   case Bytecodes::_fast_sputfield:
 239     {
 240       // We skip bytecode quickening for putfield instructions when
 241       // the put_code written to the constant pool cache is zero.
 242       // This is required so that every execution of this instruction
 243       // calls out to InterpreterRuntime::resolve_get_put to do
 244       // additional, required work.
 245       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 246       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 247       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 248       __ movw(bc_reg, bc);
 249       __ cmpw(temp_reg, (unsigned) 0);
 250       __ br(Assembler::EQ, L_patch_done);  // don't patch
 251     }
 252     break;
 253   default:
 254     assert(byte_no == -1, "sanity");
 255     // the pair bytecodes have already done the load.
 256     if (load_bc_into_bc_reg) {
 257       __ movw(bc_reg, bc);
 258     }
 259   }
 260 
 261   if (JvmtiExport::can_post_breakpoint()) {
 262     Label L_fast_patch;
 263     // if a breakpoint is present we can't rewrite the stream directly
 264     __ load_unsigned_byte(temp_reg, at_bcp(0));
 265     __ cmpw(temp_reg, Bytecodes::_breakpoint);
 266     __ br(Assembler::NE, L_fast_patch);
 267     // Let breakpoint table handling rewrite to quicker bytecode
 268     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
 269     __ b(L_patch_done);
 270     __ bind(L_fast_patch);




 229   switch (bc) {
 230   case Bytecodes::_fast_aputfield:
 231   case Bytecodes::_fast_bputfield:
 232   case Bytecodes::_fast_zputfield:
 233   case Bytecodes::_fast_cputfield:
 234   case Bytecodes::_fast_dputfield:
 235   case Bytecodes::_fast_fputfield:
 236   case Bytecodes::_fast_iputfield:
 237   case Bytecodes::_fast_lputfield:
 238   case Bytecodes::_fast_sputfield:
 239     {
 240       // We skip bytecode quickening for putfield instructions when
 241       // the put_code written to the constant pool cache is zero.
 242       // This is required so that every execution of this instruction
 243       // calls out to InterpreterRuntime::resolve_get_put to do
 244       // additional, required work.
 245       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 246       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 247       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 248       __ movw(bc_reg, bc);
 249       __ cbzw(temp_reg, L_patch_done);  // don't patch
 250     }
 251     break;
 252   default:
 253     assert(byte_no == -1, "sanity");
 254     // the pair bytecodes have already done the load.
 255     if (load_bc_into_bc_reg) {
 256       __ movw(bc_reg, bc);
 257     }
 258   }
 259 
 260   if (JvmtiExport::can_post_breakpoint()) {
 261     Label L_fast_patch;
 262     // if a breakpoint is present we can't rewrite the stream directly
 263     __ load_unsigned_byte(temp_reg, at_bcp(0));
 264     __ cmpw(temp_reg, Bytecodes::_breakpoint);
 265     __ br(Assembler::NE, L_fast_patch);
 266     // Let breakpoint table handling rewrite to quicker bytecode
 267     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), rmethod, rbcp, bc_reg);
 268     __ b(L_patch_done);
 269     __ bind(L_fast_patch);
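
The hunk above applies the same idea to a compare against zero: for a zero immediate, cmpw followed by br(Assembler::EQ, ...) costs two instructions, while cbzw compares a 32-bit register with zero and branches in a single one. A sketch of the equivalence, with the AArch64 instructions each call emits noted in comments:

    // Before: explicit compare against zero, then a conditional branch.
    __ cmpw(temp_reg, (unsigned) 0);     // emits: subs wzr, w<temp>, #0
    __ br(Assembler::EQ, L_patch_done);  // emits: b.eq L_patch_done
    // After: compare-and-branch-if-zero folded into one instruction.
    __ cbzw(temp_reg, L_patch_done);     // emits: cbz w<temp>, L_patch_done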

