
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

  __ inline_cache_check(receiver, ic_klass);

  // If the inline cache check fails, jump to the runtime routine.
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
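  // (4 * 4 bytes == four 4-byte AArch64 instructions emitted so far.)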
  if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // Force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

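Note: clinit_barrier is stubbed out here for AArch64. For reference, a minimal sketch of what the hook could look like, modeled on the x86 implementation of the same hook; it assumes a MacroAssembler::clinit_barrier(klass, scratch, L_fast_path) helper and VM_Version::supports_fast_class_init_checks(), neither of which this port provides at this point:

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;

  // Fast path: skip the barrier when the holder class is already initialized
  // (or is being initialized by the current thread).
  __ mov_metadata(rscratch2, method->holder()->constant_encoding());
  __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);

  // Slow path: bounce to the runtime so the call is re-resolved; the barrier
  // is re-run on each attempt until initialization completes.
  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(L_skip_barrier);
}
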
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ mov(reg, zr);
  } else {
    __ movoop(reg, o, /*immediate*/true);
  }
}

void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

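  // Pick the Runtime1 patching stub that matches the patch site recorded in info.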
  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);

