< prev index next >

src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp

Print this page
rev 54117 : AArch64: 64-bit Literal Oops


 231   // note: non-blocking leaf routine => no call info needed
 232   Runtime1::StubID exit_id;
 233   if (ce->compilation()->has_fpu_code()) {
 234     exit_id = Runtime1::monitorexit_id;
 235   } else {
 236     exit_id = Runtime1::monitorexit_nofpu_id;
 237   }
 238   __ adr(lr, _continuation);
 239   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
 240 }
 241 
 242 
 243 // Implementation of patching:
 244 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
 245 // - Replace original code with a call to the stub
 246 // At Runtime:
 247 // - call to stub, jump to runtime
 248 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
 249 // - in runtime: after initializing class, restore original code, reexecute instruction
 250 
 // Offset (negative) from a patch site back to its patch info: the literal
 // value is the negated size of the NativeGeneralJump emitted at the site,
 // so the info is found by stepping back over that jump.
 251 int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;






 252 
 // Align a patch site if the platform requires it.  AArch64 instructions are
 // fixed-width and 4-byte aligned, so no extra work is needed here and the
 // body is intentionally empty.
 253 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 254 }
 255 
 // C1 runtime patching is not used on this port (see the assert message), so
 // reaching this stub at code-emission time indicates a bug.
 256 void PatchingStub::emit_code(LIR_Assembler* ce) {
 257   assert(false, "AArch64 should not use C1 runtime patching");
 258 }
 259 
 260 
 // Out-of-line stub that requests a deoptimization: passes the trap request
 // to the Runtime1 deoptimize blob and records debug info for the call.
 261 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
 262   __ bind(_entry);                          // slow-path branches land here
 263   ce->store_parameter(_trap_request, 0);    // trap request as outgoing stub parameter 0
 264   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
 265   ce->add_call_info_here(_info);            // record call/debug info at the call site
 266   DEBUG_ONLY(__ should_not_reach_here());   // the deopt blob is not expected to return here
 267 }
 268 
 269 
 270 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
 271   address a;




 231   // note: non-blocking leaf routine => no call info needed
 232   Runtime1::StubID exit_id;
 233   if (ce->compilation()->has_fpu_code()) {
 234     exit_id = Runtime1::monitorexit_id;
 235   } else {
 236     exit_id = Runtime1::monitorexit_nofpu_id;
 237   }
 238   __ adr(lr, _continuation);
 239   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
 240 }
 241 
 242 
 243 // Implementation of patching:
 244 // - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
 245 // - Replace original code with a call to the stub
 246 // At Runtime:
 247 // - call to stub, jump to runtime
 248 // - in runtime: preserve all registers (especially objects, i.e., source and destination object)
 249 // - in runtime: after initializing class, restore original code, reexecute instruction
 250 
 251 // On AArch64 we can generate 48-bit or 64-bit literal addresses with 3 or
 252 // 4 instructions, switchable on the Use64BitLiteralAddresses option.
 253 // The value returned is therefore not a compile-time constant, unlike on other
 254 // platforms.
 255 int patch_info_offset() {
 256   return -NativeGeneralJump::instruction_size;
 257 }
 258 
 // Align a patch site if the platform requires it.  AArch64 instructions are
 // fixed-width and 4-byte aligned, so no extra work is needed here and the
 // body is intentionally empty.
 259 void PatchingStub::align_patch_site(MacroAssembler* masm) {
 260 }
 261 
 // C1 runtime patching is not used on this port (see the assert message), so
 // reaching this stub at code-emission time indicates a bug.
 262 void PatchingStub::emit_code(LIR_Assembler* ce) {
 263   assert(false, "AArch64 should not use C1 runtime patching");
 264 }
 265 
 266 
 // Out-of-line stub that requests a deoptimization: passes the trap request
 // to the Runtime1 deoptimize blob and records debug info for the call.
 267 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
 268   __ bind(_entry);                          // slow-path branches land here
 269   ce->store_parameter(_trap_request, 0);    // trap request as outgoing stub parameter 0
 270   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
 271   ce->add_call_info_here(_info);            // record call/debug info at the call site
 272   DEBUG_ONLY(__ should_not_reach_here());   // the deopt blob is not expected to return here
 273 }
 274 
 275 
 276 void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
 277   address a;


< prev index next >