src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp

Old version:




 358 // This specifies the rsp decrement needed to build the frame
 359 int LIR_Assembler::initial_frame_size_in_bytes() const {
 360   // if rounding, must let FrameMap know!
 361 
 362   // The frame_map records size in slots (32-bit words)
 363 
 364   // subtract two words to account for return address and link
 365   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 366 }
 367 
 368 
 369 int LIR_Assembler::emit_exception_handler() {
 370   // if the last instruction is a call (typically to do a throw which
 371   // is coming at the end after block reordering) the return address
 372   // must still point into the code area in order to avoid assertion
 373   // failures when searching for the corresponding bci => add a nop
 374   // (was bug 5/14/1999 - gri)
 375   __ nop();
 376 
 377   // generate code for exception handler
 378   address handler_base = __ start_a_stub(exception_handler_size);
 379   if (handler_base == NULL) {
 380     // not enough space left for the handler
 381     bailout("exception handler overflow");
 382     return -1;
 383   }
 384 
 385   int offset = code_offset();
 386 
 387   // the exception oop and pc are in r0 and r3
 388   // no other registers need to be preserved, so invalidate them
 389   __ invalidate_registers(false, true, true, false, true, true);
 390 
 391   // check that there is really an exception
 392   __ verify_not_null_oop(r0);
 393 
 394   // search an exception handler (r0: exception oop, r3: throwing pc)
 395   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));  __ should_not_reach_here();
 396   guarantee(code_offset() - offset <= exception_handler_size, "overflow");
 397   __ end_a_stub();
 398 
 399   return offset;
 400 }
 401 
 402 
 403 // Emit the code to remove the frame from the stack in the exception
 404 // unwind path.
 405 int LIR_Assembler::emit_unwind_handler() {
 406 #ifndef PRODUCT
 407   if (CommentedAssembly) {
 408     _masm->block_comment("Unwind handler");
 409   }
 410 #endif
 411 
 412   int offset = code_offset();
 413 
 414   // Fetch the exception from TLS and clear out exception related thread state
 415   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 416   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));


 450   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 451 
 452   // Emit the slow path assembly
 453   if (stub != NULL) {
 454     stub->emit_code(this);
 455   }
 456 
 457   return offset;
 458 }
 459 
 460 
 461 int LIR_Assembler::emit_deopt_handler() {
 462   // if the last instruction is a call (typically to do a throw which
 463   // is coming at the end after block reordering) the return address
 464   // must still point into the code area in order to avoid assertion
 465   // failures when searching for the corresponding bci => add a nop
 466   // (was bug 5/14/1999 - gri)
 467   __ nop();
 468 
 469   // generate code for deopt handler
 470   address handler_base = __ start_a_stub(deopt_handler_size);
 471   if (handler_base == NULL) {
 472     // not enough space left for the handler
 473     bailout("deopt handler overflow");
 474     return -1;
 475   }
 476 
 477   int offset = code_offset();
 478 
 479   __ adr(lr, pc());
 480   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 481   guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
 482   __ end_a_stub();
 483 
 484   return offset;
 485 }
 486 
 487 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 488   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 489   int pc_offset = code_offset();
 490   flush_debug_info(pc_offset);
 491   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 492   if (info->exception_handlers() != NULL) {
 493     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 494   }
 495 }
 496 
 497 // Rather than take a segfault when the polling page is protected,
 498 // explicitly check for a safepoint in progress and if there is one,
 499 // fake a call to the handler as if a segfault had been caught.
 500 void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
 501   __ mov(rscratch1, SafepointSynchronize::address_of_state());


1984 
1985 
1986 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1987   address call = __ ic_call(op->addr());
1988   if (call == NULL) {
1989     bailout("trampoline stub overflow");
1990     return;
1991   }
1992   add_call_info(code_offset(), op->info());
1993 }
1994 
1995 
1996 /* Currently, vtable-dispatch is only enabled for sparc platforms */
1997 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
1998   ShouldNotReachHere();
1999 }
2000 
2001 
2002 void LIR_Assembler::emit_static_call_stub() {
2003   address call_pc = __ pc();
2004   address stub = __ start_a_stub(call_stub_size);
2005   if (stub == NULL) {
2006     bailout("static call stub overflow");
2007     return;
2008   }
2009 
2010   int start = __ offset();
2011 
2012   __ relocate(static_stub_Relocation::spec(call_pc));
2013   __ mov_metadata(rmethod, (Metadata*)NULL);
2014   __ movptr(rscratch1, 0);
2015   __ br(rscratch1);
2016 
2017   assert(__ offset() - start <= call_stub_size, "stub too big");
2018   __ end_a_stub();
2019 }
2020 
2021 
2022 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2023   assert(exceptionOop->as_register() == r0, "must match");
2024   assert(exceptionPC->as_register() == r3, "must match");
2025 
2026   // exception object is not added to oop map by LinearScan
2027   // (LinearScan assumes that no oops are in fixed registers)
2028   info->add_register_oop(exceptionOop);
2029   Runtime1::StubID unwind_id;
2030 
2031   // get current pc information
2032   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2033   int pc_for_athrow_offset = __ offset();
2034   InternalAddress pc_for_athrow(__ pc());
2035   __ adr(exceptionPC->as_register(), pc_for_athrow);
2036   add_call_info(pc_for_athrow_offset, info); // for exception handler
 2037 

New version:

 358 // This specifies the rsp decrement needed to build the frame
 359 int LIR_Assembler::initial_frame_size_in_bytes() const {
 360   // if rounding, must let FrameMap know!
 361 
 362   // The frame_map records size in slots (32-bit words)
 363 
 364   // subtract two words to account for return address and link
 365   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word))  * VMRegImpl::stack_slot_size;
 366 }
 367 
 368 
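The arithmetic above is easier to check with concrete numbers: a VMReg stack slot is 4 bytes (VMRegImpl::stack_slot_size) and on a 64-bit VM a word holds two slots (VMRegImpl::slots_per_word == 2), so subtracting 2 * slots_per_word drops the four slots (16 bytes) occupied by the saved return address and frame link. A minimal worked sketch, assuming a hypothetical frame of 20 slots:

    // Worked example of the frame-size computation (hypothetical numbers)
    const int slots_per_word  = 2;    // VMRegImpl::slots_per_word on a 64-bit VM
    const int stack_slot_size = 4;    // bytes per VMReg stack slot
    int framesize_in_slots    = 20;   // stands in for frame_map()->framesize()
    int rsp_decrement = (framesize_in_slots - 2 * slots_per_word) * stack_slot_size;
    // (20 - 4) * 4 == 64: sp is dropped by 64 bytes when the frame is built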
 369 int LIR_Assembler::emit_exception_handler() {
 370   // if the last instruction is a call (typically to do a throw which
 371   // is coming at the end after block reordering) the return address
 372   // must still point into the code area in order to avoid assertion
 373   // failures when searching for the corresponding bci => add a nop
 374   // (was bug 5/14/1999 - gri)
 375   __ nop();
 376 
 377   // generate code for exception handler
 378   address handler_base = __ start_a_stub(exception_handler_size());
 379   if (handler_base == NULL) {
 380     // not enough space left for the handler
 381     bailout("exception handler overflow");
 382     return -1;
 383   }
 384 
 385   int offset = code_offset();
 386 
 387   // the exception oop and pc are in r0 and r3
 388   // no other registers need to be preserved, so invalidate them
 389   __ invalidate_registers(false, true, true, false, true, true);
 390 
 391   // check that there is really an exception
 392   __ verify_not_null_oop(r0);
 393 
 394   // search an exception handler (r0: exception oop, r3: throwing pc)
 395   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));  __ should_not_reach_here();
 396   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 397   __ end_a_stub();
 398 
 399   return offset;
 400 }
 401 
 402 
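All three handler emitters in this file follow the same stub-emission contract: reserve space, bail out if the code buffer has no room, and verify afterwards that the body fit in its budget. A sketch of the shared shape, with max_size standing in for exception_handler_size() or deopt_handler_size():

    // Common stub-emission pattern (sketch; max_size is illustrative)
    address base = __ start_a_stub(max_size);  // reserve space in the stub section
    if (base == NULL) {                        // code buffer exhausted
      bailout("handler overflow");             // abandon this compilation
      return -1;
    }
    int offset = code_offset();
    // ... emit the handler body ...
    guarantee(code_offset() - offset <= max_size, "overflow");
    __ end_a_stub();
    return offset;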
 403 // Emit the code to remove the frame from the stack in the exception
 404 // unwind path.
 405 int LIR_Assembler::emit_unwind_handler() {
 406 #ifndef PRODUCT
 407   if (CommentedAssembly) {
 408     _masm->block_comment("Unwind handler");
 409   }
 410 #endif
 411 
 412   int offset = code_offset();
 413 
 414   // Fetch the exception from TLS and clear out exception related thread state
 415   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 416   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));


 450   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 451 
 452   // Emit the slow path assembly
 453   if (stub != NULL) {
 454     stub->emit_code(this);
 455   }
 456 
 457   return offset;
 458 }
 459 
 460 
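The ldr/str pair at lines 415-416 is a load-then-clear of the pending exception in thread-local state: the oop is fetched into r0 and the slot is immediately zeroed (str zr) so the exception cannot be delivered twice. In C-like pseudocode, a sketch of the equivalent logic:

    // Equivalent thread-state manipulation (pseudocode sketch)
    oop exception = thread->exception_oop();  // ldr r0, [rthread, #exception_oop]
    thread->set_exception_oop(NULL);          // str zr, [rthread, #exception_oop]
    // ... frame teardown, elided in this diff ...
    // finally tail-jump to the unwind_exception_id blob with the oop in r0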
 461 int LIR_Assembler::emit_deopt_handler() {
 462   // if the last instruction is a call (typically to do a throw which
 463   // is coming at the end after block reordering) the return address
 464   // must still point into the code area in order to avoid assertion
 465   // failures when searching for the corresponding bci => add a nop
 466   // (was bug 5/14/1999 - gri)
 467   __ nop();
 468 
 469   // generate code for deopt handler
 470   address handler_base = __ start_a_stub(deopt_handler_size());
 471   if (handler_base == NULL) {
 472     // not enough space left for the handler
 473     bailout("deopt handler overflow");
 474     return -1;
 475   }
 476 
 477   int offset = code_offset();
 478 
 479   __ adr(lr, pc());
 480   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 481   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 482   __ end_a_stub();
 483 
 484   return offset;
 485 }
 486 
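The key instruction in the deopt handler is __ adr(lr, pc()): it materializes the handler's own address into the link register before the tail-jump, so the deopt blob sees a "return address" that points back into this nmethod and can map it to the deoptimization site. Roughly, the emitted AArch64 sequence looks like this (comment sketch, not a literal disassembly):

    // Emitted instruction sequence, roughly (sketch):
    //   adr  lr, .            // lr := address of this instruction (inside the nmethod)
    //   b    <deopt unpack>   // far_jump: a direct branch, or a branch through a
    //                         // scratch register when the blob is out of B range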
 487 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 488   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 489   int pc_offset = code_offset();
 490   flush_debug_info(pc_offset);
 491   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 492   if (info->exception_handlers() != NULL) {
 493     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 494   }
 495 }
 496 
 497 // Rather than take a segfault when the polling page is protected,
 498 // explicitly check for a safepoint in progress and if there is one,
 499 // fake a call to the handler as if a segfault had been caught.
 500 void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
 501   __ mov(rscratch1, SafepointSynchronize::address_of_state());
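The body of poll_for_safepoint is elided here beyond its first instruction, but the comment describes the approach: instead of letting the protected polling page fault, the generated code reads the safepoint state word and branches to the poll-trap handler when a safepoint is pending. A hypothetical sketch of such an explicit check (the label and second scratch register are illustrative, not the elided body):

    // Hypothetical explicit safepoint check (illustrative sketch only)
    Label no_safepoint;
    __ mov(rscratch1, SafepointSynchronize::address_of_state());
    __ ldrw(rscratch2, Address(rscratch1));                      // load the state word
    __ cmpw(rscratch2, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::EQ, no_safepoint);                          // fast path: no safepoint
    // ... record debug info and enter the poll-trap path, as if the
    // polling-page segfault had actually been taken ...
    __ bind(no_safepoint);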


1984 
1985 
1986 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
1987   address call = __ ic_call(op->addr());
1988   if (call == NULL) {
1989     bailout("trampoline stub overflow");
1990     return;
1991   }
1992   add_call_info(code_offset(), op->info());
1993 }
1994 
1995 
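__ ic_call can return NULL because AArch64 direct call instructions only reach +/-128 MB; a call whose target may be farther away needs a trampoline stub in the code buffer, and when the stub section is full the call cannot be emitted. The NULL check therefore turns stub-section exhaustion into a clean compilation bailout rather than a broken nmethod. The same discipline applies to other trampoline-using calls (sketch; dest is a hypothetical target):

    // Trampoline-based calls fail when the stub section is full (sketch)
    address call_site = __ trampoline_call(RuntimeAddress(dest));  // dest: hypothetical
    if (call_site == NULL) {        // no room for the trampoline stub
      bailout("trampoline stub overflow");
      return;
    }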
1996 /* Currently, vtable-dispatch is only enabled for sparc platforms */
1997 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
1998   ShouldNotReachHere();
1999 }
2000 
2001 
2002 void LIR_Assembler::emit_static_call_stub() {
2003   address call_pc = __ pc();
2004   address stub = __ start_a_stub(call_stub_size());
2005   if (stub == NULL) {
2006     bailout("static call stub overflow");
2007     return;
2008   }
2009 
2010   int start = __ offset();
2011 
2012   __ relocate(static_stub_Relocation::spec(call_pc));
2013   __ mov_metadata(rmethod, (Metadata*)NULL);
2014   __ movptr(rscratch1, 0);
2015   __ br(rscratch1);
2016 
2017   assert(__ offset() - start <= call_stub_size(), "stub too big");
2018   __ end_a_stub();
2019 }
2020 
2021 
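The stub body above is deliberately all placeholders: mov_metadata(rmethod, (Metadata*)NULL) and movptr(rscratch1, 0) emit fixed-length immediate sequences that the runtime patches once the call is resolved, and the final br never changes. Conceptually, the patched stub behaves like this (comment sketch):

    // What the resolved, patched stub effectively does (sketch):
    //   rmethod   = <resolved Method*>     // patched over mov_metadata(rmethod, NULL)
    //   rscratch1 = <target entry point>   // patched over movptr(rscratch1, 0)
    //   br rscratch1                       // jump to the resolved entry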
2022 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2023   assert(exceptionOop->as_register() == r0, "must match");
2024   assert(exceptionPC->as_register() == r3, "must match");
2025 
2026   // exception object is not added to oop map by LinearScan
2027   // (LinearScan assumes that no oops are in fixed registers)
2028   info->add_register_oop(exceptionOop);
2029   Runtime1::StubID unwind_id;
2030 
2031   // get current pc information
2032   // pc is only needed if the method has an exception handler, the unwind code does not need it.
2033   int pc_for_athrow_offset = __ offset();
2034   InternalAddress pc_for_athrow(__ pc());
2035   __ adr(exceptionPC->as_register(), pc_for_athrow);
2036   add_call_info(pc_for_athrow_offset, info); // for exception handler
2037 

