src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp

Old:
 507                                                                               (StackRedPages+StackYellowPages);
 508 
 509   // add in the red and yellow zone sizes
 510   __ add(r0, r0, max_pages * page_size * 2);
 511 
 512   // check against the current stack bottom
 513   __ cmp(sp, r0);
 514   __ br(Assembler::HI, after_frame_check);
 515 
 516   // Remove the incoming args, peeling the machine SP back to where it
 517   // was in the caller.  This is not strictly necessary, but unless we
 518   // do so the stack frame may have a garbage FP; this ensures a
 519   // correct call stack that we can always unwind.  The ANDR should be
 520   // unnecessary because the sender SP in r13 is always aligned, but
 521   // it doesn't hurt.
 522   __ andr(sp, r13, -16);
 523 
 524   // Note: the restored frame is not necessarily interpreted.
 525   // Use the shared runtime version of the StackOverflowError.
 526   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
 527   __ b(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
 528 
 529   // all done with frame size check
 530   __ bind(after_frame_check);
 531 }
 532 
 533 // Allocate monitor and lock method (asm interpreter)
 534 //
 535 // Args:
 536 //      rmethod: Method*
 537 //      rlocals: locals
 538 //
 539 // Kills:
 540 //      r0
 541 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
 542 //      rscratch1, rscratch2 (scratch regs)
 543 void InterpreterGenerator::lock_method(void) {
 544   // synchronize method
 545   const Address access_flags(rmethod, Method::access_flags_offset());
 546   const Address monitor_block_top(
 547         rfp,

New:

 507                                                                               (StackRedPages+StackYellowPages);
 508 
 509   // add in the red and yellow zone sizes
 510   __ add(r0, r0, max_pages * page_size * 2);
 511 
 512   // check against the current stack bottom
 513   __ cmp(sp, r0);
 514   __ br(Assembler::HI, after_frame_check);
 515 
 516   // Remove the incoming args, peeling the machine SP back to where it
 517   // was in the caller.  This is not strictly necessary, but unless we
 518   // do so the stack frame may have a garbage FP; this ensures a
 519   // correct call stack that we can always unwind.  The ANDR should be
 520   // unnecessary because the sender SP in r13 is always aligned, but
 521   // it doesn't hurt.
 522   __ andr(sp, r13, -16);
 523 
 524   // Note: the restored frame is not necessarily interpreted.
 525   // Use the shared runtime version of the StackOverflowError.
 526   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
 527   __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
 528 
 529   // all done with frame size check
 530   __ bind(after_frame_check);
 531 }
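
The hunk above is the interpreter's frame-size check. The lines just before it (not shown here) fold the prospective frame size and the thread's stack bounds into r0; the visible code then pads that value with the red and yellow guard zones and takes the HI branch only while SP is strictly above the result, so the new frame can never reach into the guard pages. A minimal C++ sketch of the same test, using made-up names for the inputs (stack_base, stack_size, frame_size and guard_bytes are illustrative, not HotSpot identifiers):

  #include <cstddef>
  #include <cstdint>

  // Sketch only: does a frame of frame_size bytes fit without touching the
  // guard pages at the bottom of the thread's stack?
  static bool frame_fits(uintptr_t sp, uintptr_t stack_base, size_t stack_size,
                         size_t frame_size, size_t guard_bytes) {
    uintptr_t stack_bottom = stack_base - stack_size;       // lowest stack address
    uintptr_t limit = stack_bottom + guard_bytes + frame_size;
    return sp > limit;   // mirrors "cmp sp, r0; br HI, after_frame_check"
  }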
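
On the overflow path the machine SP is peeled back to the caller's value and re-aligned: "andr sp, r13, -16" masks the sender SP (held in r13) down to a 16-byte boundary, which the AArch64 ABI requires of SP and which, as the comment says, should already hold but costs nothing to enforce. Masking with -16 is just a round-down; a tiny illustration with a made-up helper name:

  #include <cstdint>

  // AND-ing with -16 (i.e. ~15) clears the low four bits, rounding an
  // address down to the nearest 16-byte boundary, which is the effect of
  // "andr sp, r13, -16" in the generated code above.
  static inline uintptr_t align_down_16(uintptr_t p) {
    return p & ~static_cast<uintptr_t>(15);
  }

  // align_down_16(0x7ffdc8) == 0x7ffdc0; an aligned value passes through unchanged.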
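
The one functional change in this hunk is at line 527: the direct b to the shared throw_StackOverflowError stub becomes far_jump. A plain AArch64 B instruction encodes a signed 26-bit word offset, so it can only reach targets within roughly +/-128 MB of the branch itself; with a sufficiently large code cache the shared stub can sit outside that window and the direct branch cannot be encoded. A far jump can fall back to materializing the target address in a scratch register and branching through it. The sketch below shows the kind of reachability test this involves; the helper is hypothetical and is not HotSpot's MacroAssembler::far_jump.

  #include <cstdint>

  // Hypothetical check: can target be reached from pc with a single B
  // instruction?  B encodes a signed 26-bit word (4-byte-granule) offset,
  // i.e. a range of about +/-128 MB.
  static bool reachable_with_plain_b(uintptr_t pc, uintptr_t target) {
    const intptr_t range = 128 * 1024 * 1024;
    intptr_t offset = static_cast<intptr_t>(target) - static_cast<intptr_t>(pc);
    return offset >= -range && offset < range;
  }

  // When this is false, a far jump loads the full 64-bit target into a
  // scratch register (for example with ADRP/ADD or a literal load) and
  // branches with BR.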
 532 
 533 // Allocate monitor and lock method (asm interpreter)
 534 //
 535 // Args:
 536 //      rmethod: Method*
 537 //      rlocals: locals
 538 //
 539 // Kills:
 540 //      r0
 541 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
 542 //      rscratch1, rscratch2 (scratch regs)
 543 void InterpreterGenerator::lock_method(void) {
 544   // synchronize method
 545   const Address access_flags(rmethod, Method::access_flags_offset());
 546   const Address monitor_block_top(
 547         rfp,