
src/cpu/aarch32/vm/templateInterpreter_aarch32.cpp

rev 8069 : 8164652: aarch32: C1 port


 125 
 126   __ call_VM(noreg,
 127              CAST_FROM_FN_PTR(address,
 128                               InterpreterRuntime::
 129                               throw_ClassCastException),
 130              c_rarg1);
 131   return entry;
 132 }
 133 
 134 address TemplateInterpreterGenerator::generate_exception_handler_common(
 135         const char* name, const char* message, bool pass_oop) {
 136   assert(!pass_oop || message == NULL, "either oop or message but not both");
 137   address entry = __ pc();
 138   if (pass_oop) {
 139     // object is at TOS
 140     __ pop(c_rarg2);
 141   }
 142   // expression stack must be empty before entering the VM if an
 143   // exception happened
 144   __ empty_expression_stack();






 145   // setup parameters
 146   __ lea(c_rarg1, Address((address)name));
 147   if (pass_oop) {
 148     __ call_VM(r0, CAST_FROM_FN_PTR(address,
 149                                     InterpreterRuntime::
 150                                     create_klass_exception),
 151                c_rarg1, c_rarg2);
 152   } else {
 153     // It's kind of lame that ExternalAddress can't take NULL, because
 154     // external_word_Relocation will assert.
 155     if (message != NULL) {
 156       __ lea(c_rarg2, Address((address)message));
 157     } else {
 158       __ mov(c_rarg2, NULL_WORD);
 159     }
 160     __ call_VM(r0,
 161                CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
 162                c_rarg1, c_rarg2);
 163   }
 164   // throw exception


 491   __ stop("stack size is zero");
 492   __ bind(stack_size_okay);
 493 #endif
 494 
 495   // Add stack base to locals and subtract stack size
 496   __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
 497   __ add(r0, r0, rscratch1);
 498 
 499   // Use the maximum number of pages we might bang.
 500   const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
 501                                                                               (StackRedPages+StackYellowPages);
 502 
 503   // add in the red and yellow zone sizes
 504   __ add(r0, r0, max_pages * page_size * 2);
 505 
 506   // check against the current stack bottom
 507   __ cmp(sp, r0);
 508   __ b(after_frame_check, Assembler::HI);
 509 
 510   // Remove the incoming args, peeling the machine SP back to where it
 511   // was in the caller.


 512   __ mov(sp, r4);
 513 
 514   // Note: the restored frame is not necessarily interpreted.
 515   // Use the shared runtime version of the StackOverflowError.
 516   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
 517   __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
 518 
 519   // all done with frame size check
 520   __ bind(after_frame_check);
 521 }
 522 
 523 // Allocate monitor and lock method (asm interpreter)
 524 //
 525 // Args:
 526 //      rmethod: Method*
 527 //      rlocals: locals
 528 //
 529 // Kills:
 530 //      r0
 531 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)


1022   // make room for the pushes we're about to do
1023   //__ sub(rscratch1, sp, 4 * wordSize);
1024   //__ bic(sp, rscratch1, 0xf);
1025   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1026   // in order to extract the result of a method call. If the order of these
1027   // pushes changes, or anything else is added to the stack, then the code in
1028   // interpreter_frame_result must also change.
1029   __ reg_printf("Before push dtos, ltos. sp = %p\n", sp);
1030   __ push(dtos);
1031   __ push(ltos);
1032 
1033   // change thread state
1034   __ mov(rscratch1, _thread_in_native_trans);
1035   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1036   __ dmb(Assembler::ISH);
1037   __ str(rscratch1, Address(rscratch2));
1038   __ reg_printf("before os::is_MP\n");
1039   if (os::is_MP()) {
1040     if (UseMembar) {
1041       // Force this write out before the read below
1042       __ dsb(Assembler::SY);
1043     } else {
1044       // Write serialization page so VM thread can do a pseudo remote membar.
1045       // We use the current thread pointer to calculate a thread specific
1046       // offset to write to within the page. This minimizes bus traffic
1047       // due to cache line collision.
1048       __ serialize_memory(rthread, rscratch2);
1049     }
1050   }
1051   __ reg_printf("after os::is_MP\n");
1052   // check for safepoint operation in progress and/or pending suspend requests
1053   {
1054     Label Continue;
1055     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1056     assert(SafepointSynchronize::_not_synchronized == 0,
1057            "SafepointSynchronize::_not_synchronized");
1058     __ ldr(rscratch2, rscratch2);
1059     Label L;
1060     __ cbnz(rscratch2, L);
1061     __ ldr(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
1062     __ cbz(rscratch2, Continue);


1975 }
1976 
1977 //-----------------------------------------------------------------------------
1978 // Generation of individual instructions
1979 
1980 // helpers for generate_and_dispatch
1981 
1982 
1983 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1984   : TemplateInterpreterGenerator(code) {
1985    generate_all(); // down here so it can be "virtual"
1986 }
1987 
1988 //-----------------------------------------------------------------------------
1989 
1990 // Non-product code
1991 #ifndef PRODUCT
1992 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1993   address entry = __ pc();
1994 
1995   __ push(lr);
1996   __ push(state);
1997   __ push(RegSet::range(r0, r12), sp);

1998   __ mov(c_rarg2, r0);  // Pass itos
1999   __ mov(c_rarg3, r1);  // Pass ltos/dtos high part
2000   __ call_VM(noreg,
2001              CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
2002              c_rarg1, c_rarg2, c_rarg3);
2003   __ pop(RegSet::range(r0, r12), sp);
2004   __ pop(state);
2005   __ pop(lr);
2006   __ b(lr);                                   // return from result handler
2007 
2008   return entry;
2009 }
2010 
2011 void TemplateInterpreterGenerator::count_bytecode() {
2012   __ push(c_rarg0);
2013   __ push(rscratch1);
2014   __ push(rscratch2);
2015   Label L;
2016   __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
2017   __ bind(L);
2018   __ ldrex(rscratch1, rscratch2);
2019   __ add(rscratch1, rscratch1, 1);
2020   // strex stores the 2nd arg to the destination addressed by the 3rd arg,
2021   // and stores the status to the 1st arg. So, the 1st and 2nd should be different.
2022   __ strex(c_rarg0, rscratch1, rscratch2);
2023   __ cmp(c_rarg0, 0);
2024   __ b(L, Assembler::NE);
2025   __ pop(rscratch2);




 125 
 126   __ call_VM(noreg,
 127              CAST_FROM_FN_PTR(address,
 128                               InterpreterRuntime::
 129                               throw_ClassCastException),
 130              c_rarg1);
 131   return entry;
 132 }
 133 
 134 address TemplateInterpreterGenerator::generate_exception_handler_common(
 135         const char* name, const char* message, bool pass_oop) {
 136   assert(!pass_oop || message == NULL, "either oop or message but not both");
 137   address entry = __ pc();
 138   if (pass_oop) {
 139     // object is at TOS
 140     __ pop(c_rarg2);
 141   }
 142   // expression stack must be empty before entering the VM if an
 143   // exception happened
 144   __ empty_expression_stack();
 145   // FIXME shouldn't this be in the rest of generate_* ?
 146   // rdispatch is assumed to cache the dispatch table. This code can be called
 147   // from a signal handler, so it can't assume the exception caller preserved
 148   // the register; restore it here.
 149   __ get_dispatch();
 150   // FIXME shouldn't get_method be here ?
 151   // setup parameters
 152   __ lea(c_rarg1, Address((address)name));
 153   if (pass_oop) {
 154     __ call_VM(r0, CAST_FROM_FN_PTR(address,
 155                                     InterpreterRuntime::
 156                                     create_klass_exception),
 157                c_rarg1, c_rarg2);
 158   } else {
 159     // It's kind of lame that ExternalAddress can't take NULL, because
 160     // external_word_Relocation will assert.
 161     if (message != NULL) {
 162       __ lea(c_rarg2, Address((address)message));
 163     } else {
 164       __ mov(c_rarg2, NULL_WORD);
 165     }
 166     __ call_VM(r0,
 167                CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
 168                c_rarg1, c_rarg2);
 169   }
 170   // throw exception
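
The assert at the top of generate_exception_handler_common encodes a simple contract: either a constant message string is supplied, or the exception-related oop already sitting on TOS is consumed, never both. A hypothetical set of call sites (illustrative only, not taken from this change) shows the two shapes the contract allows:

  // Hypothetical drivers of the generator above, showing the pass_oop/message contract.
  address aioobe_entry = generate_exception_handler_common(
      "java/lang/ArrayIndexOutOfBoundsException", NULL, /*pass_oop=*/false);
  address npe_entry    = generate_exception_handler_common(
      "java/lang/NullPointerException", "null pointer", /*pass_oop=*/false);
  address cce_entry    = generate_exception_handler_common(
      "java/lang/ClassCastException", NULL, /*pass_oop=*/true);  // exception object is on TOS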


 497   __ stop("stack size is zero");
 498   __ bind(stack_size_okay);
 499 #endif
 500 
 501   // Add stack base to locals and subtract stack size
 502   __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
 503   __ add(r0, r0, rscratch1);
 504 
 505   // Use the maximum number of pages we might bang.
 506   const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
 507                                                                               (StackRedPages+StackYellowPages);
 508 
 509   // add in the red and yellow zone sizes
 510   __ add(r0, r0, max_pages * page_size * 2);
 511 
 512   // check against the current stack bottom
 513   __ cmp(sp, r0);
 514   __ b(after_frame_check, Assembler::HI);
 515 
 516   // Remove the incoming args, peeling the machine SP back to where it
 517   // was in the caller.  This is not strictly necessary, but unless we
 518   // do so the stack frame may have a garbage FP; this ensures a
 519   // correct call stack that we can always unwind.
 520   __ mov(sp, r4);
 521 
 522   // Note: the restored frame is not necessarily interpreted.
 523   // Use the shared runtime version of the StackOverflowError.
 524   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
 525   __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
 526 
 527   // all done with frame size check
 528   __ bind(after_frame_check);
 529 }
 530 
 531 // Allocate monitor and lock method (asm interpreter)
 532 //
 533 // Args:
 534 //      rmethod: Method*
 535 //      rlocals: locals
 536 //
 537 // Kills:
 538 //      r0
 539 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
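
The frame-size check above (ending at the after_frame_check label) boils down to: compute the lowest stack address the new frame may reach, pad it by the guard pages the VM might bang, and compare against SP. A C-level sketch follows; the register roles (r0 holding the required frame size in bytes, rscratch1/rscratch2 holding the thread's stack base and size) are assumptions based on code elided from this hunk:

  #include <stdint.h>
  #include <stddef.h>

  // Sketch of the stack-overflow check generated above, not VM code.
  static bool frame_fits(uintptr_t sp, uintptr_t stack_base, size_t stack_size,
                         size_t frame_size, size_t page_size,
                         int shadow_pages, int red_pages, int yellow_pages) {
    int max_pages = shadow_pages > (red_pages + yellow_pages)
                        ? shadow_pages
                        : (red_pages + yellow_pages);
    uintptr_t limit = stack_base - stack_size;     // stack limit (lowest usable address)
    limit += frame_size;                           // room for the new interpreter frame
    limit += (size_t)max_pages * page_size * 2;    // keep clear of the red/yellow guard zone
    return sp > limit;                             // mirrors cmp(sp, r0); b(after_frame_check, HI)
  }

If the check fails, the code above restores the caller's SP and jumps to the shared throw_StackOverflowError stub rather than building a partial frame.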


1030   // make room for the pushes we're about to do
1031   //__ sub(rscratch1, sp, 4 * wordSize);
1032   //__ bic(sp, rscratch1, 0xf);
1033   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1034   // in order to extract the result of a method call. If the order of these
1035   // pushes changes, or anything else is added to the stack, then the code in
1036   // interpreter_frame_result must also change.
1037   __ reg_printf("Before push dtos, ltos. sp = %p\n", sp);
1038   __ push(dtos);
1039   __ push(ltos);
1040 
1041   // change thread state
1042   __ mov(rscratch1, _thread_in_native_trans);
1043   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1044   __ dmb(Assembler::ISH);
1045   __ str(rscratch1, Address(rscratch2));
1046   __ reg_printf("before os::is_MP\n");
1047   if (os::is_MP()) {
1048     if (UseMembar) {
1049       // Force this write out before the read below
1050       __ membar(Assembler::AnyAny);
1051     } else {
1052       // Write serialization page so VM thread can do a pseudo remote membar.
1053       // We use the current thread pointer to calculate a thread specific
1054       // offset to write to within the page. This minimizes bus traffic
1055       // due to cache line collision.
1056       __ serialize_memory(rthread, rscratch2);
1057     }
1058   }
1059   __ reg_printf("after os::is_MP\n");
1060   // check for safepoint operation in progress and/or pending suspend requests
1061   {
1062     Label Continue;
1063     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1064     assert(SafepointSynchronize::_not_synchronized == 0,
1065            "SafepointSynchronize::_not_synchronized");
1066     __ ldr(rscratch2, rscratch2);
1067     Label L;
1068     __ cbnz(rscratch2, L);
1069     __ ldr(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
1070     __ cbz(rscratch2, Continue);
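
When UseMembar is off, the code above writes to a VM-wide serialization page instead of issuing a full barrier; the VM thread can then protect that page to force every Java thread through a serialization point. The per-thread offset idea described in the comment can be sketched as follows (the hashing constants are illustrative assumptions, not the actual os:: implementation):

  #include <stdint.h>
  #include <stddef.h>

  // Illustrative sketch of "write serialization page": hash the thread pointer into
  // a word-aligned slot so different threads tend to hit different cache lines.
  // The shift value and the power-of-two page size are assumptions.
  static void write_serialize_page(volatile int* page, size_t page_size, uintptr_t thread) {
    size_t words = page_size / sizeof(int);
    size_t index = (thread >> 4) & (words - 1);  // thread-specific, word-granular slot
    page[index] = 1;                             // plain store; the VM thread later protects
                                                 // the page to create a pseudo remote membar
  }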


1983 }
1984 
1985 //-----------------------------------------------------------------------------
1986 // Generation of individual instructions
1987 
1988 // helpers for generate_and_dispatch
1989 
1990 
1991 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1992   : TemplateInterpreterGenerator(code) {
1993    generate_all(); // down here so it can be "virtual"
1994 }
1995 
1996 //-----------------------------------------------------------------------------
1997 
1998 // Non-product code
1999 #ifndef PRODUCT
2000 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2001   address entry = __ pc();
2002 

2003   __ push(state);
2004   // Save all general-purpose registers on the stack, omitting SP and PC
2005   __ push(RegSet::range(r0, r12) + lr, sp);
2006   __ mov(c_rarg2, r0);  // Pass itos
2007   __ mov(c_rarg3, r1);  // Pass ltos/dtos high part
2008   __ call_VM(noreg,
2009              CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
2010              c_rarg1, c_rarg2, c_rarg3);
2011   __ pop(RegSet::range(r0, r12) + lr, sp);
2012   __ pop(state);

2013   __ b(lr);                                   // return from result handler
2014 
2015   return entry;
2016 }
2017 
2018 void TemplateInterpreterGenerator::count_bytecode() {
2019   __ push(c_rarg0);
2020   __ push(rscratch1);
2021   __ push(rscratch2);
2022   Label L;
2023   __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
2024   __ bind(L);
2025   __ ldrex(rscratch1, rscratch2);
2026   __ add(rscratch1, rscratch1, 1);
2027   // strex stores the 2nd arg to the destination addressed by the 3rd arg,
2028   // and stores the status to the 1st arg. So, the 1st and 2nd should be different.
2029   __ strex(c_rarg0, rscratch1, rscratch2);
2030   __ cmp(c_rarg0, 0);
2031   __ b(L, Assembler::NE);
2032   __ pop(rscratch2);
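
The ldrex/strex pair in count_bytecode is a load-exclusive/store-exclusive retry loop: reload the counter and retry the store until the exclusive store reports success. In C terms it is roughly equivalent to the following sketch using GCC/Clang atomic builtins (an equivalent, not VM code):

  // ldrex corresponds to the relaxed load, strex to the weak compare-exchange,
  // whose failure (non-zero status in the assembly) sends us back around the loop.
  static inline void atomic_increment(volatile int* counter) {
    int old_value;
    do {
      old_value = __atomic_load_n(counter, __ATOMIC_RELAXED);
    } while (!__atomic_compare_exchange_n(counter, &old_value, old_value + 1,
                                          /*weak=*/true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
  }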

