1 /*
   2  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "interp_masm_aarch32.hpp"
  30 #include "interpreter/bytecodeHistogram.hpp"
  31 #include "interpreter/bytecodeTracer.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterGenerator.hpp"
  34 #include "interpreter/interpreterRuntime.hpp"
  35 #include "interpreter/templateTable.hpp"
  36 #include "oops/arrayOop.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "prims/jvmtiExport.hpp"
  41 #include "prims/jvmtiThreadState.hpp"
  42 #include "runtime/arguments.hpp"
  43 #include "runtime/deoptimization.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "runtime/synchronizer.hpp"
  48 #include "runtime/timer.hpp"
  49 #include "runtime/vframeArray.hpp"
  50 #include "utilities/debug.hpp"
  51 
  52 #include <sys/types.h>
  53 
  54 #ifndef PRODUCT
  55 #include "oops/method.hpp"
  56 #endif // !PRODUCT
  57 
  58 #define __ _masm->
  59 
  60 #ifndef CC_INTERP
  61 
  62 //-----------------------------------------------------------------------------
  63 
  64 extern "C" void entry(CodeBuffer*);
  65 
  66 //-----------------------------------------------------------------------------
  67 
  68 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  69   address entry = __ pc();
  70 
  71 #ifdef ASSERT
  72   {
  73     Label L;
  74     __ ldr(rscratch1, Address(rfp,
  75                        frame::interpreter_frame_monitor_block_top_offset *
  76                        wordSize));
  77     __ mov(rscratch2, sp);
    __ cmp(rscratch1, rscratch2); // maximal sp for current rfp (stack
                                  // grows downward)
  80     __ b(L, Assembler::HS); // check if frame is complete
  81     __ stop ("interpreter frame not set up");
  82     __ bind(L);
  83   }
  84 #endif // ASSERT
  85   // Restore bcp under the assumption that the current frame is still
  86   // interpreted
  87   __ restore_bcp();
  88 
  89   // expression stack must be empty before entering the VM if an
  90   // exception happened
  91   __ empty_expression_stack();
  92   // throw exception
  93   __ call_VM(noreg,
  94              CAST_FROM_FN_PTR(address,
  95                               InterpreterRuntime::throw_StackOverflowError));
  96   return entry;
  97 }
  98 
  99 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
 100         const char* name) {
 101   address entry = __ pc();
 102   // expression stack must be empty before entering the VM if an
 103   // exception happened
 104   __ empty_expression_stack();
 105   // setup parameters
 106   // ??? convention: expect aberrant index in register r2
 107   __ mov(c_rarg1, (address)name);
 108   __ call_VM(noreg,
 109              CAST_FROM_FN_PTR(address,
 110                               InterpreterRuntime::
 111                               throw_ArrayIndexOutOfBoundsException),
 112              c_rarg1, c_rarg2);
 113   return entry;
 114 }
 115 
 116 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
 117   address entry = __ pc();
 118 
 119   // object is at TOS
 120   __ pop(c_rarg1);
 121 
 122   // expression stack must be empty before entering the VM if an
 123   // exception happened
 124   __ empty_expression_stack();
 125 
 126   __ call_VM(noreg,
 127              CAST_FROM_FN_PTR(address,
 128                               InterpreterRuntime::
 129                               throw_ClassCastException),
 130              c_rarg1);
 131   return entry;
 132 }
 133 
 134 address TemplateInterpreterGenerator::generate_exception_handler_common(
 135         const char* name, const char* message, bool pass_oop) {
 136   assert(!pass_oop || message == NULL, "either oop or message but not both");
 137   address entry = __ pc();
 138   if (pass_oop) {
 139     // object is at TOS
 140     __ pop(c_rarg2);
 141   }
 142   // expression stack must be empty before entering the VM if an
 143   // exception happened
 144   __ empty_expression_stack();
  // FIXME shouldn't it be in rest of generate_* ?
  // rdispatch is assumed to cache the dispatch table. This code can be called
  // from a signal handler, so it can't assume the exception caller preserved
  // the register; restore it here.
 149   __ get_dispatch();
 150   // FIXME shouldn't get_method be here ?
 151   // setup parameters
 152   __ lea(c_rarg1, Address((address)name));
 153   if (pass_oop) {
 154     __ call_VM(r0, CAST_FROM_FN_PTR(address,
 155                                     InterpreterRuntime::
 156                                     create_klass_exception),
 157                c_rarg1, c_rarg2);
 158   } else {
    // Kind of lame: ExternalAddress can't take NULL because
    // external_word_Relocation will assert.
 161     if (message != NULL) {
 162       __ lea(c_rarg2, Address((address)message));
 163     } else {
 164       __ mov(c_rarg2, NULL_WORD);
 165     }
 166     __ call_VM(r0,
 167                CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
 168                c_rarg1, c_rarg2);
 169   }
 170   // throw exception
 171   __ b(address(Interpreter::throw_exception_entry()));
 172   return entry;
 173 }
 174 
 175 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
 176   address entry = __ pc();
 177   // NULL last_sp until next java call
 178   __ mov(rscratch1, 0);
 179   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 180   __ dispatch_next(state);
 181   return entry;
 182 }
 183 
 184 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
 185   address entry = __ pc();
 186 
 187   __ print_method_exit();
 188   __ reg_printf("A. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 189 
 190   // Restore stack bottom in case i2c adjusted stack
 191   __ ldr(sp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 192   // and NULL it as marker that sp is now tos until next java call
 193   __ mov(rscratch1, 0);
 194   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 195   __ reg_printf("B. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 196   __ restore_bcp();
 197   __ restore_locals();
 198   __ restore_constant_pool_cache();
 199   __ get_method(rmethod);
 200   __ reg_printf("C. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 201 
 202   // Pop N words from the stack
 203   __ get_cache_and_index_at_bcp(r3, r2, 1, index_size);
 204   __ reg_printf("D. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 205   __ ldr(r3, Address(r3, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 206   __ andr(r3, r3, ConstantPoolCacheEntry::parameter_size_mask);
 207 
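  // r3 now holds the callee's parameter size in words; pop that many
  // expression-stack slots (each slot is one 4-byte word, hence lsl(2)).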
 208   __ add(sp, sp, r3, lsl(2));
 209 
 210   // Restore machine SP
 211   /*__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 212   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
 213   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
 214   __ ldr(rscratch2,
 215          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
 216   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
 217   __ bic(sp, rscratch1, 0xf);*/
 218 
 219   __ get_dispatch();
 220   __ reg_printf("E. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 221   __ dispatch_next(state, step);
 222 
 223   return entry;
 224 }
 225 
 226 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
 227                                                                int step) {
 228   address entry = __ pc();
 229   __ restore_bcp();
 230   __ restore_locals();
 231   __ restore_constant_pool_cache();
 232   __ get_method(rmethod);
 233 
 234   // handle exceptions
 235   {
 236     Label L;
 237     __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 238     __ cbz(rscratch1, L);
 239     __ call_VM(noreg,
 240                CAST_FROM_FN_PTR(address,
 241                                 InterpreterRuntime::throw_pending_exception));
 242     __ should_not_reach_here();
 243     __ bind(L);
 244   }
 245 
 246   __ get_dispatch();
 247 
 248   // Calculate stack limit
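  // i.e. sp := (initial expression stack bottom
  //             - (max_stack + monitor area + 2 slop words) * wordSize),
  //      rounded down to a 16-byte boundary by the bic below.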
 249   __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 250   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
 251   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
 252   __ ldr(rscratch2,
 253          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
 254   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
 255   __ bic(sp, rscratch1, 0xf);
 256 
 257   // Restore expression stack pointer
 258   __ ldr(sp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 259   // NULL last_sp until next java call
 260   __ mov(rscratch1, 0);
 261   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 262 
 263   __ dispatch_next(state, step);
 264   return entry;
 265 }
 266 
 267 
 268 int AbstractInterpreter::BasicType_as_index(BasicType type) {
 269   int i = 0;
 270   switch (type) {
 271     case T_BOOLEAN: i = 0; break;
 272     case T_CHAR   : i = 1; break;
 273     case T_BYTE   : i = 2; break;
 274     case T_SHORT  : i = 3; break;
 275     case T_INT    : i = 4; break;
 276     case T_LONG   : i = 5; break;
 277     case T_VOID   : i = 6; break;
 278     case T_FLOAT  : i = 7; break;
 279     case T_DOUBLE : i = 8; break;
 280     case T_OBJECT : i = 9; break;
 281     case T_ARRAY  : i = 9; break;
 282     default       : ShouldNotReachHere();
 283   }
 284   assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
 285          "index out of bounds");
 286   return i;
 287 }
 288 
 289 
 290 address TemplateInterpreterGenerator::generate_result_handler_for(
 291         BasicType type) {
 292   address entry = __ pc();
 293   switch (type) {
 294   case T_BOOLEAN: __ uxtb(r0, r0);       break;
 295   case T_CHAR   : __ uxth(r0, r0);       break;
 296   case T_BYTE   : __ sxtb(r0, r0);       break;
 297   case T_SHORT  : __ sxth(r0, r0);       break;
 298   case T_INT    : /* nothing to do */    break;
 299   case T_LONG   : /* nothing to do */    break;
 300   case T_VOID   : /* nothing to do */    break;
 301   case T_FLOAT  : /* nothing to do */    break;
 302   case T_DOUBLE : /* nothing to do */    break;
 303   case T_OBJECT :
 304     // retrieve result from frame
 305     __ reg_printf("In object result handler\n");
 306     __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
 307     // and verify it
 308     __ verify_oop(r0);
 309     break;
 310   default       : ShouldNotReachHere();
 311   }
 312   __ b(lr);                                  // return from result handler
 313   return entry;
 314 }
 315 
 316 address TemplateInterpreterGenerator::generate_safept_entry_for(
 317         TosState state,
 318         address runtime_entry) {
 319   address entry = __ pc();
 320   __ push(state);
 321   __ call_VM(noreg, runtime_entry);
 322   __ membar(Assembler::AnyAny);
 323   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
 324   return entry;
 325 }
 326 
 327 // Helpers for commoning out cases in the various type of method entries.
 328 //
 329 
 330 
 331 // increment invocation count & check for overflow
 332 //
 333 // Note: checking for negative value instead of overflow
 334 //       so we have a 'sticky' overflow test
 335 //
 336 // rmethod: method
 337 //
 338 void InterpreterGenerator::generate_counter_incr(
 339         Label* overflow,
 340         Label* profile_method,
 341         Label* profile_method_continue) {
 342   Label done;
 343   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
 344   if (TieredCompilation) {
 345     int increment = InvocationCounter::count_increment;
 346     int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
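    // increment_mask_and_jump below adds 'increment' to the counter, masks the
    // result with 'mask' and branches to 'overflow' when the masked value is
    // zero, i.e. roughly once every 2^Tier0InvokeNotifyFreqLog invocations.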
 347     Label no_mdo;
 348     if (ProfileInterpreter) {
 349       // Are we profiling?
 350       __ ldr(r0, Address(rmethod, Method::method_data_offset()));
 351       __ cbz(r0, no_mdo);
 352       // Increment counter in the MDO
 353       const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
 354                                            in_bytes(InvocationCounter::counter_offset()));
 355       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
 356       __ b(done);
 357     }
 358     __ bind(no_mdo);
 359     // Increment counter in MethodCounters
 360     const Address invocation_counter(rscratch2,
 361                   MethodCounters::invocation_counter_offset() +
 362                   InvocationCounter::counter_offset());
 363     __ get_method_counters(rmethod, rscratch2, done);
 364     __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
 365     __ bind(done);
 366   } else {
 367     const Address backedge_counter(rscratch2,
 368                   MethodCounters::backedge_counter_offset() +
 369                   InvocationCounter::counter_offset());
 370     const Address invocation_counter(rscratch2,
 371                   MethodCounters::invocation_counter_offset() +
 372                   InvocationCounter::counter_offset());
 373 
 374     __ get_method_counters(rmethod, rscratch2, done);
 375 
 376     if (ProfileInterpreter) { // %%% Merge this into MethodData*
 377       __ ldr(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
 378       __ add(r1, r1, 1);
 379       __ str(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
 380     }
 381     // Update standard invocation counters
 382     __ ldr(r1, invocation_counter);
 383     __ ldr(r0, backedge_counter);
 384 
 385     __ add(r1, r1, InvocationCounter::count_increment);
 386     __ mov(rscratch1, InvocationCounter::count_mask_value);
 387     __ andr(r0, r0, rscratch1);
 388 
 389     __ str(r1, invocation_counter);
 390     __ add(r0, r0, r1);                // add both counters
 391 
    // profile_method is non-null only for interpreted methods, so
    // profile_method != NULL == !native_call
 394 
 395     if (ProfileInterpreter && profile_method != NULL) {
 396       // Test to see if we should create a method data oop
 397       __ mov(rscratch2, ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
 398       __ ldr(rscratch2, rscratch2);
 399       __ cmp(r0, rscratch2);
 400       __ b(*profile_method_continue, Assembler::LT);
 401 
 402       // if no method data exists, go to profile_method
 403       __ test_method_data_pointer(r0, *profile_method);
 404     }
 405 
 406     {
 407       __ mov(rscratch2, ExternalAddress((address) &InvocationCounter::InterpreterInvocationLimit));
 408       __ ldr(rscratch2, rscratch2);
 409       __ cmp(r0, rscratch2);
 410       __ b(*overflow, Assembler::HS);
 411     }
 412     __ bind(done);
 413   }
 414 }
 415 
 416 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
 417 
 418   // Asm interpreter on entry
 419   // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
 420   // Everything as it was on entry
 421 
 422   // InterpreterRuntime::frequency_counter_overflow takes two
 423   // arguments, the first (thread) is passed by call_VM, the second
 424   // indicates if the counter overflow occurs at a backwards branch
 425   // (NULL bcp).  We pass zero for it.  The call returns the address
 426   // of the verified entry point for the method or NULL if the
 427   // compilation did not complete (either went background or bailed
 428   // out).
 429   __ mov(c_rarg1, 0);
 430   __ call_VM(noreg,
 431              CAST_FROM_FN_PTR(address,
 432                               InterpreterRuntime::frequency_counter_overflow),
 433              c_rarg1);
 434 
 435   __ b(*do_continue);
 436 }
 437 
 438 // See if we've got enough room on the stack for locals plus overhead.
 439 // The expression stack grows down incrementally, so the normal guard
 440 // page mechanism will work for that.
 441 //
// NOTE: The additional locals are always pushed as well (this wasn't
// obvious in generate_method_entry), so the guard should work for them
// too.
 445 //
 446 // Args:
 447 //      r3: number of additional locals this frame needs (what we must check)
 448 //      rmethod: Method*
 449 //
 450 // Kills:
 451 //      r0
 452 void InterpreterGenerator::generate_stack_overflow_check(void) {
 453 
  // monitor entry size: see picture of stack set
  // (generate_method_entry) and frame_aarch32.hpp
 456   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 457 
  // total overhead size: entry_size + (saved rfp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
 461   const int overhead_size =
 462     -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
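  // interpreter_frame_initial_sp_offset is negative (the expression stack bottom
  // lies below the saved rfp), so negating it yields the size in bytes of the
  // fixed part of the frame.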
 463 
 464   const int page_size = os::vm_page_size();
 465 
 466   Label after_frame_check;
 467 
 468   // see if the frame is greater than one page in size. If so,
 469   // then we need to verify there is enough stack space remaining
 470   // for the additional locals.
 471   //
 472   __ mov(rscratch1, (page_size - overhead_size) / Interpreter::stackElementSize);
 473   __ cmp(r3, rscratch1);
 474   __ b(after_frame_check, Assembler::LS);
 475 
  // compute sp as if this were going to be the last frame on
  // the stack before the red zone
 478 
 479   const Address stack_base(rthread, Thread::stack_base_offset());
 480   const Address stack_size(rthread, Thread::stack_size_offset());
 481 
 482   // locals + overhead, in bytes
 483   __ mov(r0, overhead_size);
 484   __ add(r0, r0, r3, lsl(Interpreter::logStackElementSize));  // 1 slot per parameter.
 485 
 486   __ ldr(rscratch1, stack_base);
 487   __ ldr(rscratch2, stack_size);
 488 
 489 #ifdef ASSERT
 490   Label stack_base_okay, stack_size_okay;
 491   // verify that thread stack base is non-zero
 492   __ cbnz(rscratch1, stack_base_okay);
 493   __ stop("stack base is zero");
 494   __ bind(stack_base_okay);
 495   // verify that thread stack size is non-zero
 496   __ cbnz(rscratch2, stack_size_okay);
 497   __ stop("stack size is zero");
 498   __ bind(stack_size_okay);
 499 #endif
 500 
 501   // Add stack base to locals and subtract stack size
 502   __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
 503   __ add(r0, r0, rscratch1);
 504 
 505   // Use the maximum number of pages we might bang.
 506   const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
 507                                                                               (StackRedPages+StackYellowPages);
 508 
 509   // add in the red and yellow zone sizes
 510   __ add(r0, r0, max_pages * page_size * 2);
 511 
 512   // check against the current stack bottom
 513   __ cmp(sp, r0);
 514   __ b(after_frame_check, Assembler::HI);
 515 
 516   // Remove the incoming args, peeling the machine SP back to where it
 517   // was in the caller.  This is not strictly necessary, but unless we
 518   // do so the stack frame may have a garbage FP; this ensures a
 519   // correct call stack that we can always unwind.
 520   __ mov(sp, r4);
 521 
 522   // Note: the restored frame is not necessarily interpreted.
 523   // Use the shared runtime version of the StackOverflowError.
 524   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
 525   __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
 526 
 527   // all done with frame size check
 528   __ bind(after_frame_check);
 529 }
 530 
 531 // Allocate monitor and lock method (asm interpreter)
 532 //
 533 // Args:
 534 //      rmethod: Method*
 535 //      rlocals: locals
 536 //
 537 // Kills:
 538 //      r0
 539 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
 540 //      rscratch1, rscratch2 (scratch regs)
 541 void InterpreterGenerator::lock_method(void) {
 542   // synchronize method
 543   const Address access_flags(rmethod, Method::access_flags_offset());
 544   const Address monitor_block_top(
 545         rfp,
 546         frame::interpreter_frame_monitor_block_top_offset * wordSize);
 547   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 548 
 549 #ifdef ASSERT
 550   {
 551     Label L;
 552     __ ldr(r0, access_flags);
 553     __ tst(r0, JVM_ACC_SYNCHRONIZED);
 554     __ b(L, Assembler::NE);
 555     __ stop("method doesn't need synchronization");
 556     __ bind(L);
 557   }
 558 #endif // ASSERT
 559 
 560   // get synchronization object
 561   {
 562     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 563     Label done;
 564     __ ldr(r0, access_flags);
 565     __ tst(r0, JVM_ACC_STATIC);
 566     // get receiver (assume this is frequent case)
 567     __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
 568     __ b(done, Assembler::EQ);
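    // Static method: synchronize on the class mirror instead, reached via
    // Method* -> ConstMethod* -> ConstantPool* -> pool holder klass -> java mirror.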
 569     __ ldr(r0, Address(rmethod, Method::const_offset()));
 570     __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
 571     __ ldr(r0, Address(r0,
 572                            ConstantPool::pool_holder_offset_in_bytes()));
 573     __ ldr(r0, Address(r0, mirror_offset));
 574 
 575 #ifdef ASSERT
 576     {
 577       Label L;
 578       __ cbnz(r0, L);
 579       __ stop("synchronization object is NULL");
 580       __ bind(L);
 581     }
 582 #endif // ASSERT
 583 
 584     __ bind(done);
 585   }
 586 
 587   // add space for monitor & lock
 588   __ sub(sp, sp, entry_size); // add space for a monitor entry
 589   __ mov(rscratch1, sp);
 590   __ str(rscratch1, monitor_block_top);  // set new monitor block top
 591   // store object
 592   __ str(r0, Address(sp, BasicObjectLock::obj_offset_in_bytes()));
 593   __ mov(c_rarg1, sp); // object address
 594   __ lock_object(c_rarg1);
 595 }
 596 
// Generate a fixed interpreter frame. The setup is identical for
// interpreted methods and for native methods, hence the shared code.
 599 //
 600 // Args:
 601 //      lr: return address
 602 //      rmethod: Method*
 603 //      rlocals: pointer to locals
 604 //      rcpool: cp cache
 605 //      stack_pointer: previous sp
 606 //      r4 contains the sender sp
 607 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 608   // initialize fixed part of activation frame
 609   __ reg_printf("About to print native entry, rmethod = %p\n", rmethod);
 610   /*__ mov(rscratch1, (address)0x62829d20);
 611   __ cmp(rscratch1, rmethod);
 612   Label skip;
 613   __ b(skip, Assembler::NE);
 614   __ bkpt(111);
 615   __ bind(skip);*/
 616 
 617   __ print_method_entry(rmethod, native_call);
 618 
 619   if (native_call) {
 620     __ sub(sp, sp, 12 *  wordSize);
 621     __ mov(rbcp, 0);
 622     __ strd(sp, rbcp, Address(sp));
 623     // add 2 zero-initialized slots for native calls
 624     __ strd(rbcp, rbcp, Address(sp, 10 * wordSize));
 625     // Note using rbcp in strd
 626   } else {
 627     __ sub(sp, sp, 10 *  wordSize);
 628     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
 629     __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
 630     __ strd(sp, rbcp, Address(sp));
 631   }
 632 
 633   if (ProfileInterpreter) {
 634     Label method_data_continue;
 635     __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
 636     __ cbz(rscratch1, method_data_continue);
 637     __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
 638     __ bind(method_data_continue);
 639     __ strd(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
 640   } else {
 641     __ mov(rscratch1, 0);
 642     __ strd(rscratch1, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
 643   }
 644   __ ldr(rcpool, Address(rmethod, Method::const_offset()));
 645   __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
 646   __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
 647   __ strd(rlocals, rcpool, Address(sp, 2 * wordSize));
 648 
 649   // this code sets up the stack frame, in the same fashion as enter()
 650   __ strd(rfp, lr, Address(sp, 8 * wordSize));
 651   // point rfp to location of old pc
 652   __ add(rfp, sp, 9 * wordSize);
 653 
 654   __ reg_printf("Three-quarters through\n");
 655   // set sender sp
 656   // leave last_sp as null
 657   __ mov(rscratch1, 0);
 658   // r4 contains the sender sp
 659   __ strd(rscratch1, r4, Address(sp, 6 * wordSize));
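  // At this point the fixed frame is laid out as follows (word offsets
  // relative to the new rfp):
  //    0: return address (old lr)
  //   -1: saved caller rfp
  //   -2: sender sp (r4)
  //   -3: last_sp (NULL until the next Java call)
  //   -4: Method*
  //   -5: mdp (or NULL if there is no MethodData)
  //   -6: constant pool cache
  //   -7: locals pointer
  //   -8: bcp (NULL for native calls)
  //   -9: monitor block top / initial expression stack bottom (currently the
  //       bottom of the fixed frame itself)
  // For native calls, two additional zero-initialized slots sit above the
  // return address slot, presumably the oop temp area (see
  // interpreter_frame_oop_temp_offset).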
 660 
 661   // Move SP out of the way
 662   /*if (! native_call) {
 663     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 664     __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
 665     __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
 666     __ sub(rscratch1, sp, rscratch1, lsl(2));
 667     __ bic(sp, rscratch1, 0xf);
 668   }*/
 669   // FIXME This code moves the sp to after the end of the stack - if this is what's happening
 670   // some calls out of the VM may need to be patched
 671   __ reg_printf("Fully through\n");
 672 }
 673 
 674 // End of helpers
 675 
 676 // Various method entries
 677 //------------------------------------------------------------------------------------------------------------------------
 678 //
 679 //
 680 
 681 // Method entry for java.lang.ref.Reference.get.
 682 address InterpreterGenerator::generate_Reference_get_entry(void) {
 683 #if INCLUDE_ALL_GCS
 684   // Code: _aload_0, _getfield, _areturn
 685   // parameter size = 1
 686   //
 687   // The code that gets generated by this routine is split into 2 parts:
 688   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 689   //    2. The slow path - which is an expansion of the regular method entry.
 690   //
 691   // Notes:-
 692   // * In the G1 code we do not check whether we need to block for
 693   //   a safepoint. If G1 is enabled then we must execute the specialized
 694   //   code for Reference.get (except when the Reference object is null)
 695   //   so that we can log the value in the referent field with an SATB
 696   //   update buffer.
 697   //   If the code for the getfield template is modified so that the
 698   //   G1 pre-barrier code is executed when the current method is
 699   //   Reference.get() then going through the normal method entry
 700   //   will be fine.
 701   // * The G1 code can, however, check the receiver object (the instance
 702   //   of java.lang.Reference) and jump to the slow path if null. If the
 703   //   Reference object is null then we obviously cannot fetch the referent
 704   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 705   //   regular method entry code to generate the NPE.
 706   //
  // This code is based on generate_accessor_entry.
 708   //
 709   // rmethod: Method*
  // r4: senderSP must be preserved for the slow path, set SP to it on the fast path
 711 
 712   address entry = __ pc();
 713 
 714   const int referent_offset = java_lang_ref_Reference::referent_offset;
 715   guarantee(referent_offset > 0, "referent offset not initialized");
 716 
 717   if (UseG1GC) {
 718     Label slow_path;
 719     const Register local_0 = c_rarg0;
 720     // Check if local 0 != NULL
 721     // If the receiver is null then it is OK to jump to the slow path.
 722     __ ldr(local_0, Address(sp, 0));
 723     __ cbz(local_0, slow_path);
 724 
 725 
 726     // Load the value of the referent field.
 727     const Address field_address(local_0, referent_offset);
 728     __ load_heap_oop(local_0, field_address);
 729 
 730     // Generate the G1 pre-barrier code to log the value of
 731     // the referent field in an SATB buffer.
 732     __ enter(); // g1_write may call runtime
 733     __ g1_write_barrier_pre(noreg /* obj */,
 734                             local_0 /* pre_val */,
 735                             rthread /* thread */,
 736                             rscratch2 /* tmp */,
 737                             true /* tosca_live */,
 738                             true /* expand_call */);
 739     __ leave();
 740     // areturn
 741     __ mov(sp, r4);           // set sp to sender sp
 742     __ stop("Check sp restored correctly, may be get_dispatch()?");
 743     //__ bic(sp, r13, 0xf);  // done with stack
 744     __ b(lr);
 745 
 746     // generate a vanilla interpreter entry as the slow path
 747     __ bind(slow_path);
 748     (void) generate_normal_entry(false);
 749 
 750     return entry;
 751   }
 752 #endif // INCLUDE_ALL_GCS
 753 
 754   // If G1 is not enabled then attempt to go through the accessor entry point
 755   // Reference.get is an accessor
 756   return generate_accessor_entry();
 757 }
 758 
 759 void InterpreterGenerator::bang_stack_shadow_pages(bool native_call, Register rscratch) {
 760   // Bang each page in the shadow zone. We can't assume it's been done for
 761   // an interpreter frame with greater than a page of locals, so each page
 762   // needs to be checked.  Only true for non-native.
 763   assert(rscratch != rscratch2, "can't be");
 764   if (UseStackBanging) {
 765     const int start_page = native_call ? StackShadowPages : 1;
 766     const int page_size = os::vm_page_size();
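    // For native calls only the page at the shadow-zone boundary is touched
    // (start_page == StackShadowPages); interpreted frames bang every page from
    // 1 to StackShadowPages.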
 767     __ mov(rscratch, 0);
 768     for (int pages = start_page; pages <= StackShadowPages ; pages++) {
 769       __ sub(rscratch2, sp, pages*page_size);
 770       __ str(rscratch, Address(rscratch2));
 771     }
 772   }
 773 }
 774 
 775 
 776 // Interpreter stub for calling a native method. (asm interpreter)
 777 // This sets up a somewhat different looking stack for calling the
 778 // native method than the typical interpreter frame setup.
 779 address InterpreterGenerator::generate_native_entry(bool synchronized) {
 780   // determine code generation flags
 781   bool inc_counter  = UseCompiler || CountCompiledCalls;
 782 
 783   // r1: Method*
 784   // r4: sender sp
 785 
 786   address entry_point = __ pc();
 787   __ reg_printf("entering generate_native_entry, lr = %p, rfp = %p\n\tRBCP = %p\n", lr, rfp, rbcp);
 788 
 789   const Address constMethod       (rmethod, Method::const_offset());
 790   const Address access_flags      (rmethod, Method::access_flags_offset());
 791   const Address size_of_parameters(r2, ConstMethod::
 792                                        size_of_parameters_offset());
 793 
 794   // get parameter size (always needed)
 795   __ ldr(r2, constMethod);
 796   __ load_unsigned_short(r2, size_of_parameters);
 797 
 798   // native calls don't need the stack size check since they have no
 799   // expression stack and the arguments are already on the stack and
 800   // we only add a handful of words to the stack
 801 
 802   // rmethod: Method*
 803   // r2: size of parameters
 804   // r4: sender sp
 805 
 806   // for natives the size of locals is zero
 807 
 808   // compute beginning of parameters (rlocals)
 809   __ add(rlocals, sp, r2, lsl(2));
 810   __ sub(rlocals, rlocals, wordSize);
 811   __ reg_printf("(start of parameters) rlocals = %p, nparams = %d\n", rlocals, r2);
 812 
 813   // initialize fixed part of activation frame
 814   generate_fixed_frame(true);
 815   __ reg_printf("pushed new fixed frame, lr = %p, rfp = %p\n", lr, rfp);
 816 
  Register locals_sp = r4; // this overwrites rdispatch; we can restore it at the end
  // !! If this changes, change the end of arguments in interpreterRT_aarch32.cpp
 819   //__ mov(r4, sp); //Save top of arguments
 820 
 821   // make sure method is native & not abstract
 822 #ifdef ASSERT
 823   __ ldr(r0, access_flags);
 824   {
 825     Label L;
 826     __ tst(r0, JVM_ACC_NATIVE);
 827     __ b(L, Assembler::NE);
 828     __ stop("tried to execute non-native method as native");
 829     __ bind(L);
 830   }
 831   {
 832     Label L;
 833     __ tst(r0, JVM_ACC_ABSTRACT);
 834     __ b(L, Assembler::EQ);
 835     __ stop("tried to execute abstract method in interpreter");
 836     __ bind(L);
 837   }
 838 #endif
 839 
 840   // Since at this point in the method invocation the exception
 841   // handler would try to exit the monitor of synchronized methods
 842   // which hasn't been entered yet, we set the thread local variable
 843   // _do_not_unlock_if_synchronized to true. The remove_activation
 844   // will check this flag.
 845 
 846    const Address do_not_unlock_if_synchronized(rthread,
 847         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 848   __ mov(rscratch2, true);
 849   __ strb(rscratch2, do_not_unlock_if_synchronized);
 850 
 851   // increment invocation count & check for overflow
 852   Label invocation_counter_overflow;
 853   if (inc_counter) {
 854     generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
 855   }
 856 
 857   Label continue_after_compile;
 858   __ bind(continue_after_compile);
 859 
 860   bang_stack_shadow_pages(true, rscratch1);
 861   // Note rscratch1 will contain zero here due to bang_stack_shadow_pages
 862   // reset the _do_not_unlock_if_synchronized flag
 863   //__ mov(rscratch1, 0);
 864   __ strb(rscratch1, do_not_unlock_if_synchronized);
 865 
 866   // check for synchronized methods
 867   // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
 869   if (synchronized) {
 870     lock_method();
 871   } else {
 872     // no synchronization necessary
 873 #ifdef ASSERT
 874     {
 875       Label L;
 876       __ ldr(r0, access_flags);
 877       __ tst(r0, JVM_ACC_SYNCHRONIZED);
 878       __ b(L, Assembler::EQ);
 879       __ stop("method needs synchronization");
 880       __ bind(L);
 881     }
 882 #endif
 883   }
 884 
 885   // start execution
 886 #ifdef ASSERT
 887   {
 888     Label L;
 889     const Address monitor_block_top(rfp,
 890                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
 891     __ ldr(rscratch1, monitor_block_top);
 892     __ cmp(sp, rscratch1);
 893     __ b(L, Assembler::EQ);
 894     __ stop("broken stack frame setup in interpreter");
 895     __ bind(L);
 896   }
 897 #endif
 898 
 899   // jvmti support
 900   __ notify_method_entry();
 901 
 902   const Register result_handler = rlocals;
  // This is recomputed for the new function, and result_handler is not written
  // until after the function has been called.
 905 
 906   // allocate space for parameters
 907   __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 908   __ load_unsigned_short(rscratch1, Address(rscratch1, ConstMethod::size_of_parameters_offset()));
 909 
 910   __ sub(sp, sp, rscratch1, lsl(Interpreter::logStackElementSize + 1));
  // The +1 is a hack to double the amount of space allocated for parameters. This is likely far
  // more than needed: in the worst case, when parameters have to be placed on the stack, they are
  // aligned as LONG | INT | EMPTY | LONG ..., which would only increase the space used by half.
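  // Worked example (hypothetical signature (JIJ)V, assuming longs must be 8-byte
  // aligned in the outgoing native frame): the 5 Java argument words could at
  // worst occupy LONG LONG | INT | EMPTY | LONG LONG = 6 native slots, i.e. at
  // most half as much again as the Java footprint, so doubling is always enough.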
 914   __ align_stack();
 915   __ mov(locals_sp, sp);
 916   __ reg_printf("Stack Pointer on arg copy, sp = %p, locals_sp = %p, rlocals = %p\n", sp, locals_sp, rlocals);
 917 
 918   // get signature handler
 919   {
 920     Label L;
 921     __ ldr(rscratch1, Address(rmethod, Method::signature_handler_offset()));
 922     __ cmp(rscratch1, 0);
 923     __ b(L, Assembler::NE);
 924     __ reg_printf("Prepare_native_call, locals_sp = %p, rlocals = %p\n", locals_sp, rlocals);
 925     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
 926                                        InterpreterRuntime::prepare_native_call), rmethod);
 927     __ reg_printf("Finished prepare_native_call, locals_sp = %p, rlocals = %p\n", locals_sp, rlocals);
 928     __ ldr(rscratch1, Address(rmethod, Method::signature_handler_offset()));
 929     __ bind(L);
 930   }
 931 
 932   // call signature handler
 933   assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
 934          "adjust this code");
 935   assert(InterpreterRuntime::SignatureHandlerGenerator::to() == locals_sp,
 936          "adjust this code");
 937   assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
 938           "adjust this code");
 939 
 940   // The generated handlers do not touch rmethod (the method).
 941   // However, large signatures cannot be cached and are generated
 942   // each time here.  The slow-path generator can do a GC on return,
 943   // so we must reload it after the call.
 944   __ reg_printf("**BEFORE**\nrlocals = %p,locals_sp = %p, sp = %p\n", rlocals, locals_sp, sp);
 945   __ reg_printf("About to call the Method::signature_handler = %p\n", rscratch1);
 946   __ bl(rscratch1);
 947   __ reg_printf("**AFTER**\nr0 : %p, r1 : %p, r2 : %p\n", r0, r1, r2);
 948   __ reg_printf("r3 : %p, sp : %p\n", r3, sp);
 949   __ get_method(rmethod);        // slow path can do a GC, reload rmethod
 950 
 951 
 952 
 953   // result handler is in r0
 954   // set result handler
 955   __ mov(result_handler, r0);
 956   // pass mirror handle if static call
 957   {
 958     Label L;
 959     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 960     __ ldr(rscratch1, Address(rmethod, Method::access_flags_offset()));
 961     __ tst(rscratch1, JVM_ACC_STATIC);
 962     __ b(L, Assembler::EQ);
 963     // get mirror
 964     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 965     __ ldr(rscratch1, Address(rscratch1, ConstMethod::constants_offset()));
 966     __ ldr(rscratch1, Address(rscratch1, ConstantPool::pool_holder_offset_in_bytes()));
 967     __ ldr(rscratch1, Address(rscratch1, mirror_offset));
 968     // copy mirror into activation frame
 969     __ str(rscratch1, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
 970     // pass handle to mirror
 971     __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
 972     __ bind(L);
 973   }
 974 
 975   // get native function entry point in r14
 976   Register native_entry_point = r14;
 977 
 978   {
 979     Label L;
 980     __ ldr(native_entry_point, Address(rmethod, Method::native_function_offset()));
 981     address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
 982     __ mov(rscratch2, unsatisfied);
 983     __ ldr(rscratch2, rscratch2);
 984     __ reg_printf("QWERTY native_entry_point = %p, unsatisfied_link_entry_point = %p\n", native_entry_point, rscratch2);
 985     __ cmp(native_entry_point, rscratch2);
 986     __ b(L, Assembler::NE);
 987     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
 988                                        InterpreterRuntime::prepare_native_call), rmethod);
 989     __ get_method(rmethod);
 990     __ ldr(native_entry_point, Address(rmethod, Method::native_function_offset()));
 991     __ bind(L);
 992   }
 993 
 994   // pass JNIEnv
 995   __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
 996 
 997   // It is enough that the pc() points into the right code
 998   // segment. It does not have to be the correct return pc.
 999   __ set_last_Java_frame(sp, rfp, (address)NULL, rscratch1);
1000 
1001   // change thread state
1002 #ifdef ASSERT
1003   {
1004     Label L;
1005     __ ldr(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
1006     __ cmp(rscratch1, _thread_in_Java);
1007     __ b(L, Assembler::EQ);
1008     __ stop("Wrong thread state in native stub");
1009     __ bind(L);
1010   }
1011 #endif
1012 
1013   // Change state to native
1014   __ mov(rscratch1, _thread_in_native);
1015   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1016   __ dmb(Assembler::ISH);
1017   __ str(rscratch1, Address(rscratch2));
1018 
1019   __ reg_printf("Calling native method, lr = %p & rmethod = %p\n", lr, rmethod);
1020   // Call the native method.
1021   /*__ reg_printf("**ONCALL**\nr0 : %p\nr1 : %p\nr2 : %p\n", r0, r1, r2);
1022   __ reg_printf("r3 : %p\n\nr4 : %p\nrloc : %p\n", r3, r4, rlocals);*/
1023   __ reg_printf("Stack Pointer on entry to native, sp = %p\n", sp);
1024   __ bl(native_entry_point);
1025   __ reg_printf("Returned from native, lr = %p, r1 = %p, r0 = %p\n", lr, r1, r0);
1026   __ maybe_isb();
1027   __ get_method(rmethod);
1028   // result potentially in r0, <r0:r1> or v0
1029 
1030   // make room for the pushes we're about to do
1031   //__ sub(rscratch1, sp, 4 * wordSize);
1032   //__ bic(sp, rscratch1, 0xf);
1033   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1034   // in order to extract the result of a method call. If the order of these
1035   // pushes change or anything else is added to the stack then the code in
1036   // interpreter_frame_result must also change.
1037   __ reg_printf("Before push dtos, ltos. sp = %p\n", sp);
1038   __ push(dtos);
1039   __ push(ltos);
1040 
1041   // change thread state
1042   __ mov(rscratch1, _thread_in_native_trans);
1043   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1044   __ dmb(Assembler::ISH);
1045   __ str(rscratch1, Address(rscratch2));
1046   __ reg_printf("before os::is_MP\n");
1047   if (os::is_MP()) {
1048     if (UseMembar) {
1049       // Force this write out before the read below
1050       __ membar(Assembler::AnyAny);
1051     } else {
1052       // Write serialization page so VM thread can do a pseudo remote membar.
1053       // We use the current thread pointer to calculate a thread specific
1054       // offset to write to within the page. This minimizes bus traffic
1055       // due to cache line collision.
1056       __ serialize_memory(rthread, rscratch2);
1057     }
1058   }
1059   __ reg_printf("after os::is_MP\n");
1060   // check for safepoint operation in progress and/or pending suspend requests
1061   {
1062     Label Continue;
1063     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1064     assert(SafepointSynchronize::_not_synchronized == 0,
1065            "SafepointSynchronize::_not_synchronized");
1066     __ ldr(rscratch2, rscratch2);
1067     Label L;
1068     __ cbnz(rscratch2, L);
1069     __ ldr(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
1070     __ cbz(rscratch2, Continue);
1071     __ bind(L);
1072 
1073     // Don't use call_VM as it will see a possible pending exception
1074     // and forward it and never return here preventing us from
1075     // clearing _last_native_pc down below. So we do a runtime call by
1076     // hand.
1077     //
1078     __ mov(c_rarg0, rthread);
1079     __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1080     //__ blrt(rscratch2, 1, 0, 0);
1081     __ bl(rscratch2);
1082     __ maybe_isb();
1083     __ get_method(rmethod);
1084     __ bind(Continue);
1085   }
1086   __ reg_printf("finished safepoint check\n");
1087   // change thread state
1088   __ mov(rscratch1, _thread_in_Java);
1089   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1090   __ dmb(Assembler::ISH);
1091   __ str(rscratch1, Address(rscratch2));
1092 
1093   // reset_last_Java_frame
1094   __ reset_last_Java_frame(true, true);
1095 
1096   // reset handle block
1097   __ ldr(rscratch2, Address(rthread, JavaThread::active_handles_offset()));
1098   __ mov(rscratch1, 0);
1099   __ str(rscratch1, Address(rscratch2, JNIHandleBlock::top_offset_in_bytes()));
1100 
1101   // If result is an oop unbox and store it in frame where gc will see it
1102   // and result handler will pick it up
1103   __ reg_printf("finished checking last_Java_frame\n");
1104   {
1105     Label no_oop, store_result;
1106     //__ bkpt(345);
1107     //__ adr(rscratch2, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1108     __ mov(rscratch2, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1109     __ reg_printf("Comparing rscratch2 = %p and result_handler = %p\n", rscratch2, result_handler);
1110 
1111     __ cmp(rscratch2, result_handler);
1112     __ b(no_oop, Assembler::NE);
1113     __ reg_printf("It's an oop.\n");
1114     // retrieve result
1115     __ pop(ltos);
1116     __ cbz(r0, store_result);
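    // The returned oop is a JNI handle; dereference it to obtain the raw oop
    // (a NULL handle is left as NULL by the cbz above).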
1117     __ ldr(r0, Address(r0));
1118     __ bind(store_result);
1119     __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
1120     // keep stack depth as expected by pushing oop which will eventually be discarded
1121     __ push(ltos);
1122     __ bind(no_oop);
1123   }
1124 
1125   {
1126     Label no_reguard;
1127     __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
1128     __ ldrb(rscratch1, Address(rscratch1));
1129     __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
1130     __ b(no_reguard, Assembler::NE);
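    // The yellow guard zone has been disabled (typically by a stack overflow in
    // the native code); re-enable (reguard) it before returning to Java.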
1131 
1132     __ pusha(); // XXX only save smashed registers
1133     __ mov(c_rarg0, rthread);
1134     __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1135     __ bl(rscratch2);
1136     __ popa(); // XXX only restore smashed registers
1137     __ bind(no_reguard);
1138   }
1139   __ reg_printf("Restoring java-ish things\n");
1140   // The method register is junk from after the thread_in_native transition
1141   // until here.  Also can't call_VM until the bcp has been
1142   // restored.  Need bcp for throwing exception below so get it now.
1143   __ get_method(rmethod);
1144   __ get_dispatch(); // used to save sp in for args
1145   // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
1146   // rbcp == code_base()
1147   __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
1148   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
1149   // handle exceptions (exception handling will handle unlocking!)
1150   {
1151     Label L;
1152     __ reg_printf("Checking pending exceptions\n");
1153     __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
1154     __ cbz(rscratch1, L);
1155     // Note: At some point we may want to unify this with the code
1156     // used in call_VM_base(); i.e., we should use the
1157     // StubRoutines::forward_exception code. For now this doesn't work
    // here because the sp is not correctly set at this point.
1159     __ reg_printf("Calling vm to throw_pending_exception\n");
1160 
1161     // Need to restore lr? - introduced on aarch32 port
1162     //__ ldr(lr, Address(rfp, frame::return_addr_offset));
1163 
1164     __ MacroAssembler::call_VM(noreg,
1165                                CAST_FROM_FN_PTR(address,
1166                                InterpreterRuntime::throw_pending_exception));
1167     __ should_not_reach_here();
1168     __ bind(L);
1169   }
1170 
1171   // do unlocking if necessary
1172   {
1173     Label L;
1174     __ reg_printf("testing if we need to unlock\n");
1175     __ ldr(rscratch1, Address(rmethod, Method::access_flags_offset()));
1176     __ tst(rscratch1, JVM_ACC_SYNCHRONIZED);
1177     __ b(L, Assembler::EQ);
1178     // the code below should be shared with interpreter macro
1179     // assembler implementation
1180     {
1181       Label unlock;
1182       // BasicObjectLock will be first in list, since this is a
1183       // synchronized method. However, need to check that the object
1184       // has not been unlocked by an explicit monitorexit bytecode.
1185 
      // monitor expected in c_rarg1 for slow unlock path
1187       __ lea (c_rarg1, Address(rfp,   // address of first monitor
1188                                (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1189                                           wordSize - sizeof(BasicObjectLock))));
1190 
1191       __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
1192       __ reg_printf("Checking if we are already unlocked\n");
1193       __ cbnz(rscratch1, unlock);
1194 
1195       // Entry already unlocked, need to throw exception
1196       __ MacroAssembler::call_VM(noreg,
1197                                  CAST_FROM_FN_PTR(address,
1198                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1199       __ should_not_reach_here();
1200 
1201       __ bind(unlock);
1202       __ reg_printf("Doing unlock\n");
1203       __ unlock_object(c_rarg1);
1204     }
1205     __ bind(L);
1206   }
1207   __ reg_printf("finished unlocking\n");
1208   // jvmti support
1209   // Note: This must happen _after_ handling/throwing any exceptions since
1210   //       the exception handler code notifies the runtime of method exits
1211   //       too. If this happens before, method entry/exit notifications are
1212   //       not properly paired (was bug - gri 11/22/99).
1213   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1214 
  // restore potential result in r1:r0 or d0, and call the result handler to
  // convert and handle the result
1217   __ reg_printf("Before pop dtos, ltos. sp = %p\n", sp);
1218   __ pop(ltos);
1219   __ pop(dtos);
1220 
1221   __ reg_printf("Calling result handler, r1 = %p, r0 = %p\n", r1, r0);
1222   __ bl(result_handler);
1223   __ reg_printf("Finished result_handler\n RFP NOW = %p, r0 = %p\n", rfp, r0);
1224 
1225   // remove activation restore sp to sender_sp
1226   __ ldr(rscratch1, Address(rfp,
1227                     frame::interpreter_frame_sender_sp_offset *
1228                     wordSize)); // get sender sp
1229   // remove frame anchor & restore sp
1230   __ leave();
1231 
1232   __ mov(sp, rscratch1); // Native frame so two extra fields
1233   __ reg_printf("Returning to Java execution, restored frame = %p, lr = %p\n\tRBCP = %p\n", rfp, lr, rbcp);
1234   __ b(lr);
1235 
1236   if (inc_counter) {
1237     // Handle overflow of counter and compile method
1238     __ bind(invocation_counter_overflow);
1239     generate_counter_overflow(&continue_after_compile);
1240   }
1241 
1242   return entry_point;
1243 }
1244 
1245 address InterpreterGenerator::generate_CRC32_update_entry() {
1246   if (UseCRC32Intrinsics) {
1247     address entry = __ pc();
1248 
1249     // rmethod: Method*
1250     // sp: args
1251 
1252     Label slow_path;
1253     // If we need a safepoint check, generate full interpreter entry.
1254     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1255     assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
1256     __ ldr(rscratch2, Address(rscratch2));
1257     __ cbnz(rscratch2, slow_path);
1258 
1259     // We don't generate local frame and don't align stack because
1260     // we call stub code and there is no safepoint on this path.
1261 
1262     // Load parameters
1263     const Register crc = c_rarg0;  // crc
1264     const Register val = c_rarg1;  // source java byte value
1265     const Register tbl = c_rarg2;  // scratch
1266 
1267     // Arguments are reversed on java expression stack
1268     __ ldr(val, Address(sp, 0));              // byte value
1269     __ ldr(crc, Address(sp, wordSize));       // Initial CRC
1270 
1271     __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
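    // The CRC32 algorithm operates on the bitwise-inverted value, so invert the
    // incoming crc, run the table-driven update, then invert the result back.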
1272     __ inv(crc, crc);
1273     __ update_byte_crc32(crc, val, tbl);
1274     __ inv(crc, crc); // result in c_rarg0
1275 
1276     __ mov(sp, r4);
1277     __ ret(lr);
1278 
1279     // generate a vanilla native entry as the slow path
1280     __ bind(slow_path);
1281 
1282     (void) generate_native_entry(false);
1283 
1284     return entry;
1285   }
1286   return generate_native_entry(false);
1287 }
1288 
1289 address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1290   if (UseCRC32Intrinsics) {
1291     address entry = __ pc();
1292 
    // rmethod: Method*
    // sp: senderSP must be preserved for slow path
1295 
1296     Label slow_path;
1297     // If we need a safepoint check, generate full interpreter entry.
1298     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1299     assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
1300     __ ldr(rscratch2, Address(rscratch2));
1301     __ cbnz(rscratch2, slow_path);
1302 
1303     // We don't generate local frame and don't align stack because
1304     // we call stub code and there is no safepoint on this path.
1305 
1306     // Load parameters
1307     const Register crc = c_rarg0;  // crc
1308     const Register buf = c_rarg1;  // source java byte array address
1309     const Register len = c_rarg2;  // length
1310     const Register off = len;      // offset (never overlaps with 'len')
1311 
1312     // Arguments are reversed on java expression stack
1313     // Calculate address of start element
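    // Expression stack layout for updateBytes(crc, byte[] b, off, len): the last
    // argument (len) is on top at sp + 0, off at sp + 1 word, the array (or the
    // long buffer address) at sp + 2 words, and crc above that; crc sits one word
    // higher for the updateByteBuffer variant because its address argument is a
    // two-word long.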
1314     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
1315       __ ldr(buf, Address(sp, 2*wordSize)); // long buf
1316       __ ldr(off, Address(sp, wordSize)); // offset
1317       __ add(buf, buf, off); // + offset
1318       __ ldr(crc, Address(sp, 4*wordSize)); // Initial CRC
1319     } else {
1320       __ ldr(buf, Address(sp, 2*wordSize)); // byte[] array
1321       __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
1322       __ ldr(off, Address(sp, wordSize)); // offset
1323       __ add(buf, buf, off); // + offset
1324       __ ldr(crc, Address(sp, 3*wordSize)); // Initial CRC
1325     }
1326     // Can now load 'len' since we're finished with 'off'
1327     __ ldr(len, Address(sp)); // Length
1328 
1329     __ mov(sp, r4); // Restore the caller's SP
1330 
1331     // We are frameless so we can just jump to the stub.
1332     __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
1333 
1334     // generate a vanilla native entry as the slow path
1335     __ bind(slow_path);
1336 
1337     (void) generate_native_entry(false);
1338 
1339     return entry;
1340   }
1341   return generate_native_entry(false);
1342 }
1343 
1344 //
1345 // Generic interpreted method entry to (asm) interpreter
1346 //
1347 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1348   // determine code generation flags
1349   bool inc_counter = UseCompiler || CountCompiledCalls;
1350 
1351   // r4: sender sp
1352   address entry_point = __ pc();
1353 
1354   const Address constMethod(rmethod, Method::const_offset());
1355   const Address access_flags(rmethod, Method::access_flags_offset());
1356   const Address size_of_parameters(r3,
1357                                    ConstMethod::size_of_parameters_offset());
1358   const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
1359 
1360   // get parameter size (always needed)
1361   // need to load the const method first
1362   __ ldr(r3, constMethod);
1363   __ load_unsigned_short(r2, size_of_parameters);
1364 
1365   // r2: size of parameters
1366 
1367   __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
1368   __ sub(r3, r3, r2); // r3 = no. of additional locals
1369 
1370   // see if we've got enough room on the stack for locals plus overhead.
1371   generate_stack_overflow_check();
1372 
1373   // compute beginning of parameters (rlocals)
1374   __ add(rlocals, sp, r2, lsl(2));
1375   __ sub(rlocals, rlocals, wordSize);
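  // rlocals now points at parameter 0, the highest-addressed parameter
  // word; local slots are addressed at successively lower addresses.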
1376 
1377   // Make room for locals
1378   __ sub(rscratch1, sp, r3, lsl(2));
1379   // Align the sp value
1380   __ bic(sp, rscratch1, StackAlignmentInBytes-1);
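  // bic clears the low alignment bits, rounding the new sp down to a
  // StackAlignmentInBytes boundary (the stack grows downwards, so this
  // only over-allocates slightly).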
1381 
1382   // r3 - # of additional locals
1383   // allocate space for locals
1384   // explicitly initialize locals
1385   {
1386     Label exit, loop;
1387     __ mov(rscratch2, 0);
1388     __ cmp(r3, 0);
1389     __ b(exit, Assembler::LE); // do nothing if r3 <= 0
1390     __ bind(loop);
1391     __ str(rscratch2, Address(__ post(rscratch1, wordSize)));
1392     __ subs(r3, r3, 1); // until everything initialized
1393     __ b(loop, Assembler::NE);
1394     __ bind(exit);
1395   }
1396   __ reg_printf("Done locals space\n", r2);
1397 
1398   // initialize fixed part of activation frame
1399   __ reg_printf("About to do fixed frame\n", r2);
1400   generate_fixed_frame(false);
1401   // And the base dispatch table
1402   __ get_dispatch();
1403   // make sure method is not native & not abstract
1404   __ reg_printf("Just done generate_fixed_frame; rmethod = %p\n", rmethod);
1405 #ifdef ASSERT
1406   __ ldr(r0, access_flags);
1407   {
1408     Label L;
1409     __ tst(r0, JVM_ACC_NATIVE);
1410     __ b(L, Assembler::EQ);
1411     __ stop("tried to execute native method as non-native");
1412     __ bind(L);
1413   }
1414   {
1415     Label L;
1416     __ tst(r0, JVM_ACC_ABSTRACT);
1417     __ b(L, Assembler::EQ);
1418     __ stop("tried to execute abstract method in interpreter");
1419     __ bind(L);
1420   }
1421 #endif
1422 
  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // code will check this flag.
1428 
  const Address do_not_unlock_if_synchronized(rthread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1431   __ mov(rscratch2, true);
1432   __ strb(rscratch2, do_not_unlock_if_synchronized);
1433 
1434   // increment invocation count & check for overflow
1435   Label invocation_counter_overflow;
1436   Label profile_method;
1437   Label profile_method_continue;
1438   if (inc_counter) {
1439     generate_counter_incr(&invocation_counter_overflow,
1440                           &profile_method,
1441                           &profile_method_continue);
1442     if (ProfileInterpreter) {
1443       __ bind(profile_method_continue);
1444     }
1445   }
1446 
1447   Label continue_after_compile;
1448   __ bind(continue_after_compile);
1449 
1450   bang_stack_shadow_pages(false, rscratch1);
1451   // Note rscratch1 will contain zero here
1452   // reset the _do_not_unlock_if_synchronized flag
1453   __ strb(rscratch1, do_not_unlock_if_synchronized);
1454 
  // check for synchronized methods
  // Must happen AFTER the invocation counter check and stack overflow check,
  // so the method is not locked if an overflow occurs.
1458   if (synchronized) {
1459     // Allocate monitor and lock method
1460     lock_method();
1461   } else {
1462     // no synchronization necessary
1463 #ifdef ASSERT
1464     {
1465       Label L;
1466       __ reg_printf("Checking synchronization, rmethod = %p\n", rmethod);
1467       __ ldr(r0, access_flags);
1468       __ tst(r0, JVM_ACC_SYNCHRONIZED);
1469       __ b(L, Assembler::EQ);
1470       __ stop("method needs synchronization");
1471       __ bind(L);
1472     }
1473 #endif
1474   }
1475 
1476   // start execution
1477 #ifdef ASSERT
1478   {
1479     Label L;
    const Address monitor_block_top(rfp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
1482     __ ldr(rscratch1, monitor_block_top);
1483     __ cmp(sp, rscratch1);
1484     __ b(L, Assembler::EQ);
1485     __ stop("broken stack frame setup in interpreter");
1486     __ bind(L);
1487   }
1488 #endif
1489 
1490   // jvmti support
1491   __ notify_method_entry();
1492   __ reg_printf("About to dispatch, rmethod = %p, rlocals = %p\n", rmethod, rlocals);
1493   __ dispatch_next(vtos);
1494   __ reg_printf("Finshed dispatch? rmethod = %p\n", rmethod);
1495   // invocation counter overflow
1496   if (inc_counter) {
1497     if (ProfileInterpreter) {
1498       // We have decided to profile this method in the interpreter
1499       __ bind(profile_method);
1500       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1501       __ set_method_data_pointer_for_bcp();
      // reload the Method* into r1; probably not needed here, but harmless
1503       __ get_method(r1);
1504       __ b(profile_method_continue);
1505     }
1506     // Handle overflow of counter and compile method
1507     __ bind(invocation_counter_overflow);
1508     generate_counter_overflow(&continue_after_compile);
1509   }
1510 
1511   __ reg_printf("Just completed normal entry, rmethod = %p\n", rmethod);
1512   return entry_point;
1513 }
1514 
1515 address AbstractInterpreterGenerator::generate_method_entry(
1516     AbstractInterpreter::MethodKind kind) {
1517   bool synchronized = false;
1518   address entry_point = NULL;
1519   InterpreterGenerator* ig = (InterpreterGenerator*) this;
1520 
1521   switch (kind) {
1522   case Interpreter::zerolocals:
1523     break;
1524   case Interpreter::zerolocals_synchronized:
1525     synchronized = true;
1526     break;
1527   case Interpreter::native:
1528     entry_point = ig->generate_native_entry(false);
1529     break;
1530   case Interpreter::native_synchronized:
1531     entry_point = ig->generate_native_entry(true);
1532     break;
1533   case Interpreter::empty:
1534     entry_point = ig->generate_empty_entry();
1535     break;
1536   case Interpreter::accessor:
1537     entry_point = ig->generate_accessor_entry();
1538     break;
1539   case Interpreter::abstract:
1540     entry_point = ig->generate_abstract_entry();
1541     break;
1542   case Interpreter::java_lang_math_sin:
1543   case Interpreter::java_lang_math_cos:
1544   case Interpreter::java_lang_math_tan:
1545   case Interpreter::java_lang_math_abs:
1546   case Interpreter::java_lang_math_sqrt:
1547   case Interpreter::java_lang_math_log:
1548   case Interpreter::java_lang_math_log10:
1549   case Interpreter::java_lang_math_pow:
1550   case Interpreter::java_lang_math_exp:
1551     entry_point = ig->generate_math_entry(kind);
1552     break;
1553   case Interpreter::java_lang_ref_reference_get:
1554     entry_point = ig->generate_Reference_get_entry();
1555     break;
1556   case Interpreter::java_util_zip_CRC32_update:
1557     entry_point = ig->generate_CRC32_update_entry();
1558     break;
1559   case Interpreter::java_util_zip_CRC32_updateBytes:
1560   case Interpreter::java_util_zip_CRC32_updateByteBuffer:
1561     entry_point = ig->generate_CRC32_updateBytes_entry(kind);
1562     break;
1563   default:
1564     ShouldNotReachHere();
1565     break;
1566   }
1567 
1568   if (entry_point != NULL) {
1569     return entry_point;
1570   }
1571 
1572   return ig->generate_normal_entry(synchronized);
1573 }
1574 
// These should never be compiled: if they were, the interpreter would
// prefer the compiled version to the intrinsic version generated here.
1577 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1578   switch (method_kind(m)) {
1579     case Interpreter::java_lang_math_sin     : // fall thru
1580     case Interpreter::java_lang_math_cos     : // fall thru
1581     case Interpreter::java_lang_math_tan     : // fall thru
1582     case Interpreter::java_lang_math_abs     : // fall thru
1583     case Interpreter::java_lang_math_log     : // fall thru
1584     case Interpreter::java_lang_math_log10   : // fall thru
1585     case Interpreter::java_lang_math_sqrt    : // fall thru
1586     case Interpreter::java_lang_math_pow     : // fall thru
1587     case Interpreter::java_lang_math_exp     :
1588       return false;
1589     default:
1590       return true;
1591   }
1592 }
1593 
1594 // How much stack a method activation needs in words.
1595 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
1596   const int entry_size = frame::interpreter_frame_monitor_size();
1597 
1598   // total overhead size: entry_size + (saved rfp thru expr stack
1599   // bottom).  be sure to change this if you add/subtract anything
1600   // to/from the overhead area
1601   const int overhead_size =
1602     -(frame::interpreter_frame_initial_sp_offset) + entry_size;
1603 
1604   const int stub_code = frame::entry_frame_after_call_words;
1605   const int method_stack = (method->max_locals() + method->max_stack()) *
1606                            Interpreter::stackElementWords;
1607   return (overhead_size + method_stack + stub_code);
1608 }
1609 
1610 // asm based interpreter deoptimization helpers
1611 int AbstractInterpreter::size_activation(int max_stack,
1612                                          int temps,
1613                                          int extra_args,
1614                                          int monitors,
1615                                          int callee_params,
1616                                          int callee_locals,
1617                                          bool is_top_frame) {
1618   // Note: This calculation must exactly parallel the frame setup
1619   // in AbstractInterpreterGenerator::generate_method_entry.
1620 
1621   // fixed size of an interpreter frame:
1622   int overhead = frame::sender_sp_offset -
1623                  frame::interpreter_frame_initial_sp_offset;
  // Our locals were accounted for by the caller (or last_frame_adjust
  // on the transition). Since the callee parameters already account
  // for the callee's params, we only need to account for the extra
  // locals.
  int size = overhead +
         (callee_locals - callee_params) * Interpreter::stackElementWords +
         monitors * frame::interpreter_frame_monitor_size() +
         temps * Interpreter::stackElementWords + extra_args;
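
  // e.g. with callee_locals == callee_params, two monitors, three stack
  // temps and no extra args this is
  //   overhead + 2 * frame::interpreter_frame_monitor_size()
  //            + 3 * Interpreter::stackElementWords
  // words, before the alignment round-up below.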
1632 
  // On AArch32 we keep the stack pointer aligned to StackAlignmentInBytes,
  // so we must round up to an even number of words here.
1635   size = round_to(size, 2);
1636 
1637   return size;
1638 }
1639 
1640 void AbstractInterpreter::layout_activation(Method* method,
1641                                             int tempcount,
1642                                             int popframe_extra_args,
1643                                             int moncount,
1644                                             int caller_actual_parameters,
1645                                             int callee_param_count,
1646                                             int callee_locals,
1647                                             frame* caller,
1648                                             frame* interpreter_frame,
1649                                             bool is_top_frame,
1650                                             bool is_bottom_frame) {
1651   // The frame interpreter_frame is guaranteed to be the right size,
1652   // as determined by a previous call to the size_activation() method.
1653   // It is also guaranteed to be walkable even though it is in a
1654   // skeletal state
1655 
1656   int max_locals = method->max_locals() * Interpreter::stackElementWords;
1657   int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1658     Interpreter::stackElementWords;
1659 
1660 #ifdef ASSERT
1661   assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
1662 #endif
1663 
1664   interpreter_frame->interpreter_frame_set_method(method);
  // NOTE the difference in using sender_sp and
  // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
  // the original sp of the caller (the unextended_sp) and
  // sender_sp is fp+8/16 (32bit/64bit) XXX
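  // locals will point at the highest-addressed local slot (local 0);
  // the remaining locals live at successively lower addresses.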
1669   intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
1670 
1671 #ifdef ASSERT
1672   if (caller->is_interpreted_frame()) {
1673     assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
1674   }
1675 #endif
1676 
1677   interpreter_frame->interpreter_frame_set_locals(locals);
1678   BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1679   BasicObjectLock* monbot = montop - moncount;
1680   interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1681 
1682   // Set last_sp
1683   intptr_t*  last_sp = (intptr_t*) monbot -
1684     tempcount*Interpreter::stackElementWords -
1685     popframe_extra_args;
1686   interpreter_frame->interpreter_frame_set_last_sp(last_sp);
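  // last_sp sits below the monitor area, leaving room for tempcount
  // expression-stack words plus any popframe arguments.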
1687 
1688   // All frames but the initial (oldest) interpreter frame we fill in have
1689   // a value for sender_sp that allows walking the stack but isn't
1690   // truly correct. Correct the value here.
1691   if (extra_locals != 0 &&
1692       interpreter_frame->sender_sp() ==
1693       interpreter_frame->interpreter_frame_sender_sp()) {
1694     interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
1695                                                        extra_locals);
1696   }
1697   *interpreter_frame->interpreter_frame_cache_addr() =
1698     method->constants()->cache();
1699 }
1700 
1701 
1702 //-----------------------------------------------------------------------------
1703 // Exceptions
1704 
1705 void TemplateInterpreterGenerator::generate_throw_exception() {
1706   // Entry point in previous activation (i.e., if the caller was
1707   // interpreted)
1708   Interpreter::_rethrow_exception_entry = __ pc();
1709   __ reg_printf("rethrow_exception_entry\n");
1710 
1711   // Restore sp to interpreter_frame_last_sp even though we are going
1712   // to empty the expression stack for the exception processing.
1713   __ mov(rscratch1, 0);
1714   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1715   // r0: exception
1716   // r3: return address/pc that threw exception
1717   __ restore_bcp();    // rbcp points to call/send
1718   __ restore_locals();
1719   __ restore_constant_pool_cache();
1720   __ get_dispatch();
1721 
1722   // Entry point for exceptions thrown within interpreter code
1723   Interpreter::_throw_exception_entry = __ pc();
1724   __ reg_printf("throw_exception_entry\n");
1725   // If we came here via a NullPointerException on the receiver of a
1726   // method, rmethod may be corrupt.
1727   __ get_method(rmethod);
1728   // expression stack is undefined here
1729   // r0: exception
1730   // rbcp: exception bcp
1731   __ verify_oop(r0);
1732   __ mov(c_rarg1, r0);
1733 
1734   // expression stack must be empty before entering the VM in case of
1735   // an exception
1736   __ empty_expression_stack();
1737   // find exception handler address and preserve exception oop
1738   __ call_VM(r3,
1739              CAST_FROM_FN_PTR(address,
1740                           InterpreterRuntime::exception_handler_for_exception),
1741              c_rarg1);
1742 
1743   // Calculate stack limit
1744   /*__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
1745   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
1746   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
1747   __ ldr(rscratch2,
1748          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
1749   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
1750   __ bic(sp, rscratch1, 0xf);*/
1751   // Don't do this as we don't have a stack pointer
1752 
1753   // r0: exception handler entry point
1754   // r3: preserved exception oop
1755   // rbcp: bcp for exception handler
1756   __ push_ptr(r3); // push exception which is now the only value on the stack
1757   __ b(r0); // jump to exception handler (may be _remove_activation_entry!)
1758 
1759   // If the exception is not handled in the current frame the frame is
1760   // removed and the exception is rethrown (i.e. exception
1761   // continuation is _rethrow_exception).
1762   //
  // Note: At this point the bci still refers to the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).
1767 
1768   //
1769   // JVMTI PopFrame support
1770   //
1771 
1772   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1773   __ print_method_exit(false);
1774   __ reg_printf("remove_activation_preserving_args_entry\n");
1775   __ empty_expression_stack();
1776   // Set the popframe_processing bit in pending_popframe_condition
1777   // indicating that we are currently handling popframe, so that
1778   // call_VMs that may happen later do not trigger new popframe
1779   // handling cycles.
1780   __ ldr(r3, Address(rthread, JavaThread::popframe_condition_offset()));
1781   __ orr(r3, r3, JavaThread::popframe_processing_bit);
1782   __ str(r3, Address(rthread, JavaThread::popframe_condition_offset()));
1783 
1784   {
1785     // Check to see whether we are returning to a deoptimized frame.
1786     // (The PopFrame call ensures that the caller of the popped frame is
1787     // either interpreted or compiled and deoptimizes it if compiled.)
1788     // In this case, we can't call dispatch_next() after the frame is
1789     // popped, but instead must save the incoming arguments and restore
1790     // them after deoptimization has occurred.
1791     //
1792     // Note that we don't compare the return PC against the
1793     // deoptimization blob's unpack entry because of the presence of
1794     // adapter frames in C2.
1795     Label caller_not_deoptimized;
1796     __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
1797     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1798                                InterpreterRuntime::interpreter_contains), c_rarg1);
1799     __ cbnz(r0, caller_not_deoptimized);
1800 
1801     // Compute size of arguments for saving when returning to
1802     // deoptimized caller
1803     __ get_method(r0);
1804     __ ldr(r0, Address(r0, Method::const_offset()));
1805     __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
1806                                                     size_of_parameters_offset())));
1807     __ lsl(r0, r0, Interpreter::logStackElementSize);
1808     __ restore_locals(); // XXX do we need this?
1809     __ sub(rlocals, rlocals, r0);
1810     __ add(rlocals, rlocals, wordSize);
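    // rlocals now points at the lowest-addressed argument word, i.e. the
    // start of the r0-byte area to preserve.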
1811     // Save these arguments
1812     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1813                                            Deoptimization::
1814                                            popframe_preserve_args),
1815                           rthread, r0, rlocals);
1816 
1817     __ remove_activation(vtos,
1818                          /* throw_monitor_exception */ false,
1819                          /* install_monitor_exception */ false,
1820                          /* notify_jvmdi */ false);
1821 
1822     // Inform deoptimization that it is responsible for restoring
1823     // these arguments
1824     __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
1825     __ str(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
1826 
1827     // Continue in deoptimization handler
1828     __ b(lr);
1829 
1830     __ bind(caller_not_deoptimized);
1831   }
1832 
1833   __ remove_activation(vtos,
1834                        /* throw_monitor_exception */ false,
1835                        /* install_monitor_exception */ false,
1836                        /* notify_jvmdi */ false);
1837 
1838   // Restore the last_sp and null it out
1839   __ ldr(sp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1840   __ mov(rscratch1, 0);
1841   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1842   // remove_activation restores sp?
1843 
1844   __ restore_bcp();
1845   __ restore_locals();
1846   __ restore_constant_pool_cache();
1847   __ get_method(rmethod);
1848   __ get_dispatch();
1849 
1850   // The method data pointer was incremented already during
1851   // call profiling. We have to restore the mdp for the current bcp.
1852   if (ProfileInterpreter) {
1853     __ set_method_data_pointer_for_bcp();
1854   }
1855 
1856   // Clear the popframe condition flag
1857   __ mov(rscratch1, JavaThread::popframe_inactive);
1858   __ str(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
1859   assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
1860 
1861 #if INCLUDE_JVMTI
1862   {
1863     Label L_done;
1864     __ ldrb(rscratch1, Address(rbcp, 0));
1865     __ cmp(rscratch1, Bytecodes::_invokestatic);
1866     __ b(L_done, Assembler::EQ);
1867 
1868     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1869     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1870 
1871     __ ldr(c_rarg0, Address(rlocals, 0));
1872     __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
1873 
1874     __ cbz(r0, L_done);
1875 
1876     __ str(r0, Address(sp, 0));
1877     __ bind(L_done);
1878   }
1879 #endif // INCLUDE_JVMTI
1880 
1881   // Restore machine SP
1882   /*__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
1883   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
1884   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
1885   __ ldr(rscratch2,
1886          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
1887   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
1888   __ bic(sp, rscratch1, 0xf);*/
1889 
1890   __ dispatch_next(vtos);
1891   // end of PopFrame support
1892 
1893   Interpreter::_remove_activation_entry = __ pc();
1894   __ print_method_exit(false);
1895   __ reg_printf("remove_activation_entry\n");
1896 
1897   // preserve exception over this code sequence
1898   __ pop_ptr(r0);
1899   __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
1900   // remove the activation (without doing throws on illegalMonitorExceptions)
1901   __ remove_activation(vtos, false, true, false);
  // restore exception
1904   __ get_vm_result(r0, rthread);
1905 
1906   // In between activations - previous activation type unknown yet
1907   // compute continuation point - the continuation point expects the
1908   // following registers set up:
1909   //
1910   // r0: exception
1911   // lr: return address/pc that threw exception
1912   // rsp: expression stack of caller
1913   // rfp: fp of caller
1914   // FIXME: There's no point saving LR here because VM calls don't trash it
1915   __ strd(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
1916   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1917                           SharedRuntime::exception_handler_for_return_address),
1918                         rthread, lr);
1919   __ mov(r1, r0);                               // save exception handler
1920   __ ldrd(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
1921   // We might be returning to a deopt handler that expects r3 to
1922   // contain the exception pc
1923   __ mov(r3, lr);
1924   // Note that an "issuing PC" is actually the next PC after the call
1925   __ b(r1);                                    // jump to exception
1926                                                 // handler of caller
1927 }
1928 
1929 
1930 //
1931 // JVMTI ForceEarlyReturn support
1932 //
1933 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1934   address entry = __ pc();
1935   __ restore_bcp();
1936   __ restore_locals();
1937   __ empty_expression_stack();
1938   __ load_earlyret_value(state);
1939 
1940   __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
1941   Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
1942 
1943   // Clear the earlyret state
1944   assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
1945   __ mov(rscratch2, 0);
1946   __ str(rscratch2, cond_addr);
1947 
1948   __ remove_activation(state,
1949                        false, /* throw_monitor_exception */
1950                        false, /* install_monitor_exception */
1951                        true); /* notify_jvmdi */
1952   __ b(lr);
1953 
1954   return entry;
1955 } // end of ForceEarlyReturn support
1956 
1957 
1958 
1959 //-----------------------------------------------------------------------------
1960 // Helper for vtos entry point generation
1961 
1962 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1963                                                          address& bep,
1964                                                          address& cep,
1965                                                          address& sep,
1966                                                          address& aep,
1967                                                          address& iep,
1968                                                          address& lep,
1969                                                          address& fep,
1970                                                          address& dep,
1971                                                          address& vep) {
1972   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1973   Label L;
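  // Each tos-state entry pushes its cached top-of-stack value onto the
  // expression stack and then joins the common vtos path at L.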
1974   aep = __ pc();  __ push_ptr();  __ b(L);
1975   fep = __ pc();  __ push_f();    __ b(L);
1976   dep = __ pc();  __ push_d();    __ b(L);
1977   lep = __ pc();  __ push_l();    __ b(L);
1978   bep = cep = sep =
1979   iep = __ pc();  __ push_i();
1980   vep = __ pc();
1981   __ bind(L);
1982   generate_and_dispatch(t);
1983 }
1984 
1985 //-----------------------------------------------------------------------------
1986 // Generation of individual instructions
1987 
1988 // helpers for generate_and_dispatch
1989 
1990 
1991 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1992   : TemplateInterpreterGenerator(code) {
1993    generate_all(); // down here so it can be "virtual"
1994 }
1995 
1996 //-----------------------------------------------------------------------------
1997 
1998 // Non-product code
1999 #ifndef PRODUCT
2000 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
2001   address entry = __ pc();
2002 
2003   __ push(state);
  // Save all registers to the stack except SP and PC
2005   __ push(RegSet::range(r0, r12) + lr, sp);
2006   __ mov(c_rarg2, r0);  // Pass itos
2007   __ mov(c_rarg3, r1);  // Pass ltos/dtos high part
2008   __ call_VM(noreg,
2009              CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
2010              c_rarg1, c_rarg2, c_rarg3);
2011   __ pop(RegSet::range(r0, r12) + lr, sp);
2012   __ pop(state);
2013   __ b(lr);                                   // return from result handler
2014 
2015   return entry;
2016 }
2017 
2018 void TemplateInterpreterGenerator::count_bytecode() {
2019   __ push(c_rarg0);
2020   __ push(rscratch1);
2021   __ push(rscratch2);
2022   Label L;
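  // Atomically increment BytecodeCounter::_counter_value using an
  // ldrex/strex retry loop.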
2023   __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
2024   __ bind(L);
2025   __ ldrex(rscratch1, rscratch2);
2026   __ add(rscratch1, rscratch1, 1);
  // strex stores the 2nd arg to the destination addressed by the 3rd arg
  // and writes the status to the 1st arg, so the 1st and 2nd must be
  // different registers.
2029   __ strex(c_rarg0, rscratch1, rscratch2);
2030   __ cmp(c_rarg0, 0);
2031   __ b(L, Assembler::NE);
2032   __ pop(rscratch2);
2033   __ pop(rscratch1);
2034   __ pop(c_rarg0);
2035 }
2036 
2037 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
2038 
2039 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
2040 
2041 
2042 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2043   // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
2045   // the tosca in-state for the given template.
2046 
2047   assert(Interpreter::trace_code(t->tos_in()) != NULL,
2048          "entry must have been generated");
2049   __ bl(Interpreter::trace_code(t->tos_in()));
2050 }
2051 
2052 
2053 void TemplateInterpreterGenerator::stop_interpreter_at() {
2054   Label L;
2055   __ push(rscratch1);
2056   __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
2057   __ ldr(rscratch1, Address(rscratch1));
2058   __ mov(rscratch2, StopInterpreterAt);
2059   __ cmp(rscratch1, rscratch2);
2060   __ b(L, Assembler::NE);
2061   __ bkpt(0);
2062   __ bind(L);
2063   __ pop(rscratch1);
2064 }
2065 
2066 #endif // !PRODUCT
2067 #endif // ! CC_INTERP