/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;
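
// For example, to have the VM print the generated interpreter and its size
// (PrintInterpreter is a diagnostic flag, so it typically needs unlocking):
//   java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version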

#define __ _masm->

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);

#ifdef AARCH64
  // expand expr. stack and extended SP to avoid cutting SP in call_VM
  __ mov(Rstack_top, SP);
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ check_stack_top();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);

  __ ldp(ZR,      c_rarg1, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));

  __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
#else

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed */);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // A few alternatives to an always-load-FP-registers approach:
  // - parse the method signature to detect FP arguments
  // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
  // The latter was originally implemented and tested, but a conditional path could
  // eliminate any gain from avoiding 8 double word loads.
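  // The single load below pulls D0..D7 (8 doublewords, 64 bytes) off the
  // stack and advances SP past them (writeback).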
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__
#endif // AARCH64

  __ ret(Rsaved_ret_addr);

  return entry;
}


//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#endif

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // TODO: ARM
  return NULL;

  address entry_point = __ pc();
  STOP("generate_math_entry");
  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
                        //  (stack grows negative)
    __ b(L, ls); // check if frame is complete
    __ stop ("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
  address entry = __ pc();

  // index is in R4_ArrayIndexOutOfBounds_index

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  // Array expected in R1.
  __ mov(R2, R4_ArrayIndexOutOfBounds_index);

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is in R2_ClassCastException_obj

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  __ mov(R1, R2_ClassCastException_obj);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             R1);

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  InlinedString Lname(name);
  InlinedString Lmessage(message);

  if (pass_oop) {
    // object is at TOS
    __ pop_ptr(R2);
  }

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != NULL) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != NULL)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // Restore stack bottom in case i2c adjusted stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as a marker that SP is now tos until the next Java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);

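  // The low byte of the ConstantPoolCacheEntry flags holds the callee's
  // parameter size in stack slots; use it to pop the arguments.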
  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));

#ifndef AARCH64
  __ convert_retval_to_tos(state);
#endif // !AARCH64

  __ check_and_handle_popframe();
  __ check_and_handle_earlyret();

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  if (continuation == NULL) {
    __ dispatch_next(state, step);
  } else {
    __ jump_to_entry(continuation);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN: __ c2bool(R0); break;
    case T_CHAR   : AARCH64_ONLY(__ zero_extend(R0, R0, 16);)  break;
    case T_BYTE   : AARCH64_ONLY(__ sign_extend(R0, R0,  8);)  break;
    case T_SHORT  : AARCH64_ONLY(__ sign_extend(R0, R0, 16);)  break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : // fall through
    case T_FLOAT  : // fall through
    case T_DOUBLE : /* nothing to do */          break;
    case T_OBJECT :
      // retrieve result from frame
      __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
      // and verify it
      __ verify_oop(R0);
      break;
    default       : __ should_not_reach_here(); break;
  }
  __ ret();
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
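// Illustrative C sketch of the non-tiered path below (not the emitted code;
// names shortened for clarity):
//   icnt = invocation_counter + count_increment;   // status bits kept in icnt
//   bcnt = backedge_counter & count_mask_value;    // status bits masked out
//   if ((unsigned)(icnt + bcnt) >= interpreter_invocation_limit) goto overflow;
// The counters are never reset here, so once the limit is crossed the test
// keeps succeeding on later invocations; this is the 'sticky' test mentioned above.
//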
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
                                                 Label* profile_method,
                                                 Label* profile_method_continue) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());

  // Note: In tiered we increment either counters in MethodCounters* or
  // in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
      __ cbz(R1_tmp, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(R1_tmp,
                    in_bytes(MethodData::invocation_counter_offset()) +
                    in_bytes(InvocationCounter::counter_offset()));
      const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    __ get_method_counters(Rmethod, Rcounters, done);
    const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(Rcounters,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());

    const Register Ricnt = R0_tmp;  // invocation counter
    const Register Rbcnt = R1_tmp;  // backedge counter

    __ get_method_counters(Rmethod, Rcounters, done);

    if (ProfileInterpreter) {
      const Register Riic = R1_tmp;
      __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
      __ add(Riic, Riic, 1);
      __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters

    __ ldr_u32(Ricnt, invocation_counter);
    __ ldr_u32(Rbcnt, backedge_counter);

    __ add(Ricnt, Ricnt, InvocationCounter::count_increment);

#ifdef AARCH64
    __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
#else
    __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
#endif // AARCH64

    __ str_32(Ricnt, invocation_counter);            // save invocation count
    __ add(Ricnt, Ricnt, Rbcnt);                     // add both counters

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL implies !native_call.
    // The BytecodeInterpreter only calls this for native methods, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      assert(profile_method_continue != NULL, "should be non-null");

      // Test to see if we should create a method data oop
      // Reuse R1_tmp as we don't need backedge counters anymore.
      Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ldr_s32(R1_tmp, profile_limit);
      __ cmp_32(Ricnt, R1_tmp);
      __ b(*profile_method_continue, lt);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(R1_tmp, *profile_method);
    }

    Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ldr_s32(R1_tmp, invoke_limit);
    __ cmp_32(Ricnt, R1_tmp);
    __ b(*overflow, hs);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating whether the counter overflow occurs at a backward branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
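  // Here the argument is false (NULL bcp): this overflow happened at method
  // entry, not at a backward branch.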
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // R11 = max expression stack slots (AArch64 only)
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved registers, through expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = JavaThread::stack_shadow_zone_size();

  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = JavaThread::stack_guard_zone_size();

  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
#ifndef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
#endif // !AARCH64
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

#ifdef AARCH64
  Label L;
  __ b(L, hi);
  __ mov(SP, Rsender_sp);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry());
  __ bind(L);
#else
  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
#endif // AARCH64
}
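
// Illustrative C sketch of the check above, assuming a downward-growing stack
// (not the emitted code):
//   char* lowest = thread->stack_base() - thread->stack_size();      // R0
//   char* needed = SP - overhead_size - reserved_pages - guard_pages
//                     - locals_bytes - expr_stack_bytes;             // Rtemp
//   if (needed <= lowest) throw StackOverflowError;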


// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

  #ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method doesn't need synchronization");
      __ bind(L);
    }
  #endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
#ifdef AARCH64
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
#else
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
#endif // AARCH64
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }
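  // The block above picked the lock object into R0; roughly (illustrative,
  // helper names are not real APIs):
  //   R0 = is_static(method) ? holder_klass_mirror(method) : locals[0] /* receiver */;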

  // add space for monitor & lock

#ifdef AARCH64
  __ check_extended_sp(Rtemp);
  __ sub(SP, SP, entry_size);                  // adjust extended SP
  __ mov(Rtemp, SP);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

  __ sub(Rstack_top, Rstack_top, entry_size);
  __ check_stack_top_on_expansion();
                                              // add space for a monitor entry
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                              // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes()));
                                              // store object
  __ mov(R1, Rstack_top);                     // monitor entry address
  __ lock_object(R1);
}

#ifdef AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.
//
// On entry:
//   R10 = ConstMethod
//   R11 = max expr. stack (in slots), if !native_call
//
// On exit:
//   Rbcp, Rstack_top are initialized, SP is extended
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Incoming registers
  const Register RconstMethod = R10;
  const Register RmaxStack = R11;
  // Temporary registers
  const Register RextendedSP = R0;
  const Register Rcache = R1;
  const Register Rmdp = ProfileInterpreter ? R2 : ZR;

  // Generates the following stack layout (stack grows up in this picture):
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ extended SP        ]
  // [ expr. stack top    ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
  __ mov(FP, SP);                                     // establish new FP

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, ZR);                                 // bcp = 0 for native calls
  } else {
    __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase
  }

  // Rstack_top & RextendedSP
  __ sub(Rstack_top, SP, 10*wordSize);
  if (native_call) {
    __ sub(RextendedSP, Rstack_top, align_up(wordSize, StackAlignmentInBytes));    // reserve 1 slot for exception handling
  } else {
    __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
    __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
  }
  __ mov(SP, RextendedSP);
  __ check_stack_top();

  // Load Rmdp
  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
    __ csel(Rmdp, ZR, Rtemp, eq);
  }

  // Load Rcache
  __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
  __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);

  // Build fixed frame
  __ stp(Rstack_top, Rbcp,     Address(FP, -10*wordSize));
  __ stp(Rlocals, Rcache,      Address(FP,  -8*wordSize));
  __ stp(Rmdp, Rtemp,          Address(FP,  -6*wordSize));
  __ stp(Rmethod, RextendedSP, Address(FP,  -4*wordSize));
  __ stp(ZR, Rsender_sp,       Address(FP,  -2*wordSize));
  assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
  assert(frame::interpreter_frame_stack_top_offset  == -2, "stack top broken");
}

#else // AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);                                        // save return address
  __ push(FP);                                        // save FP
  __ mov(FP, SP);                                     // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);                                        // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset()); // get codebase
  }

  __ push(Rmethod);                                    // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp);                                    // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  __ push(Rtemp);                                      // set constant pool cache
  __ push(Rlocals);                                    // set locals pointer
  __ push(Rbcp);                                       // set bcp
  __ push(R0);                                         // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0));                          // set expression stack bottom
}

#endif // AARCH64

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions,
// but the frame layout they create is very similar. The other method entry
// types are special-purpose entries that combine entry and interpretation
// in one, for trivial methods like accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// Rmethod: Method*
// Rthread: thread
// Rsender_sp:  sender sp
// Rparams (SP on 32-bit ARM): pointer to method parameters
//
// LR: return address
//
// Stack layout immediately at entry
//
// [ optional padding(*)] <--- SP (AArch64)
// [ parameter n        ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ reserved stack (*) ] <--- SP (AArch64)
// [ expr. stack        ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry      ]
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved Rbcp         ]
// [ current Rlocals    ]
// [ cache              ]
// [ mdx                ]
// [ mirror             ]
// [ Method*            ]
//
// 32-bit ARM:
// [ last_sp            ]
//
// AArch64:
// [ extended SP (*)    ]
// [ stack top (*)      ]
//
// [ sender_sp          ]
// [ saved FP           ] <--- FP
// [ saved LR           ]
// [ optional padding(*)]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- Rlocals
//
// (*) - AArch64 only
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load.
  //   Thus we can use the regular method entry code to generate the NPE.
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
  // Rparams: parameters

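  // At the Java level the fast path is roughly equivalent to (illustrative):
  //   public T get() { return this.referent; }  // with a weak-reference load barrier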
  address entry = __ pc();
  Label slow_path;
  const Register Rthis = R0;
  const Register Rret_addr = Rtmp_save1;
  assert_different_registers(Rthis, Rret_addr, Rsender_sp);

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ldr(Rthis, Address(Rparams));
  __ cbz(Rthis, slow_path);

  // Preserve LR
  __ mov(Rret_addr, LR);

  // Load the value of the referent field.
  const Address field_address(Rthis, referent_offset);
  __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);

  // _areturn
  __ mov(SP, Rsender_sp);
  __ ret(Rret_addr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
  const Register Rsig_handler    = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
  const Register Rnative_code    = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
  const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);

#ifdef AARCH64
  const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
  const Register Rsaved_result = Rnative_code;
  const FloatRegister Dsaved_result = V8;
#else
  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
#endif // AARCH64


#ifdef AARCH64
  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params,  Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
#else
  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params,  Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
#endif // AARCH64

  // Native calls don't need the stack size check since they have no expression
  // stack, the arguments are already on the stack, and we only add a handful
  // of words to the stack.

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));

#ifdef AARCH64
  int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
  if (__ can_post_interpreter_events()) {
    // extra space for saved results
    extra_stack_reserve += 2*wordSize;
  }
  // reserve extra stack space and nullify oop_temp slot
  __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
#else
  // reserve stack space for oop_temp
  __ mov(R0, 0);
  __ push(R0);
#endif // AARCH64

  generate_fixed_frame(true); // Note: R9 is now saved in the frame

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER the invocation counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
        __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // Allocate stack space for arguments

#ifdef AARCH64
  __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
  __ align_reg(SP, Rtemp, StackAlignmentInBytes);

  // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
  // 8 * wordSize for GPRs
  // 8 * wordSize for FPRs
  int reg_arguments = align_up(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else

  // C functions need aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = align_up((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // of parameters passed on registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__
#endif // AARCH64

  __ sub(SP, SP, reg_arguments);


  // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

#ifdef AARCH64
  __ mov(Rtemp, _thread_in_native);
  __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
  // STLR is used to force all preceding writes to be observed prior to thread state change
  __ stlr_w(Rtemp, Rtemp2);
#else
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
#endif // AARCH64
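  // Conceptually, both forms publish the new thread state only after all
  // preceding writes are visible; roughly (illustrative):
  //   ordered_store(&thread->_thread_state, _thread_in_native);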

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(MacroAssembler::StoreLoad, Rtemp);

  __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());

  // Protect the return value in the interleaved code: save it to callee-save registers.
#ifdef AARCH64
  __ mov(Rsaved_result, R0);
  __ fmov_d(Dsaved_result, D0);
#else
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__
#endif // AARCH64

  {
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
    __ cond_cmp(R3, 0, eq);

#ifdef AARCH64
    Label L;
    __ b(L, eq);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(L);
#else
    __ mov(R0, Rthread, ne);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }
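  // The block above is roughly: if a safepoint is in progress or
  // thread->suspend_flags != 0, call
  // JavaThread::check_special_condition_for_native_trans(thread).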

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop.
  {
    Label Lnot_oop;
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rresult_handler, Rtemp);
    __ b(Lnot_oop, ne);
    Register value = AARCH64_ONLY(Rsaved_result) NOT_AARCH64(Rsaved_result_lo);
    __ resolve_jobject(value,   // value
                       Rtemp,   // tmp1
                       R1_tmp); // tmp2
    // Store resolved result in frame for GC visibility.
    __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(Lnot_oop);
  }
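  // resolve_jobject is roughly (illustrative): a JNI handle is a pointer to an
  // oop slot, so value = (handle == NULL) ? NULL : *(oop*)handle, with the
  // appropriate barrier applied for weak handles.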

#ifdef AARCH64
  // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
  __ restore_sp_after_call(Rtemp);
  __ check_stack_top();
#endif // AARCH64

  // reguard stack if StackOverflow exception happened while in native.
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
#ifdef AARCH64
    Label L;
    __ b(L, ne);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
    __ bind(L);
#else
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ mov_pc_to(Rexception_pc);
    __ b(StubRoutines::forward_exception_entry());
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
    __ unlock_object(R1);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
#ifdef AARCH64
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
#else
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
#endif // AARCH64

  // Restore the result. Oop result is restored from the stack.
#ifdef AARCH64
  __ mov(R0, Rsaved_result);
  __ fmov_d(D0, Dsaved_result);

  __ blr(Rresult_handler);
#else
  __ mov(R0, Rsaved_result_lo);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__
  __ blx(Rresult_handler);
#endif // AARCH64

  // Restore FP/LR, sender_sp and return
#ifdef AARCH64
  __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
  __ ldp(FP, LR, Address(FP));
  __ mov(SP, Rtemp);
#else
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
#endif // AARCH64

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);

#ifdef AARCH64
  const Register RmaxStack = R11;
  const Register RlocalsBase = R12;
#endif // AARCH64

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals

#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
  // none of which are active at the same time, so we just need to make sure there is enough room
  // for the biggest user:
  //   - reserved slot for exception handling
  //   - reserved slots for JSR292. Method::extra_stack_entries() is the size.
  //   - 3 reserved slots so get_method_counters() can save some registers before call_VM().
  __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

#ifdef AARCH64

  // allocate space for locals
  {
    __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
    __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
  }

  // explicitly initialize locals
  {
    Label zero_loop, done;
    __ cbz(R3, done);

    __ tbz(R3, 0, zero_loop);
    __ subs(R3, R3, 1);
    __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
    __ b(done, eq);

    __ bind(zero_loop);
    __ subs(R3, R3, 2);
    __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
    __ b(zero_loop, ne);

    __ bind(done);
  }

#else
  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
  Label loop;
  __ mov(R0, 0);
  __ bind(loop);

  // #1
  __ subs(R3, R3, 1);
  __ push(R0, ge);

  // #2
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #3
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #4
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  __ b(loop, gt);
#endif // AARCH64
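  // Both paths above zero-initialize the additional locals, i.e. roughly:
  //   for (int i = 0; i < number_of_additional_locals; i++) push(0);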

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  __ restore_dispatch();

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // check for synchronized methods
  // Must happen AFTER the invocation counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
        __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();

      __ b(profile_method_continue);
    }

    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Rexception_obj: exception

#ifndef AARCH64
  // Clear interpreter_frame_last_sp.
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // !AARCH64

#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
#endif // AARCH64

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();

  // expression stack is undefined here
  // Rexception_obj: exception
  // Rbcp: exception bcp
  __ verify_oop(Rexception_obj);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ mov(R1, Rexception_obj);
  __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
  // R0: exception handler entry point
  // Rexception_obj: preserved exception oop
  // Rbcp: bcp for exception handler
  __ push_ptr(Rexception_obj);                    // push exception which is now the only value on the stack
  __ jump(R0);                                    // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
1566   // Note: At this point the bci still refers to the instruction which caused
1567   //       the exception and the expression stack is empty. Thus, for any VM calls
1568   //       at this point, GC will find a legal oop map (with empty expression stack).
1569 
1570   // In current activation
1571   // tos: exception
1572   // Rbcp: exception bcp
1573 
1574   //
1575   // JVMTI PopFrame support
1576   //
1577   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1578 
1579 #ifdef AARCH64
1580   __ restore_sp_after_call(Rtemp); // restore SP to extended SP
1581 #endif // AARCH64
1582 
1583   __ empty_expression_stack();
1584 
1585   // Set the popframe_processing bit in _popframe_condition indicating that we are
1586   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1587   // popframe handling cycles.
1588 
1589   __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1590   __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
1591   __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1592 
1593   {
1594     // Check to see whether we are returning to a deoptimized frame.
1595     // (The PopFrame call ensures that the caller of the popped frame is
1596     // either interpreted or compiled and deoptimizes it if compiled.)
1597     // In this case, we can't call dispatch_next() after the frame is
1598     // popped, but instead must save the incoming arguments and restore
1599     // them after deoptimization has occurred.
1600     //
1601     // Note that we don't compare the return PC against the
1602     // deoptimization blob's unpack entry because of the presence of
1603     // adapter frames in C2.
1604     Label caller_not_deoptimized;
1605     __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
1606     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
1607     __ cbnz_32(R0, caller_not_deoptimized);
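         // interpreter_contains(pc) returned nonzero iff the return address is
         // within interpreter code, i.e. the caller is interpreted and cannot
         // have been deoptimized.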
1608 #ifdef AARCH64
1609     __ NOT_TESTED();
1610 #endif
1611 
1612     // Compute size of arguments for saving when returning to deoptimized caller
1613     __ restore_method();
1614     __ ldr(R0, Address(Rmethod, Method::const_offset()));
1615     __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));
1616 
1617     __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
1618     // Save these arguments
1619     __ restore_locals();
1620     __ sub(R2, Rlocals, R1);
1621     __ add(R2, R2, wordSize);
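         // R1 = size of the argument area in bytes; R2 = its lowest address
         // (the arguments span R2 up to and including Rlocals).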
1622     __ mov(R0, Rthread);
1623     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);
1624 
1625     __ remove_activation(vtos, LR,
1626                          /* throw_monitor_exception */ false,
1627                          /* install_monitor_exception */ false,
1628                          /* notify_jvmdi */ false);
1629 
1630     // Inform deoptimization that it is responsible for restoring these arguments
1631     __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
1632     __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1633 
1634     // Continue in deoptimization handler
1635     __ ret();
1636 
1637     __ bind(caller_not_deoptimized);
1638   }
1639 
1640   __ remove_activation(vtos, R4,
1641                        /* throw_monitor_exception */ false,
1642                        /* install_monitor_exception */ false,
1643                        /* notify_jvmdi */ false);
1644 
1645 #ifndef AARCH64
1646   // Finish with popframe handling
1647   // A previous I2C followed by a deoptimization might have moved the
1648   // outgoing arguments further up the stack. PopFrame expects the
1649   // mutations to those outgoing arguments to be preserved and other
1650   // constraints basically require this frame to look exactly as
1651   // though it had previously invoked an interpreted activation with
1652   // no space between the top of the expression stack (current
1653   // last_sp) and the top of stack. Rather than force deopt to
1654   // maintain this kind of invariant all the time we call a small
1655   // fixup routine to move the mutated arguments onto the top of our
1656   // expression stack if necessary.
1657   __ mov(R1, SP);
1658   __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1659   // PC must point into interpreter here
1660   __ set_last_Java_frame(SP, FP, true, Rtemp);
1661   __ mov(R0, Rthread);
1662   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
1663   __ reset_last_Java_frame(Rtemp);
1664 #endif // !AARCH64
1665 
1666 #ifdef AARCH64
1667   __ restore_sp_after_call(Rtemp);
1668   __ restore_stack_top();
1669 #else
1670   // Restore the last_sp and null it out
1671   __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1672   __ mov(Rtemp, (int)NULL_WORD);
1673   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1674 #endif // AARCH64
1675 
1676   __ restore_bcp();
1677   __ restore_dispatch();
1678   __ restore_locals();
1679   __ restore_method();
1680 
1681   // The method data pointer was incremented already during
1682   // call profiling. We have to restore the mdp for the current bcp.
1683   if (ProfileInterpreter) {
1684     __ set_method_data_pointer_for_bcp();
1685   }
1686 
1687   // Clear the popframe condition flag
1688   assert(JavaThread::popframe_inactive == 0, "adjust this code");
1689   __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
1690 
1691 #if INCLUDE_JVMTI
1692   {
1693     Label L_done;
1694 
1695     __ ldrb(Rtemp, Address(Rbcp, 0));
1696     __ cmp(Rtemp, Bytecodes::_invokestatic);
1697     __ b(L_done, ne);
1698 
1699     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1700     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1701 
1702     // get local0
1703     __ ldr(R1, Address(Rlocals, 0));
1704     __ mov(R2, Rmethod);
1705     __ mov(R3, Rbcp);
1706     __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);
1707 
1708     __ cbz(R0, L_done);
1709 
1710     __ str(R0, Address(Rstack_top));
1711     __ bind(L_done);
1712   }
1713 #endif // INCLUDE_JVMTI
1714 
1715   __ dispatch_next(vtos);
1716   // end of PopFrame support
1717 
1718   Interpreter::_remove_activation_entry = __ pc();
1719 
1720   // preserve exception over this code sequence
1721   __ pop_ptr(R0_tos);
1722   __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
1723   // remove the activation (without doing throws on illegalMonitorExceptions)
1724   __ remove_activation(vtos, Rexception_pc, false, true, false);
1725   // restore exception
1726   __ get_vm_result(Rexception_obj, Rtemp);
1727 
1728   // In between activations - previous activation type unknown yet
1729   // compute continuation point - the continuation point expects
1730   // the following registers set up:
1731   //
1732   // Rexception_obj: exception
1733   // Rexception_pc: return address/pc that threw exception
1734   // SP: expression stack of caller
1735   // FP: frame pointer of caller
1736   __ mov(c_rarg0, Rthread);
1737   __ mov(c_rarg1, Rexception_pc);
1738   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
1739   // Note that an "issuing PC" is actually the next PC after the call
1740 
1741   __ jump(R0);                             // jump to exception handler of caller
1742 }
1743 
1744 
1745 //
1746 // JVMTI ForceEarlyReturn support
1747 //
1748 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1749   address entry = __ pc();
1750 
1751 #ifdef AARCH64
1752   __ restore_sp_after_call(Rtemp); // restore SP to extended SP
1753 #endif // AARCH64
1754 
1755   __ restore_bcp();
1756   __ restore_dispatch();
1757   __ restore_locals();
1758 
1759   __ empty_expression_stack();
1760 
1761   __ load_earlyret_value(state);
1762 
1763   // Clear the earlyret state
1764   __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
1765 
1766   assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
1767   __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));
1768 
1769   __ remove_activation(state, LR,
1770                        false, /* throw_monitor_exception */
1771                        false, /* install_monitor_exception */
1772                        true); /* notify_jvmdi */
1773 
1774 #ifndef AARCH64
1775   // According to interpreter calling conventions, result is returned in R0/R1,
1776   // so ftos (S0) and dtos (D0) are moved to R0/R1.
1777   // This conversion should be done after remove_activation, as it uses
1778   // push(state) & pop(state) to preserve return value.
1779   __ convert_tos_to_retval(state);
1780 #endif // !AARCH64
1781   __ ret();
1782 
1783   return entry;
1784 } // end of ForceEarlyReturn support
1785 
1786 
1787 //------------------------------------------------------------------------------------------------------------------------
1788 // Helper for vtos entry point generation
1789 
1790 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1791   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1792   Label L;
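       // Each non-vtos entry point below flushes the cached tos value onto the
       // expression stack and then joins the common vtos entry at L, so the
       // template body is generated only once.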
1793 
1794 #ifdef __SOFTFP__
1795   dep = __ pc();                // fall through
1796 #else
1797   fep = __ pc(); __ push(ftos); __ b(L);
1798   dep = __ pc(); __ push(dtos); __ b(L);
1799 #endif // __SOFTFP__
1800 
1801   lep = __ pc(); __ push(ltos); __ b(L);
1802 
1803   if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) {  // can't share atos entry with itos on AArch64 or if VerifyOops
1804     aep = __ pc(); __ push(atos); __ b(L);
1805   } else {
1806     aep = __ pc();              // fall through
1807   }
1808 
1809 #ifdef __SOFTFP__
1810   fep = __ pc();                // fall through
1811 #endif // __SOFTFP__
1812 
1813   bep = cep = sep =             // fall through
1814   iep = __ pc(); __ push(itos); // fall through
1815   vep = __ pc(); __ bind(L);    // fall through
1816   generate_and_dispatch(t);
1817 }
1818 
1819 //------------------------------------------------------------------------------------------------------------------------
1820 
1821 // Non-product code
1822 #ifndef PRODUCT
1823 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1824   address entry = __ pc();
1825 
1826   // prepare expression stack
1827   __ push(state);       // save tosca
1828 
1829   // pass tosca registers as arguments
1830   __ mov(R2, R0_tos);
1831 #ifdef AARCH64
1832   __ mov(R3, ZR);
1833 #else
1834   __ mov(R3, R1_tos_hi);
1835 #endif // AARCH64
1836   __ mov(R1, LR);       // save return address
1837 
1838   // call tracer
1839   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);
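       // trace_bytecode returns its second argument (the saved return address)
       // in R0, so LR can be restored after the VM call.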
1840 
1841   __ mov(LR, R0);       // restore return address
1842   __ pop(state);        // restore tosca
1843 
1844   // return
1845   __ ret();
1846 
1847   return entry;
1848 }
1849 
1850 
1851 void TemplateInterpreterGenerator::count_bytecode() {
1852   __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
1853 }
1854 
1855 
1856 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1857   __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
1858 }
1859 
1860 
1861 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1862   const Register Rindex_addr = R2_tmp;
1863   Label Lcontinue;
1864   InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
1865   InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
1866   const Register Rcounters_addr = R2_tmp;
1867   const Register Rindex = R4_tmp;
1868 
1869   // calculate new index for counter:
1870   // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
1871   // (_index >> log2_number_of_codes) is previous bytecode
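       // For example, with log2_number_of_codes == 8 the counter index for
       // executing bytecode B after bytecode A is (B << 8) | A.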
1872 
1873   __ ldr_literal(Rindex_addr, Lindex);
1874   __ ldr_s32(Rindex, Address(Rindex_addr));
1875   __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1876   __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
1877   __ str_32(Rindex, Address(Rindex_addr));
1878 
1879   // Rindex (R4) contains index of counter
1880 
1881   __ ldr_literal(Rcounters_addr, Lcounters);
1882   __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
1883   __ adds_32(Rtemp, Rtemp, 1);
1884   __ b(Lcontinue, mi);                           // avoid overflow
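       // (the counter saturates: once an increment would turn it negative,
       // the result is simply not stored back)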
1885   __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
1886 
1887   __ b(Lcontinue);
1888 
1889   __ bind_literal(Lindex);
1890   __ bind_literal(Lcounters);
1891 
1892   __ bind(Lcontinue);
1893 }
1894 
1895 
1896 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1897   // Call a little run-time stub to avoid blow-up for each bytecode.
1898   // The run-time stub saves the right registers, depending on
1899   // the tosca in-state for the given template.
1900   assert(Interpreter::trace_code(t->tos_in()) != NULL,
1901          "entry must have been generated");
1902   address trace_entry = Interpreter::trace_code(t->tos_in());
1903   __ call(trace_entry, relocInfo::none);
1904 }
1905 
1906 
1907 void TemplateInterpreterGenerator::stop_interpreter_at() {
1908   Label Lcontinue;
1909   const Register stop_at = R2_tmp;
1910 
1911   __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
1912   __ mov_slow(stop_at, StopInterpreterAt);
1913 
1914   // test bytecode counter
1915   __ cmp(Rtemp, stop_at);
1916   __ b(Lcontinue, ne);
1917 
1918   __ trace_state("stop_interpreter_at");
1919   __ breakpoint();
1920 
1921   __ bind(Lcontinue);
1922 }
1923 #endif // !PRODUCT