/*
 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if it is too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;

#define __ _masm->
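// The __ shorthand routes each generated pseudo-instruction through the
// InterpreterMacroAssembler pointed to by _masm.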

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);

#ifdef AARCH64
  // expand expr. stack and extended SP to avoid cutting SP in call_VM
  __ mov(Rstack_top, SP);
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ check_stack_top();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);

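  // Reload the outgoing register arguments that slow_signature_handler is
  // expected to have laid out at SP (8 GPR slots, then 8 FPR slots), popping
  // them pairwise. The first GPR slot is discarded into ZR: c_rarg0 is filled
  // in separately by the native entry (JNIEnv, or the mirror for static
  // methods).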
  __ ldp(ZR,      c_rarg1, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));

  __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
#else

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed */);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // A few alternatives to the always-load-FP-registers approach:
  // - parse the method signature to detect FP arguments
  // - keep a counter/flag on the stack indicating the number of FP arguments in the method.
  // The latter was originally implemented and tested, but a conditional path could
  // eliminate any gain over unconditionally loading the 8 double words.
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__
#endif // AARCH64

  __ ret(Rsaved_ret_addr);

  return entry;
}


//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#endif

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // TODO: ARM
  return NULL;

  address entry_point = __ pc();
  STOP("generate_math_entry");
  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
  //
#ifdef ASSERT
  { Label L;
    __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ cmp(SP, Rtemp);  // Rtemp = maximal SP for current FP,
                        //  (stack grows negative)
    __ b(L, ls); // check if frame is complete
    __ stop("interpreter frame not set up");
    __ bind(L);
  }
#endif // ASSERT

  // Restore bcp under the assumption that the current frame is still
  // interpreted
  __ restore_bcp();

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();

  // index is in R4_ArrayIndexOutOfBounds_index

  InlinedString Lname(name);

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);
  __ mov(R2, R4_ArrayIndexOutOfBounds_index);

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2);

  __ nop(); // to avoid filling the CPU pipeline with invalid instructions
  __ nop();
  __ should_not_reach_here();
  __ bind_literal(Lname);

  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is in R2_ClassCastException_obj

  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();

  __ mov(R1, R2_ClassCastException_obj);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             R1);

  __ should_not_reach_here();

  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  InlinedString Lname(name);
  InlinedString Lmessage(message);

  if (pass_oop) {
    // object is at TOS
    __ pop_ptr(R2);
  }

  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();

  // setup parameters
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != NULL) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling the CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != NULL)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // Restore stack bottom in case i2c adjusted stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as a marker that SP holds tos until the next Java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);

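  // The ldrb below picks up the low byte of the cache entry's flags word,
  // which holds the parameter size; it is used to pop the caller's arguments
  // off the expression stack.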
  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));

#ifndef AARCH64
  __ convert_retval_to_tos(state);
#endif // !AARCH64

  __ check_and_handle_popframe();
  __ check_and_handle_earlyret();

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);  // Restore SP to extended SP
  __ restore_stack_top();
#else
  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
#ifdef AARCH64
  address entry = __ pc();
  switch (type) {
    case T_BOOLEAN:
      __ tst(R0, 0xff);
      __ cset(R0, ne);
      break;
    case T_CHAR   : __ zero_extend(R0, R0, 16);  break;
    case T_BYTE   : __ sign_extend(R0, R0,  8);  break;
    case T_SHORT  : __ sign_extend(R0, R0, 16);  break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : // fall through
    case T_FLOAT  : // fall through
    case T_DOUBLE : /* nothing to do */          break;
    case T_OBJECT :
      // retrieve result from frame
      __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
      // and verify it
      __ verify_oop(R0);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();
  return entry;
#else
  // Result handlers are not used on 32-bit ARM
  // since the returned value is already in the appropriate format.
  __ should_not_reach_here();  // to avoid an empty code block

  // A non-zero result handler indicates that an object is returned; the
  // native entry code relies on this.
  return type == T_OBJECT ? (address)(-1) : NULL;
#endif // AARCH64
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}


// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for a negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
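// A rough sketch of what increment_mask_and_jump(counter, increment, mask, ...)
// does in the tiered path below: load the counter, add `increment`, store it
// back, AND the sum with the value at `mask`, and branch to `overflow` when
// the supplied condition (eq) holds, i.e. when all masked bits of the sum
// are zero.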
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
                                                 Label* profile_method,
                                                 Label* profile_method_continue) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                MethodCounters::invocation_counter_offset() +
                InvocationCounter::counter_offset());

  // Note: In tiered we increment either counters in MethodCounters* or
  // in MDO depending on whether we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
      __ cbz(R1_tmp, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(R1_tmp,
                    in_bytes(MethodData::invocation_counter_offset()) +
                    in_bytes(InvocationCounter::counter_offset()));
      const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    __ get_method_counters(Rmethod, Rcounters, done);
    const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(Rcounters,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());

    const Register Ricnt = R0_tmp;  // invocation counter
    const Register Rbcnt = R1_tmp;  // backedge counter

    __ get_method_counters(Rmethod, Rcounters, done);

    if (ProfileInterpreter) {
      const Register Riic = R1_tmp;
      __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
      __ add(Riic, Riic, 1);
      __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters

    __ ldr_u32(Ricnt, invocation_counter);
    __ ldr_u32(Rbcnt, backedge_counter);

    __ add(Ricnt, Ricnt, InvocationCounter::count_increment);

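    // Both forms below clear the status bits kept in the low bits of the
    // backedge counter: AND with count_mask_value (AArch64) and BIC with
    // its complement (32-bit ARM) are equivalent.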
#ifdef AARCH64
    __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
#else
    __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
#endif // AARCH64

    __ str_32(Ricnt, invocation_counter);            // save invocation count
    __ add(Ricnt, Ricnt, Rbcnt);                     // add both counters

    // profile_method is non-NULL only for interpreted methods, so
    // profile_method != NULL == !native_call.
    // BytecodeInterpreter only calls for native methods, so that code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      assert(profile_method_continue != NULL, "should be non-null");

      // Test to see if we should create a method data oop
      // Reuse R1_tmp as we don't need backedge counters anymore.
      Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ldr_s32(R1_tmp, profile_limit);
      __ cmp_32(Ricnt, R1_tmp);
      __ b(*profile_method_continue, lt);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(R1_tmp, *profile_method);
    }

    Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ldr_s32(R1_tmp, invoke_limit);
    __ cmp_32(Ricnt, R1_tmp);
    __ b(*overflow, hs);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating whether the counter overflow occurred at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // R11 = max expression stack slots (AArch64 only)
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved registers, through expr stack bottom).
  // Be sure to change this if you add/subtract anything to/from the overhead area.
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = JavaThread::stack_shadow_zone_size();

  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = JavaThread::stack_guard_zone_size();

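  // The computation below establishes R0 = stack_base - stack_size (the end
  // of the stack) and Rtemp = SP - overhead - locals - expression stack; the
  // frame fits only if Rtemp stays (unsigned) above R0, otherwise we throw
  // StackOverflowError.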
  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
#ifndef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
#endif // !AARCH64
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

#ifdef AARCH64
  Label L;
  __ b(L, hi);
  __ mov(SP, Rsender_sp);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry());
  __ bind(L);
#else
  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
#endif // AARCH64
}


// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

  #ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method doesn't need synchronization");
      __ bind(L);
    }
  #endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
#ifdef AARCH64
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
#else
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
#endif // AARCH64
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }

  // add space for monitor & lock

#ifdef AARCH64
  __ check_extended_sp(Rtemp);
  __ sub(SP, SP, entry_size);                  // adjust extended SP
  __ mov(Rtemp, SP);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

  __ sub(Rstack_top, Rstack_top, entry_size);
  __ check_stack_top_on_expansion();
                                              // add space for a monitor entry
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                              // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes()));
                                              // store object
  __ mov(R1, Rstack_top);                     // monitor entry address
  __ lock_object(R1);
}

#ifdef AARCH64

//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// and native methods, hence the shared code.
//
// On entry:
//   R10 = ConstMethod
//   R11 = max expr. stack (in slots), if !native_call
//
// On exit:
//   Rbcp, Rstack_top are initialized, SP is extended
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Incoming registers
  const Register RconstMethod = R10;
  const Register RmaxStack = R11;
  // Temporary registers
  const Register RextendedSP = R0;
  const Register Rcache = R1;
  const Register Rmdp = ProfileInterpreter ? R2 : ZR;

  // Generates the following stack layout (stack grows up in this picture):
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ extended SP        ]
  // [ expr. stack top    ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
  __ mov(FP, SP);                                     // establish new FP

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, ZR);                                 // bcp = 0 for native calls
  } else {
    __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase
  }

  // Rstack_top & RextendedSP
  __ sub(Rstack_top, SP, 10*wordSize);
  if (native_call) {
    __ sub(RextendedSP, Rstack_top, round_to(wordSize, StackAlignmentInBytes));    // reserve 1 slot for exception handling
  } else {
    __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
    __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
  }
  __ mov(SP, RextendedSP);
  __ check_stack_top();

  // Load Rmdp
  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
    __ csel(Rmdp, ZR, Rtemp, eq);
  }

  // Load Rcache
  __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
  __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);

  // Build fixed frame
  __ stp(Rstack_top, Rbcp,     Address(FP, -10*wordSize));
  __ stp(Rlocals, Rcache,      Address(FP,  -8*wordSize));
  __ stp(Rmdp, Rtemp,          Address(FP,  -6*wordSize));
  __ stp(Rmethod, RextendedSP, Address(FP,  -4*wordSize));
  __ stp(ZR, Rsender_sp,       Address(FP,  -2*wordSize));
  assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
  assert(frame::interpreter_frame_stack_top_offset  == -2, "stack top broken");
}

#else // AARCH64

//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// and native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);                                        // save return address
  __ push(FP);                                        // save FP
  __ mov(FP, SP);                                     // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);                                        // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset()); // get codebase
  }

  __ push(Rmethod);                                    // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp);                                    // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  __ push(Rtemp);                                      // set constant pool cache
  __ push(Rlocals);                                    // set locals pointer
  __ push(Rbcp);                                       // set bcp
  __ push(R0);                                         // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0));                          // set expression stack bottom
}

#endif // AARCH64

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions, but
// the frame layout they create is very similar. The other method entry
// types are special-purpose entries that combine entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
// Rmethod: Method*
// Rthread: thread
// Rsender_sp:  sender sp
// Rparams (SP on 32-bit ARM): pointer to method parameters
//
// LR: return address
//
// Stack layout immediately at entry
//
// [ optional padding(*)] <--- SP (AArch64)
// [ parameter n        ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1        ]
// [ expression stack   ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries, the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ reserved stack (*) ] <--- SP (AArch64)
// [ expr. stack        ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry      ]
//   ...
// [ monitor entry      ]
// [ expr. stack bottom ]
// [ saved Rbcp         ]
// [ current Rlocals    ]
// [ cache              ]
// [ mdx                ]
// [ mirror             ]
// [ Method*            ]
//
// 32-bit ARM:
// [ last_sp            ]
//
// AArch64:
// [ extended SP (*)    ]
// [ stack top (*)      ]
//
// [ sender_sp          ]
// [ saved FP           ] <--- FP
// [ saved LR           ]
// [ optional padding(*)]
// [ local variable m   ]
//   ...
// [ local variable 1   ]
// [ parameter n        ]
//   ...
// [ parameter 1        ] <--- Rlocals
//
// (*) - AArch64 only
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
    //    1. The "intrinsified" code for G1 (or any SATB based GC),
    //    2. The slow path - which is an expansion of the regular method entry.
    //
    // Notes:
    // * In the G1 code we do not check whether we need to block for
    //   a safepoint. If G1 is enabled then we must execute the specialized
    //   code for Reference.get (except when the Reference object is null)
    //   so that we can log the value in the referent field with an SATB
    //   update buffer.
    //   If the code for the getfield template is modified so that the
    //   G1 pre-barrier code is executed when the current method is
    //   Reference.get() then going through the normal method entry
    //   will be fine.
    // * The G1 code can, however, check the receiver object (the instance
    //   of java.lang.Reference) and jump to the slow path if null. If the
    //   Reference object is null then we obviously cannot fetch the referent
    //   and so we don't need to call the G1 pre-barrier. Thus we can use the
    //   regular method entry code to generate the NPE.
    //
    // This code is based on generate_accessor_entry.
    //
    // Rmethod: Method*
    // Rthread: thread
    // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
    // Rparams: parameters

    address entry = __ pc();
    Label slow_path;
    const Register Rthis = R0;
    const Register Rret_addr = Rtmp_save1;
    assert_different_registers(Rthis, Rret_addr, Rsender_sp);

    const int referent_offset = java_lang_ref_Reference::referent_offset;
    guarantee(referent_offset > 0, "referent offset not initialized");

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(Rthis, Address(Rparams));
    __ cbz(Rthis, slow_path);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    __ load_heap_oop(R0, Address(Rthis, referent_offset));

    // Preserve LR
    __ mov(Rret_addr, LR);

    __ g1_write_barrier_pre(noreg,   // store_addr
                            noreg,   // new_val
                            R0,      // pre_val
                            Rtemp,   // tmp1
                            R1_tmp); // tmp2

    // _areturn
    __ mov(SP, Rsender_sp);
    __ ret(Rret_addr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the normal entry point
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
  const Register Rsig_handler    = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
  const Register Rnative_code    = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
  const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);

#ifdef AARCH64
  const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
  const Register Rsaved_result = Rnative_code;
  const FloatRegister Dsaved_result = V8;
#else
  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
#endif // AARCH64


#ifdef AARCH64
  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params,  Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
#else
  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params,  Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
#endif // AARCH64

  // native calls don't need the stack size check since they have no expression
  // stack, the arguments are already on the stack, and we only add a handful
  // of words to it

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));
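  // Rlocals now equals Rparams + (size_of_parameters - 1) * stackElementSize,
  // i.e. the highest-addressed parameter slot, which becomes local 0.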

#ifdef AARCH64
  int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
  if (__ can_post_interpreter_events()) {
    // extra space for saved results
    extra_stack_reserve += 2*wordSize;
  }
  // reserve extra stack space and nullify oop_temp slot
  __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
#else
  // reserve stack space for oop_temp
  __ mov(R0, 0);
  __ push(R0);
#endif // AARCH64

  generate_fixed_frame(true); // Note: R9 is now saved in the frame

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
      { Label L;
        __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
        __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
        __ stop("method needs synchronization");
        __ bind(L);
      }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // Allocate stack space for arguments

#ifdef AARCH64
  __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
  __ align_reg(SP, Rtemp, StackAlignmentInBytes);

  // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
  // 8 * wordSize for GPRs
  // 8 * wordSize for FPRs
  int reg_arguments = round_to(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else

  // C functions need an aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because the calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = round_to((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // of parameters passed on registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__
#endif // AARCH64

  __ sub(SP, SP, reg_arguments);


  // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

#ifdef AARCH64
  __ mov(Rtemp, _thread_in_native);
  __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
  // STLR is used to force all preceding writes to be observed prior to thread state change
  __ stlr_w(Rtemp, Rtemp2);
#else
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
#endif // AARCH64

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(MacroAssembler::StoreLoad, Rtemp);

  __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());

  // Protect the return value in the interleaved code: save it to callee-save registers.
#ifdef AARCH64
  __ mov(Rsaved_result, R0);
  __ fmov_d(Dsaved_result, D0);
#else
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__
#endif // AARCH64

  {
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
    __ cond_cmp(R3, 0, eq);

#ifdef AARCH64
    Label L;
    __ b(L, eq);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(L);
#else
  __ mov(R0, Rthread, ne);
  __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
#endif // AARCH64
  }

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox if the result is a non-zero object
#ifdef AARCH64
  {
    Label L, Lnull;
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rresult_handler, Rtemp);
    __ b(L, ne);
    __ cbz(Rsaved_result, Lnull);
    __ ldr(Rsaved_result, Address(Rsaved_result));
    __ bind(Lnull);
    // Store oop on the stack for GC
    __ str(Rsaved_result, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(L);
  }
#else
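  // On 32-bit ARM the result handler is -1 (all ones) for T_OBJECT and NULL
  // otherwise (see generate_result_handler_for), so the tst below sets ne
  // exactly when an object was returned and the returned handle is non-NULL.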
  __ tst(Rsaved_result_lo, Rresult_handler);
  __ ldr(Rsaved_result_lo, Address(Rsaved_result_lo), ne);

  // Store oop on the stack for GC
  __ cmp(Rresult_handler, 0);
  __ str(Rsaved_result_lo, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
#endif // AARCH64

#ifdef AARCH64
  // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
  __ restore_sp_after_call(Rtemp);
  __ check_stack_top();
#endif // AARCH64

  // reguard the stack if a StackOverflowError happened while in native code
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
#ifdef AARCH64
    Label L;
    __ b(L, ne);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
    __ bind(L);
#else
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
#endif // AARCH64
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ mov_pc_to(Rexception_pc);
    __ b(StubRoutines::forward_exception_entry());
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
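    // (note the double negation: R1 = FP + (monitor_block_bottom -
    // monitor_size) * wordSize, the address of the single monitor entry
    // that lock_method allocated for this synchronized native method)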
    __ unlock_object(R1);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
#ifdef AARCH64
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
#else
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
#endif // AARCH64

  // Restore the result. Oop result is restored from the stack.
#ifdef AARCH64
  __ mov(R0, Rsaved_result);
  __ fmov_d(D0, Dsaved_result);

  __ blr(Rresult_handler);
#else
  __ cmp(Rresult_handler, 0);
  __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
  __ mov(R0, Rsaved_result_lo, eq);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__

#ifdef ASSERT
  if (VerifyOops) {
    Label L;
    __ cmp(Rresult_handler, 0);
    __ b(L, eq);
    __ verify_oop(R0);
    __ bind(L);
  }
#endif // ASSERT
#endif // AARCH64

  // Restore FP/LR, sender_sp and return
#ifdef AARCH64
  __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
  __ ldp(FP, LR, Address(FP));
  __ mov(SP, Rtemp);
#else
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
#endif // AARCH64

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);

#ifdef AARCH64
  const Register RmaxStack = R11;
  const Register RlocalsBase = R12;
#endif // AARCH64

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals

#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
  __ add(RmaxStack, RmaxStack, MAX2(1, Method::extra_stack_entries())); // reserve slots for exception handler and JSR292 appendix argument
#endif // AARCH64

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

#ifdef AARCH64

  // allocate space for locals
  {
    __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
    __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
  }

  // explicitly initialize locals
  {
    Label zero_loop, done;
    __ cbz(R3, done);

    __ tbz(R3, 0, zero_loop);
    __ subs(R3, R3, 1);
    __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
    __ b(done, eq);

    __ bind(zero_loop);
    __ subs(R3, R3, 2);
    __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
    __ b(zero_loop, ne);

    __ bind(done);
  }

#else
  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
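  // The zero-locals case also works: the first subs makes R3 negative, all
  // the conditional (ge) pushes and decrements are skipped, and the gt
  // branch falls through.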
1441   Label loop;
1442   __ mov(R0, 0);
1443   __ bind(loop);
1444 
1445   // #1
1446   __ subs(R3, R3, 1);
1447   __ push(R0, ge);
1448 
1449   // #2
1450   __ subs(R3, R3, 1, ge);
1451   __ push(R0, ge);
1452 
1453   // #3
1454   __ subs(R3, R3, 1, ge);
1455   __ push(R0, ge);
1456 
1457   // #4
1458   __ subs(R3, R3, 1, ge);
1459   __ push(R0, ge);
1460 
1461   __ b(loop, gt);
1462 #endif // AARCH64
1463 
1464   // initialize fixed part of activation frame
1465   generate_fixed_frame(false);
1466 
1467   __ restore_dispatch();
1468 
1469   // make sure method is not native & not abstract
1470 #ifdef ASSERT
1471   __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1472   {
1473     Label L;
1474     __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
1475     __ stop("tried to execute native method as non-native");
1476     __ bind(L);
1477   }
1478   { Label L;
1479     __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
1480     __ stop("tried to execute abstract method in interpreter");
1481     __ bind(L);
1482   }
1483 #endif
1484 
1485   // increment invocation count & check for overflow
1486   Label invocation_counter_overflow;
1487   Label profile_method;
1488   Label profile_method_continue;
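       // generate_counter_incr branches to invocation_counter_overflow when
       // the invocation counter crosses its threshold and, with
       // ProfileInterpreter, to profile_method when interpreter profiling
       // should be started.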
1489   if (inc_counter) {
1490     if (synchronized) {
1491       // Avoid unlocking method's monitor in case of exception, as it has not
1492       // been locked yet.
1493       __ set_do_not_unlock_if_synchronized(true, Rtemp);
1494     }
1495     generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1496     if (ProfileInterpreter) {
1497       __ bind(profile_method_continue);
1498     }
1499   }
1500   Label continue_after_compile;
1501   __ bind(continue_after_compile);
1502 
1503   if (inc_counter && synchronized) {
1504     __ set_do_not_unlock_if_synchronized(false, Rtemp);
1505   }
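       // The runtime call inside generate_counter_incr may have scratched R9
       // on ABIs where R9 is not callee-saved, so reload Rmethod.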
1506 #if R9_IS_SCRATCHED
1507   __ restore_method();
1508 #endif
1509 
1510   // check for synchronized methods
1511   // Must happen AFTER invocation_counter check and stack overflow check,
1512   // so that the method is not locked if the counter overflows.
1513   //
1514   if (synchronized) {
1515     // Allocate monitor and lock method
1516     lock_method();
1517   } else {
1518     // no synchronization necessary
1519 #ifdef ASSERT
1520     { Label L;
1521       __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
1522       __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
1523       __ stop("method needs synchronization");
1524       __ bind(L);
1525     }
1526 #endif
1527   }
1528 
1529   // start execution
1530 #ifdef ASSERT
1531   { Label L;
1532     __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
1533     __ cmp(Rtemp, Rstack_top);
1534     __ b(L, eq);
1535     __ stop("broken stack frame setup in interpreter");
1536     __ bind(L);
1537   }
1538 #endif
1539   __ check_extended_sp(Rtemp);
1540 
1541   // jvmti support
1542   __ notify_method_entry();
1543 #if R9_IS_SCRATCHED
1544   __ restore_method();
1545 #endif
1546 
1547   __ dispatch_next(vtos);
1548 
1549   // invocation counter overflow
1550   if (inc_counter) {
1551     if (ProfileInterpreter) {
1552       // We have decided to profile this method in the interpreter
1553       __ bind(profile_method);
1554 
1555       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1556       __ set_method_data_pointer_for_bcp();
1557 
1558       __ b(profile_method_continue);
1559     }
1560 
1561     // Handle overflow of counter and compile method
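         // generate_counter_overflow calls into the runtime to request a
         // compilation and resumes execution at continue_after_compile.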
1562     __ bind(invocation_counter_overflow);
1563     generate_counter_overflow(continue_after_compile);
1564   }
1565 
1566   return entry_point;
1567 }
1568 
1569 //------------------------------------------------------------------------------------------------------------------------
1570 // Exceptions
1571 
1572 void TemplateInterpreterGenerator::generate_throw_exception() {
1573   // Entry point in previous activation (i.e., if the caller was interpreted)
1574   Interpreter::_rethrow_exception_entry = __ pc();
1575   // Rexception_obj: exception
1576 
1577 #ifndef AARCH64
1578   // Clear interpreter_frame_last_sp.
1579   __ mov(Rtemp, 0);
1580   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1581 #endif // !AARCH64
1582 
1583 #if R9_IS_SCRATCHED
1584   __ restore_method();
1585 #endif
1586   __ restore_bcp();
1587   __ restore_dispatch();
1588   __ restore_locals();
1589 
1590 #ifdef AARCH64
1591   __ restore_sp_after_call(Rtemp);
1592 #endif // AARCH64
1593 
1594   // Entry point for exceptions thrown within interpreter code
1595   Interpreter::_throw_exception_entry = __ pc();
1596 
1597   // expression stack is undefined here
1598   // Rexception_obj: exception
1599   // Rbcp: exception bcp
1600   __ verify_oop(Rexception_obj);
1601 
1602   // expression stack must be empty before entering the VM in case of an exception
1603   __ empty_expression_stack();
1604   // find exception handler address and preserve exception oop
1605   __ mov(R1, Rexception_obj);
1606   __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
1607   // R0: exception handler entry point
1608   // Rexception_obj: preserved exception oop
1609   // Rbcp: bcp for exception handler
1610   __ push_ptr(Rexception_obj);                    // push exception which is now the only value on the stack
1611   __ jump(R0);                                    // jump to exception handler (may be _remove_activation_entry!)
1612 
1613   // If the exception is not handled in the current frame the frame is removed and
1614   // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
1615   //
1616   // Note: At this point the bci is still the bci for the instruction which caused
1617   //       the exception and the expression stack is empty. Thus, for any VM calls
1618   //       at this point, GC will find a legal oop map (with empty expression stack).
1619 
1620   // In current activation
1621   // tos: exception
1622   // Rbcp: exception bcp
1623 
1624   //
1625   // JVMTI PopFrame support
1626   //
1627   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1628 
1629 #ifdef AARCH64
1630   __ restore_sp_after_call(Rtemp); // restore SP to extended SP
1631 #endif // AARCH64
1632 
1633   __ empty_expression_stack();
1634 
1635   // Set the popframe_processing bit in _popframe_condition indicating that we are
1636   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1637   // popframe handling cycles.
1638 
1639   __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1640   __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
1641   __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1642 
1643   {
1644     // Check to see whether we are returning to a deoptimized frame.
1645     // (The PopFrame call ensures that the caller of the popped frame is
1646     // either interpreted or compiled and deoptimizes it if compiled.)
1647     // In this case, we can't call dispatch_next() after the frame is
1648     // popped, but instead must save the incoming arguments and restore
1649     // them after deoptimization has occurred.
1650     //
1651     // Note that we don't compare the return PC against the
1652     // deoptimization blob's unpack entry because of the presence of
1653     // adapter frames in C2.
1654     Label caller_not_deoptimized;
1655     __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
1656     __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
1657     __ cbnz_32(R0, caller_not_deoptimized);
1658 #ifdef AARCH64
1659     __ NOT_TESTED();
1660 #endif
1661 
1662     // Compute size of arguments for saving when returning to deoptimized caller
1663     __ restore_method();
1664     __ ldr(R0, Address(Rmethod, Method::const_offset()));
1665     __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));
1666 
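         // R0 = number of parameter slots; R1 (below) = the parameter area
         // size in bytes, and R2 = its lowest address (Rlocals points at
         // local #0, the highest-addressed word of the area).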
1667     __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
1668     // Save these arguments
1669     __ restore_locals();
1670     __ sub(R2, Rlocals, R1);
1671     __ add(R2, R2, wordSize);
1672     __ mov(R0, Rthread);
1673     __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);
1674 
1675     __ remove_activation(vtos, LR,
1676                          /* throw_monitor_exception */ false,
1677                          /* install_monitor_exception */ false,
1678                          /* notify_jvmdi */ false);
1679 
1680     // Inform deoptimization that it is responsible for restoring these arguments
1681     __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
1682     __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
1683 
1684     // Continue in deoptimization handler
1685     __ ret();
1686 
1687     __ bind(caller_not_deoptimized);
1688   }
1689 
1690   __ remove_activation(vtos, R4,
1691                        /* throw_monitor_exception */ false,
1692                        /* install_monitor_exception */ false,
1693                        /* notify_jvmdi */ false);
1694 
1695 #ifndef AARCH64
1696   // Finish with popframe handling
1697   // A previous I2C followed by a deoptimization might have moved the
1698   // outgoing arguments further up the stack. PopFrame expects the
1699   // mutations to those outgoing arguments to be preserved and other
1700   // constraints basically require this frame to look exactly as
1701   // though it had previously invoked an interpreted activation with
1702   // no space between the top of the expression stack (current
1703   // last_sp) and the top of stack. Rather than force deopt to
1704   // maintain this kind of invariant all the time we call a small
1705   // fixup routine to move the mutated arguments onto the top of our
1706   // expression stack if necessary.
1707   __ mov(R1, SP);
1708   __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1709   // PC must point into interpreter here
1710   __ set_last_Java_frame(SP, FP, true, Rtemp);
1711   __ mov(R0, Rthread);
1712   __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
1713   __ reset_last_Java_frame(Rtemp);
1714 #endif // !AARCH64
1715 
1716 #ifdef AARCH64
1717   __ restore_sp_after_call(Rtemp);
1718   __ restore_stack_top();
1719 #else
1720   // Restore the last_sp and null it out
1721   __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1722   __ mov(Rtemp, (int)NULL_WORD);
1723   __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
1724 #endif // AARCH64
1725 
1726   __ restore_bcp();
1727   __ restore_dispatch();
1728   __ restore_locals();
1729   __ restore_method();
1730 
1731   // The method data pointer was incremented already during
1732   // call profiling. We have to restore the mdp for the current bcp.
1733   if (ProfileInterpreter) {
1734     __ set_method_data_pointer_for_bcp();
1735   }
1736 
1737   // Clear the popframe condition flag
1738   assert(JavaThread::popframe_inactive == 0, "adjust this code");
1739   __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));
1740 
1741 #if INCLUDE_JVMTI
1742   {
1743     Label L_done;
1744 
1745     __ ldrb(Rtemp, Address(Rbcp, 0));
1746     __ cmp(Rtemp, Bytecodes::_invokestatic);
1747     __ b(L_done, ne);
1748 
1749     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1750     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1751 
1752     // get local0
1753     __ ldr(R1, Address(Rlocals, 0));
1754     __ mov(R2, Rmethod);
1755     __ mov(R3, Rbcp);
1756     __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);
1757 
1758     __ cbz(R0, L_done);
1759 
1760     __ str(R0, Address(Rstack_top));
1761     __ bind(L_done);
1762   }
1763 #endif // INCLUDE_JVMTI
1764 
1765   __ dispatch_next(vtos);
1766   // end of PopFrame support
1767 
1768   Interpreter::_remove_activation_entry = __ pc();
1769 
1770   // preserve exception over this code sequence
1771   __ pop_ptr(R0_tos);
1772   __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
1773   // remove the activation (without throwing IllegalMonitorStateException)
1774   __ remove_activation(vtos, Rexception_pc, false, true, false);
1775   // restore exception
1776   __ get_vm_result(Rexception_obj, Rtemp);
1777 
1778   // In between activations - previous activation type unknown yet
1779   // compute continuation point - the continuation point expects
1780   // the following registers set up:
1781   //
1782   // Rexception_obj: exception
1783   // Rexception_pc: return address/pc that threw exception
1784   // SP: expression stack of caller
1785   // FP: frame pointer of caller
1786   __ mov(c_rarg0, Rthread);
1787   __ mov(c_rarg1, Rexception_pc);
1788   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
1789   // Note that an "issuing PC" is actually the next PC after the call
1790 
1791   __ jump(R0);                             // jump to exception handler of caller
1792 }
1793 
1794 
1795 //
1796 // JVMTI ForceEarlyReturn support
1797 //
1798 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1799   address entry = __ pc();
1800 
1801 #ifdef AARCH64
1802   __ restore_sp_after_call(Rtemp); // restore SP to extended SP
1803 #endif // AARCH64
1804 
1805   __ restore_bcp();
1806   __ restore_dispatch();
1807   __ restore_locals();
1808 
1809   __ empty_expression_stack();
1810 
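       // Fetch the return value forced by ForceEarlyReturn from the
       // JvmtiThreadState into the tos registers for the given state.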
1811   __ load_earlyret_value(state);
1812 
1813   // Clear the earlyret state
1814   __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
1815 
1816   assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
1817   __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));
1818 
1819   __ remove_activation(state, LR,
1820                        false, /* throw_monitor_exception */
1821                        false, /* install_monitor_exception */
1822                        true); /* notify_jvmdi */
1823 
1824 #ifndef AARCH64
1825   // According to interpreter calling conventions, result is returned in R0/R1,
1826   // so ftos (S0) and dtos (D0) are moved to R0/R1.
1827   // This conversion should be done after remove_activation, as it uses
1828   // push(state) & pop(state) to preserve return value.
1829   __ convert_tos_to_retval(state);
1830 #endif // !AARCH64
1831   __ ret();
1832 
1833   return entry;
1834 } // end of ForceEarlyReturn support
1835 
1836 
1837 //------------------------------------------------------------------------------------------------------------------------
1838 // Helper for vtos entry point generation
1839 
1840 void TemplateInterpreterGenerator::set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1841   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1842   Label L;
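       // Each non-vtos entry point pushes the current tos value onto the
       // expression stack and then falls through or branches to the common
       // vtos entry point bound at L.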
1843 
1844 #ifdef __SOFTFP__
1845   dep = __ pc();                // fall through
1846 #else
1847   fep = __ pc(); __ push(ftos); __ b(L);
1848   dep = __ pc(); __ push(dtos); __ b(L);
1849 #endif // __SOFTFP__
1850 
1851   lep = __ pc(); __ push(ltos); __ b(L);
1852 
1853   if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) {  // can't share atos entry with itos on AArch64 or if VerifyOops
1854     aep = __ pc(); __ push(atos); __ b(L);
1855   } else {
1856     aep = __ pc();              // fall through
1857   }
1858 
1859 #ifdef __SOFTFP__
1860   fep = __ pc();                // fall through
1861 #endif // __SOFTFP__
1862 
1863   bep = cep = sep =             // fall through
1864   iep = __ pc(); __ push(itos); // fall through
1865   vep = __ pc(); __ bind(L);    // fall through
1866   generate_and_dispatch(t);
1867 }
1868 
1869 //------------------------------------------------------------------------------------------------------------------------
1870 
1871 // Non-product code
1872 #ifndef PRODUCT
1873 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1874   address entry = __ pc();
1875 
1876   // prepare expression stack
1877   __ push(state);       // save tosca
1878 
1879   // pass tosca registers as arguments
1880   __ mov(R2, R0_tos);
1881 #ifdef AARCH64
1882   __ mov(R3, ZR);
1883 #else
1884   __ mov(R3, R1_tos_hi);
1885 #endif // AARCH64
1886   __ mov(R1, LR);       // save return address
1887 
1888   // call tracer
1889   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);
1890 
1891   __ mov(LR, R0);       // restore return address
1892   __ pop(state);        // restore tosca
1893 
1894   // return
1895   __ ret();
1896 
1897   return entry;
1898 }
1899 
1900 
1901 void TemplateInterpreterGenerator::count_bytecode() {
1902   __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
1903 }
1904 
1905 
1906 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1907   __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
1908 }
1909 
1910 
1911 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1912   const Register Rindex_addr = R2_tmp;
1913   Label Lcontinue;
1914   InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
1915   InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
1916   const Register Rcounters_addr = R2_tmp;
1917   const Register Rindex = R4_tmp;
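       // Note: Rindex_addr and Rcounters_addr share R2_tmp; their live
       // ranges do not overlap.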
1918 
1919   // calculate new index for counter:
1920   // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes).
1921   // (_index >> log2_number_of_codes) is previous bytecode
1922 
1923   __ ldr_literal(Rindex_addr, Lindex);
1924   __ ldr_s32(Rindex, Address(Rindex_addr));
1925   __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
1926   __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
1927   __ str_32(Rindex, Address(Rindex_addr));
1928 
1929   // Rindex (R4) contains index of counter
1930 
1931   __ ldr_literal(Rcounters_addr, Lcounters);
1932   __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
1933   __ adds_32(Rtemp, Rtemp, 1);
1934   __ b(Lcontinue, mi);                           // skip the store on overflow (counter wrapped to negative)
1935   __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
1936 
1937   __ b(Lcontinue);
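       // the address literals are emitted inline here, between the
       // unconditional branch above and its target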
1938 
1939   __ bind_literal(Lindex);
1940   __ bind_literal(Lcounters);
1941 
1942   __ bind(Lcontinue);
1943 }
1944 
1945 
1946 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1947   // Call a little run-time stub to avoid blow-up for each bytecode.
1948   // The run-time stub saves the right registers, depending on
1949   // the tosca in-state for the given template.
1950   assert(Interpreter::trace_code(t->tos_in()) != NULL,
1951          "entry must have been generated");
1952   address trace_entry = Interpreter::trace_code(t->tos_in());
1953   __ call(trace_entry, relocInfo::none);
1954 }
1955 
1956 
1957 void TemplateInterpreterGenerator::stop_interpreter_at() {
1958   Label Lcontinue;
1959   const Register stop_at = R2_tmp;
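       // Compare the global bytecode counter against the StopInterpreterAt
       // flag and hit a breakpoint when they match.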
1960 
1961   __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
1962   __ mov_slow(stop_at, StopInterpreterAt);
1963 
1964   // test bytecode counter
1965   __ cmp(Rtemp, stop_at);
1966   __ b(Lcontinue, ne);
1967 
1968   __ trace_state("stop_interpreter_at");
1969   __ breakpoint();
1970 
1971   __ bind(Lcontinue);
1972 }
1973 #endif // !PRODUCT