rev 8069 : 8164652: aarch32: C1 port

   1 /*
   2  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
   4  * Copyright (c) 2015, Linaro Ltd. All rights reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.hpp"
  29 #include "interp_masm_aarch32.hpp"
  30 #include "interpreter/bytecodeHistogram.hpp"
  31 #include "interpreter/bytecodeTracer.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterGenerator.hpp"
  34 #include "interpreter/interpreterRuntime.hpp"
  35 #include "interpreter/templateTable.hpp"
  36 #include "oops/arrayOop.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "prims/jvmtiExport.hpp"
  41 #include "prims/jvmtiThreadState.hpp"
  42 #include "runtime/arguments.hpp"
  43 #include "runtime/deoptimization.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "runtime/synchronizer.hpp"
  48 #include "runtime/timer.hpp"
  49 #include "runtime/vframeArray.hpp"
  50 #include "utilities/debug.hpp"
  51 
  52 #include <sys/types.h>
  53 
  54 #ifndef PRODUCT
  55 #include "oops/method.hpp"
  56 #endif // !PRODUCT
  57 
  58 #define __ _masm->
  59 
  60 #ifndef CC_INTERP
  61 
  62 //-----------------------------------------------------------------------------
  63 
  64 extern "C" void entry(CodeBuffer*);
  65 
  66 //-----------------------------------------------------------------------------
  67 
  68 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  69   address entry = __ pc();
  70 
  71 #ifdef ASSERT
  72   {
  73     Label L;
  74     __ ldr(rscratch1, Address(rfp,
  75                        frame::interpreter_frame_monitor_block_top_offset *
  76                        wordSize));
  77     __ mov(rscratch2, sp);
   78     __ cmp(rscratch1, rscratch2); // maximal sp for current rfp (stack
   79                            // grows negative)
  80     __ b(L, Assembler::HS); // check if frame is complete
  81     __ stop ("interpreter frame not set up");
  82     __ bind(L);
  83   }
  84 #endif // ASSERT
  85   // Restore bcp under the assumption that the current frame is still
  86   // interpreted
  87   __ restore_bcp();
  88 
  89   // expression stack must be empty before entering the VM if an
  90   // exception happened
  91   __ empty_expression_stack();
  92   // throw exception
  93   __ call_VM(noreg,
  94              CAST_FROM_FN_PTR(address,
  95                               InterpreterRuntime::throw_StackOverflowError));
  96   return entry;
  97 }
  98 
  99 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
 100         const char* name) {
 101   address entry = __ pc();
 102   // expression stack must be empty before entering the VM if an
 103   // exception happened
 104   __ empty_expression_stack();
 105   // setup parameters
 106   // ??? convention: expect aberrant index in register r2
 107   __ mov(c_rarg1, (address)name);
 108   __ call_VM(noreg,
 109              CAST_FROM_FN_PTR(address,
 110                               InterpreterRuntime::
 111                               throw_ArrayIndexOutOfBoundsException),
 112              c_rarg1, c_rarg2);
 113   return entry;
 114 }
 115 
 116 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
 117   address entry = __ pc();
 118 
 119   // object is at TOS
 120   __ pop(c_rarg1);
 121 
 122   // expression stack must be empty before entering the VM if an
 123   // exception happened
 124   __ empty_expression_stack();
 125 
 126   __ call_VM(noreg,
 127              CAST_FROM_FN_PTR(address,
 128                               InterpreterRuntime::
 129                               throw_ClassCastException),
 130              c_rarg1);
 131   return entry;
 132 }
 133 
 134 address TemplateInterpreterGenerator::generate_exception_handler_common(
 135         const char* name, const char* message, bool pass_oop) {
 136   assert(!pass_oop || message == NULL, "either oop or message but not both");
 137   address entry = __ pc();
 138   if (pass_oop) {
 139     // object is at TOS
 140     __ pop(c_rarg2);
 141   }
 142   // expression stack must be empty before entering the VM if an
 143   // exception happened
  144   __ empty_expression_stack();
 145   // setup parameters
 146   __ lea(c_rarg1, Address((address)name));
 147   if (pass_oop) {
 148     __ call_VM(r0, CAST_FROM_FN_PTR(address,
 149                                     InterpreterRuntime::
 150                                     create_klass_exception),
 151                c_rarg1, c_rarg2);
 152   } else {
  153     // Kind of lame: ExternalAddress can't take NULL because
  154     // external_word_Relocation will assert.
 155     if (message != NULL) {
 156       __ lea(c_rarg2, Address((address)message));
 157     } else {
 158       __ mov(c_rarg2, NULL_WORD);
 159     }
 160     __ call_VM(r0,
 161                CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
 162                c_rarg1, c_rarg2);
 163   }
 164   // throw exception
 165   __ b(address(Interpreter::throw_exception_entry()));
 166   return entry;
 167 }
 168 
 169 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
 170   address entry = __ pc();
 171   // NULL last_sp until next java call
 172   __ mov(rscratch1, 0);
 173   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 174   __ dispatch_next(state);
 175   return entry;
 176 }
 177 
 178 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
 179   address entry = __ pc();
 180 
 181   __ print_method_exit();
 182   __ reg_printf("A. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 183 
 184   // Restore stack bottom in case i2c adjusted stack
 185   __ ldr(sp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 186   // and NULL it as marker that sp is now tos until next java call
 187   __ mov(rscratch1, 0);
 188   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 189   __ reg_printf("B. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 190   __ restore_bcp();
 191   __ restore_locals();
 192   __ restore_constant_pool_cache();
 193   __ get_method(rmethod);
 194   __ reg_printf("C. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 195 
 196   // Pop N words from the stack
 197   __ get_cache_and_index_at_bcp(r3, r2, 1, index_size);
 198   __ reg_printf("D. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 199   __ ldr(r3, Address(r3, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 200   __ andr(r3, r3, ConstantPoolCacheEntry::parameter_size_mask);
 201 
 202   __ add(sp, sp, r3, lsl(2));
 203 
 204   // Restore machine SP
 205   /*__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 206   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
 207   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
 208   __ ldr(rscratch2,
 209          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
 210   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
 211   __ bic(sp, rscratch1, 0xf);*/
 212 
 213   __ get_dispatch();
 214   __ reg_printf("E. return_entry <r1:r0> : 0x%08x%08x\n", r1, r0);
 215   __ dispatch_next(state, step);
 216 
 217   return entry;
 218 }
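     // Worked example of the argument pop above (illustrative only): for an
     // invokevirtual of a method with signature (JI)V, the cached
     // parameter_size is 4 stack slots (receiver + long + int), so the
     // add(sp, sp, r3, lsl(2)) above advances sp by 4 * wordSize bytes,
     // discarding the arguments the call consumed.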
 219 
 220 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
 221                                                                int step) {
 222   address entry = __ pc();
 223   __ restore_bcp();
 224   __ restore_locals();
 225   __ restore_constant_pool_cache();
 226   __ get_method(rmethod);
 227 
 228   // handle exceptions
 229   {
 230     Label L;
 231     __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
 232     __ cbz(rscratch1, L);
 233     __ call_VM(noreg,
 234                CAST_FROM_FN_PTR(address,
 235                                 InterpreterRuntime::throw_pending_exception));
 236     __ should_not_reach_here();
 237     __ bind(L);
 238   }
 239 
 240   __ get_dispatch();
 241 
 242   // Calculate stack limit
 243   __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 244   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
 245   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
 246   __ ldr(rscratch2,
 247          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
 248   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
 249   __ bic(sp, rscratch1, 0xf);
 250 
 251   // Restore expression stack pointer
 252   __ ldr(sp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 253   // NULL last_sp until next java call
 254   __ mov(rscratch1, 0);
 255   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 256 
 257   __ dispatch_next(state, step);
 258   return entry;
 259 }
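     // Sketch of the stack-limit computation above, in pseudo-C (illustrative
     // only; wordSize == 4 on aarch32):
     //   limit = interpreter_frame_initial_sp
     //           - (max_stack + monitor_size + 2) * wordSize;
     //   sp    = limit & ~0xf;   // the bic aligns down to 16 bytes
     // sp is then immediately re-loaded from last_sp to restore the
     // expression stack pointer.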
 260 
 261 
 262 int AbstractInterpreter::BasicType_as_index(BasicType type) {
 263   int i = 0;
 264   switch (type) {
 265     case T_BOOLEAN: i = 0; break;
 266     case T_CHAR   : i = 1; break;
 267     case T_BYTE   : i = 2; break;
 268     case T_SHORT  : i = 3; break;
 269     case T_INT    : i = 4; break;
 270     case T_LONG   : i = 5; break;
 271     case T_VOID   : i = 6; break;
 272     case T_FLOAT  : i = 7; break;
 273     case T_DOUBLE : i = 8; break;
 274     case T_OBJECT : i = 9; break;
 275     case T_ARRAY  : i = 9; break;
 276     default       : ShouldNotReachHere();
 277   }
 278   assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers,
 279          "index out of bounds");
 280   return i;
 281 }
 282 
 283 
 284 address TemplateInterpreterGenerator::generate_result_handler_for(
 285         BasicType type) {
 286   address entry = __ pc();
 287   switch (type) {
 288   case T_BOOLEAN: __ uxtb(r0, r0);       break;
 289   case T_CHAR   : __ uxth(r0, r0);       break;
 290   case T_BYTE   : __ sxtb(r0, r0);       break;
 291   case T_SHORT  : __ sxth(r0, r0);       break;
 292   case T_INT    : /* nothing to do */    break;
 293   case T_LONG   : /* nothing to do */    break;
 294   case T_VOID   : /* nothing to do */    break;
 295   case T_FLOAT  : /* nothing to do */    break;
 296   case T_DOUBLE : /* nothing to do */    break;
 297   case T_OBJECT :
 298     // retrieve result from frame
 299     __ reg_printf("In object result handler\n");
 300     __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
 301     // and verify it
 302     __ verify_oop(r0);
 303     break;
 304   default       : ShouldNotReachHere();
 305   }
 306   __ b(lr);                                  // return from result handler
 307   return entry;
 308 }
 309 
 310 address TemplateInterpreterGenerator::generate_safept_entry_for(
 311         TosState state,
 312         address runtime_entry) {
 313   address entry = __ pc();
 314   __ push(state);
 315   __ call_VM(noreg, runtime_entry);
 316   __ membar(Assembler::AnyAny);
 317   __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
 318   return entry;
 319 }
 320 
  321 // Helpers for commoning out cases in the various types of method entries.
 322 //
 323 
 324 
 325 // increment invocation count & check for overflow
 326 //
 327 // Note: checking for negative value instead of overflow
 328 //       so we have a 'sticky' overflow test
 329 //
 330 // rmethod: method
 331 //
 332 void InterpreterGenerator::generate_counter_incr(
 333         Label* overflow,
 334         Label* profile_method,
 335         Label* profile_method_continue) {
 336   Label done;
  337   // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
 338   if (TieredCompilation) {
 339     int increment = InvocationCounter::count_increment;
 340     int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
 341     Label no_mdo;
 342     if (ProfileInterpreter) {
 343       // Are we profiling?
 344       __ ldr(r0, Address(rmethod, Method::method_data_offset()));
 345       __ cbz(r0, no_mdo);
 346       // Increment counter in the MDO
 347       const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
 348                                            in_bytes(InvocationCounter::counter_offset()));
 349       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
 350       __ b(done);
 351     }
 352     __ bind(no_mdo);
 353     // Increment counter in MethodCounters
 354     const Address invocation_counter(rscratch2,
 355                   MethodCounters::invocation_counter_offset() +
 356                   InvocationCounter::counter_offset());
 357     __ get_method_counters(rmethod, rscratch2, done);
 358     __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
 359     __ bind(done);
 360   } else {
 361     const Address backedge_counter(rscratch2,
 362                   MethodCounters::backedge_counter_offset() +
 363                   InvocationCounter::counter_offset());
 364     const Address invocation_counter(rscratch2,
 365                   MethodCounters::invocation_counter_offset() +
 366                   InvocationCounter::counter_offset());
 367 
 368     __ get_method_counters(rmethod, rscratch2, done);
 369 
 370     if (ProfileInterpreter) { // %%% Merge this into MethodData*
 371       __ ldr(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
 372       __ add(r1, r1, 1);
 373       __ str(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
 374     }
 375     // Update standard invocation counters
 376     __ ldr(r1, invocation_counter);
 377     __ ldr(r0, backedge_counter);
 378 
 379     __ add(r1, r1, InvocationCounter::count_increment);
 380     __ mov(rscratch1, InvocationCounter::count_mask_value);
 381     __ andr(r0, r0, rscratch1);
 382 
 383     __ str(r1, invocation_counter);
 384     __ add(r0, r0, r1);                // add both counters
 385 
  386     // profile_method is non-null only for interpreted methods, so
  387     // profile_method != NULL is equivalent to !native_call
 388 
 389     if (ProfileInterpreter && profile_method != NULL) {
 390       // Test to see if we should create a method data oop
 391       __ mov(rscratch2, ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
 392       __ ldr(rscratch2, rscratch2);
 393       __ cmp(r0, rscratch2);
 394       __ b(*profile_method_continue, Assembler::LT);
 395 
 396       // if no method data exists, go to profile_method
 397       __ test_method_data_pointer(r0, *profile_method);
 398     }
 399 
 400     {
 401       __ mov(rscratch2, ExternalAddress((address) &InvocationCounter::InterpreterInvocationLimit));
 402       __ ldr(rscratch2, rscratch2);
 403       __ cmp(r0, rscratch2);
 404       __ b(*overflow, Assembler::HS);
 405     }
 406     __ bind(done);
 407   }
 408 }
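     // increment_mask_and_jump above is assumed to behave roughly as follows
     // (pseudo-C sketch, not emitted literally):
     //   c = *counter + increment;  *counter = c;
     //   if ((c & mask) == 0) goto *overflow;   // the Assembler::EQ condition
     // With TieredCompilation the mask is built from Tier0InvokeNotifyFreqLog,
     // so the overflow path is taken every 2^Tier0InvokeNotifyFreqLog
     // invocations.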
 409 
 410 void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
 411 
 412   // Asm interpreter on entry
 413   // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
 414   // Everything as it was on entry
 415 
 416   // InterpreterRuntime::frequency_counter_overflow takes two
 417   // arguments, the first (thread) is passed by call_VM, the second
 418   // indicates if the counter overflow occurs at a backwards branch
 419   // (NULL bcp).  We pass zero for it.  The call returns the address
 420   // of the verified entry point for the method or NULL if the
 421   // compilation did not complete (either went background or bailed
 422   // out).
 423   __ mov(c_rarg1, 0);
 424   __ call_VM(noreg,
 425              CAST_FROM_FN_PTR(address,
 426                               InterpreterRuntime::frequency_counter_overflow),
 427              c_rarg1);
 428 
 429   __ b(*do_continue);
 430 }
 431 
 432 // See if we've got enough room on the stack for locals plus overhead.
 433 // The expression stack grows down incrementally, so the normal guard
 434 // page mechanism will work for that.
 435 //
  436 // NOTE: Since the additional locals are also always pushed (this wasn't
  437 // obvious in generate_method_entry), the guard should work for them
  438 // too.
 439 //
 440 // Args:
 441 //      r3: number of additional locals this frame needs (what we must check)
 442 //      rmethod: Method*
 443 //
 444 // Kills:
 445 //      r0
 446 void InterpreterGenerator::generate_stack_overflow_check(void) {
 447 
  448   // monitor entry size: see picture of stack set
  449   // (generate_method_entry) and frame_aarch32.hpp
 450   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 451 
  452   // total overhead size: entry_size + (saved rfp through expr stack
  453   // bottom).  Be sure to change this if you add/subtract anything
  454   // to/from the overhead area
 455   const int overhead_size =
 456     -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
 457 
 458   const int page_size = os::vm_page_size();
 459 
 460   Label after_frame_check;
 461 
 462   // see if the frame is greater than one page in size. If so,
 463   // then we need to verify there is enough stack space remaining
 464   // for the additional locals.
 465   //
 466   __ mov(rscratch1, (page_size - overhead_size) / Interpreter::stackElementSize);
 467   __ cmp(r3, rscratch1);
 468   __ b(after_frame_check, Assembler::LS);
 469 
  470   // compute sp as if this were going to be the last frame on
  471   // the stack before the red zone
 472 
 473   const Address stack_base(rthread, Thread::stack_base_offset());
 474   const Address stack_size(rthread, Thread::stack_size_offset());
 475 
 476   // locals + overhead, in bytes
 477   __ mov(r0, overhead_size);
 478   __ add(r0, r0, r3, lsl(Interpreter::logStackElementSize));  // 1 slot per parameter.
 479 
 480   __ ldr(rscratch1, stack_base);
 481   __ ldr(rscratch2, stack_size);
 482 
 483 #ifdef ASSERT
 484   Label stack_base_okay, stack_size_okay;
 485   // verify that thread stack base is non-zero
 486   __ cbnz(rscratch1, stack_base_okay);
 487   __ stop("stack base is zero");
 488   __ bind(stack_base_okay);
 489   // verify that thread stack size is non-zero
 490   __ cbnz(rscratch2, stack_size_okay);
 491   __ stop("stack size is zero");
 492   __ bind(stack_size_okay);
 493 #endif
 494 
 495   // Add stack base to locals and subtract stack size
 496   __ sub(rscratch1, rscratch1, rscratch2); // Stack limit
 497   __ add(r0, r0, rscratch1);
 498 
 499   // Use the maximum number of pages we might bang.
 500   const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
 501                                                                               (StackRedPages+StackYellowPages);
 502 
 503   // add in the red and yellow zone sizes
 504   __ add(r0, r0, max_pages * page_size * 2);
 505 
 506   // check against the current stack bottom
 507   __ cmp(sp, r0);
 508   __ b(after_frame_check, Assembler::HI);
 509 
 510   // Remove the incoming args, peeling the machine SP back to where it
  511   // was in the caller.
 512   __ mov(sp, r4);
 513 
 514   // Note: the restored frame is not necessarily interpreted.
 515   // Use the shared runtime version of the StackOverflowError.
 516   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
 517   __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
 518 
 519   // all done with frame size check
 520   __ bind(after_frame_check);
 521 }
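     // The whole check above, as a pseudo-C sketch (illustrative only):
     //   if (additional_locals <= (page_size - overhead_size) / stackElementSize)
     //     goto after_frame_check;               // frame fits within one page
     //   limit = stack_base - stack_size         // thread's stack limit
     //           + locals_bytes + overhead_size
     //           + 2 * max_pages * page_size;    // red/yellow/shadow slack
     //   if (sp > limit) goto after_frame_check; // enough room left
     //   sp = r4;                                // peel back to caller's sp
     //   throw StackOverflowError;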
 522 
 523 // Allocate monitor and lock method (asm interpreter)
 524 //
 525 // Args:
 526 //      rmethod: Method*
 527 //      rlocals: locals
 528 //
 529 // Kills:
 530 //      r0
 531 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
 532 //      rscratch1, rscratch2 (scratch regs)
 533 void InterpreterGenerator::lock_method(void) {
 534   // synchronize method
 535   const Address access_flags(rmethod, Method::access_flags_offset());
 536   const Address monitor_block_top(
 537         rfp,
 538         frame::interpreter_frame_monitor_block_top_offset * wordSize);
 539   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
 540 
 541 #ifdef ASSERT
 542   {
 543     Label L;
 544     __ ldr(r0, access_flags);
 545     __ tst(r0, JVM_ACC_SYNCHRONIZED);
 546     __ b(L, Assembler::NE);
 547     __ stop("method doesn't need synchronization");
 548     __ bind(L);
 549   }
 550 #endif // ASSERT
 551 
 552   // get synchronization object
 553   {
 554     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 555     Label done;
 556     __ ldr(r0, access_flags);
 557     __ tst(r0, JVM_ACC_STATIC);
 558     // get receiver (assume this is frequent case)
 559     __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
 560     __ b(done, Assembler::EQ);
 561     __ ldr(r0, Address(rmethod, Method::const_offset()));
 562     __ ldr(r0, Address(r0, ConstMethod::constants_offset()));
 563     __ ldr(r0, Address(r0,
 564                            ConstantPool::pool_holder_offset_in_bytes()));
 565     __ ldr(r0, Address(r0, mirror_offset));
 566 
 567 #ifdef ASSERT
 568     {
 569       Label L;
 570       __ cbnz(r0, L);
 571       __ stop("synchronization object is NULL");
 572       __ bind(L);
 573     }
 574 #endif // ASSERT
 575 
 576     __ bind(done);
 577   }
 578 
 579   // add space for monitor & lock
 580   __ sub(sp, sp, entry_size); // add space for a monitor entry
 581   __ mov(rscratch1, sp);
 582   __ str(rscratch1, monitor_block_top);  // set new monitor block top
 583   // store object
 584   __ str(r0, Address(sp, BasicObjectLock::obj_offset_in_bytes()));
 585   __ mov(c_rarg1, sp); // object address
 586   __ lock_object(c_rarg1);
 587 }
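     // The monitor slot carved out above is a BasicObjectLock, i.e. (sketch):
     //   class BasicObjectLock { BasicLock _lock; oop _obj; };
     // entry_size bytes in total; _obj is stored here and _lock is filled in
     // by lock_object() when it installs the displaced mark word.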
 588 
  589 // Generate a fixed interpreter frame. The setup is identical for
  590 // interpreted methods and for native methods, hence the shared code.
 591 //
 592 // Args:
 593 //      lr: return address
 594 //      rmethod: Method*
 595 //      rlocals: pointer to locals
 596 //      rcpool: cp cache
 597 //      stack_pointer: previous sp
 598 //      r4 contains the sender sp
 599 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 600   // initialize fixed part of activation frame
 601   __ reg_printf("About to print native entry, rmethod = %p\n", rmethod);
 602   /*__ mov(rscratch1, (address)0x62829d20);
 603   __ cmp(rscratch1, rmethod);
 604   Label skip;
 605   __ b(skip, Assembler::NE);
 606   __ bkpt(111);
 607   __ bind(skip);*/
 608 
 609   __ print_method_entry(rmethod, native_call);
 610 
 611   if (native_call) {
 612     __ sub(sp, sp, 12 *  wordSize);
 613     __ mov(rbcp, 0);
 614     __ strd(sp, rbcp, Address(sp));
 615     // add 2 zero-initialized slots for native calls
 616     __ strd(rbcp, rbcp, Address(sp, 10 * wordSize));
  617     // Note: rbcp is still zero here, so the strd stores two zeros
 618   } else {
 619     __ sub(sp, sp, 10 *  wordSize);
 620     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));      // get ConstMethod
 621     __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
 622     __ strd(sp, rbcp, Address(sp));
 623   }
 624 
 625   if (ProfileInterpreter) {
 626     Label method_data_continue;
 627     __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
 628     __ cbz(rscratch1, method_data_continue);
 629     __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
 630     __ bind(method_data_continue);
 631     __ strd(rscratch1, rmethod, Address(sp, 4 * wordSize));  // save Method* and mdp (method data pointer)
 632   } else {
 633     __ mov(rscratch1, 0);
 634     __ strd(rscratch1, rmethod, Address(sp, 4 * wordSize));        // save Method* (no mdp)
 635   }
 636   __ ldr(rcpool, Address(rmethod, Method::const_offset()));
 637   __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
 638   __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
 639   __ strd(rlocals, rcpool, Address(sp, 2 * wordSize));
 640 
 641   // this code sets up the stack frame, in the same fashion as enter()
 642   __ strd(rfp, lr, Address(sp, 8 * wordSize));
 643   // point rfp to location of old pc
 644   __ add(rfp, sp, 9 * wordSize);
 645 
 646   __ reg_printf("Three-quarters through\n");
 647   // set sender sp
 648   // leave last_sp as null
 649   __ mov(rscratch1, 0);
 650   // r4 contains the sender sp
 651   __ strd(rscratch1, r4, Address(sp, 6 * wordSize));
 652 
 653   // Move SP out of the way
 654   /*if (! native_call) {
 655     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 656     __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
 657     __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
 658     __ sub(rscratch1, sp, rscratch1, lsl(2));
 659     __ bic(sp, rscratch1, 0xf);
 660   }*/
  661   // FIXME: This code moves the sp to after the end of the stack; if this is
  662   // what's happening, some calls out of the VM may need to be patched
 663   __ reg_printf("Fully through\n");
 664 }
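     // Resulting fixed frame (a sketch derived from the stores above; offsets
     // in words from the post-sub sp, each slot wordSize bytes):
     //   sp[0] = sp after the sub  (expression stack bottom / monitor top)
     //   sp[1] = rbcp              (bcp, or 0 for native calls)
     //   sp[2] = rlocals
     //   sp[3] = rcpool            (ConstantPoolCache*)
     //   sp[4] = mdp               (MethodData data pointer, or 0)
     //   sp[5] = rmethod           (Method*)
     //   sp[6] = 0                 (last_sp, null until next Java call)
     //   sp[7] = r4                (sender sp)
     //   sp[8] = saved rfp
     //   sp[9] = lr                (return address; rfp points here)
     // Native frames allocate two extra zero-initialized slots, sp[10..11],
     // presumably the oop temp area read by the object result handler.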
 665 
 666 // End of helpers
 667 
 668 // Various method entries
 669 //------------------------------------------------------------------------------------------------------------------------
 670 //
 671 //
 672 
 673 // Method entry for java.lang.ref.Reference.get.
 674 address InterpreterGenerator::generate_Reference_get_entry(void) {
 675 #if INCLUDE_ALL_GCS
 676   // Code: _aload_0, _getfield, _areturn
 677   // parameter size = 1
 678   //
 679   // The code that gets generated by this routine is split into 2 parts:
 680   //    1. The "intrinsified" code for G1 (or any SATB based GC),
 681   //    2. The slow path - which is an expansion of the regular method entry.
 682   //
 683   // Notes:-
 684   // * In the G1 code we do not check whether we need to block for
 685   //   a safepoint. If G1 is enabled then we must execute the specialized
 686   //   code for Reference.get (except when the Reference object is null)
 687   //   so that we can log the value in the referent field with an SATB
 688   //   update buffer.
 689   //   If the code for the getfield template is modified so that the
 690   //   G1 pre-barrier code is executed when the current method is
 691   //   Reference.get() then going through the normal method entry
 692   //   will be fine.
 693   // * The G1 code can, however, check the receiver object (the instance
 694   //   of java.lang.Reference) and jump to the slow path if null. If the
 695   //   Reference object is null then we obviously cannot fetch the referent
 696   //   and so we don't need to call the G1 pre-barrier. Thus we can use the
 697   //   regular method entry code to generate the NPE.
 698   //
  699   // This code is based on generate_accessor_entry.
 700   //
 701   // rmethod: Method*
  702   // r4: senderSP, must be preserved for slow path; set SP to it on fast path
 703 
 704   address entry = __ pc();
 705 
 706   const int referent_offset = java_lang_ref_Reference::referent_offset;
 707   guarantee(referent_offset > 0, "referent offset not initialized");
 708 
 709   if (UseG1GC) {
 710     Label slow_path;
 711     const Register local_0 = c_rarg0;
 712     // Check if local 0 != NULL
 713     // If the receiver is null then it is OK to jump to the slow path.
 714     __ ldr(local_0, Address(sp, 0));
 715     __ cbz(local_0, slow_path);
 716 
 717 
 718     // Load the value of the referent field.
 719     const Address field_address(local_0, referent_offset);
 720     __ load_heap_oop(local_0, field_address);
 721 
 722     // Generate the G1 pre-barrier code to log the value of
 723     // the referent field in an SATB buffer.
 724     __ enter(); // g1_write may call runtime
 725     __ g1_write_barrier_pre(noreg /* obj */,
 726                             local_0 /* pre_val */,
 727                             rthread /* thread */,
 728                             rscratch2 /* tmp */,
 729                             true /* tosca_live */,
 730                             true /* expand_call */);
 731     __ leave();
 732     // areturn
 733     __ mov(sp, r4);           // set sp to sender sp
 734     __ stop("Check sp restored correctly, may be get_dispatch()?");
 735     //__ bic(sp, r13, 0xf);  // done with stack
 736     __ b(lr);
 737 
 738     // generate a vanilla interpreter entry as the slow path
 739     __ bind(slow_path);
 740     (void) generate_normal_entry(false);
 741 
 742     return entry;
 743   }
 744 #endif // INCLUDE_ALL_GCS
 745 
 746   // If G1 is not enabled then attempt to go through the accessor entry point
 747   // Reference.get is an accessor
 748   return generate_accessor_entry();
 749 }
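     // In effect the fast path above performs the equivalent of (illustrative
     // Java, plus the SATB pre-barrier logging):
     //   public T get() { return this.referent; }
     // i.e. the _aload_0, _getfield, _areturn sequence mentioned above.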
 750 
 751 void InterpreterGenerator::bang_stack_shadow_pages(bool native_call, Register rscratch) {
 752   // Bang each page in the shadow zone. We can't assume it's been done for
 753   // an interpreter frame with greater than a page of locals, so each page
 754   // needs to be checked.  Only true for non-native.
 755   assert(rscratch != rscratch2, "can't be");
 756   if (UseStackBanging) {
 757     const int start_page = native_call ? StackShadowPages : 1;
 758     const int page_size = os::vm_page_size();
 759     __ mov(rscratch, 0);
 760     for (int pages = start_page; pages <= StackShadowPages ; pages++) {
 761       __ sub(rscratch2, sp, pages*page_size);
 762       __ str(rscratch, Address(rscratch2));
 763     }
 764   }
 765 }
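     // The emitted bang sequence, as a pseudo-C sketch (illustrative only):
     //   for (p = start_page; p <= StackShadowPages; p++)
     //     *(sp - p * page_size) = 0;
     // For native calls start_page == StackShadowPages, so only the furthest
     // page is touched; interpreted frames touch every shadow page.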
 766 
 767 
 768 // Interpreter stub for calling a native method. (asm interpreter)
 769 // This sets up a somewhat different looking stack for calling the
 770 // native method than the typical interpreter frame setup.
 771 address InterpreterGenerator::generate_native_entry(bool synchronized) {
 772   // determine code generation flags
 773   bool inc_counter  = UseCompiler || CountCompiledCalls;
 774 
 775   // r1: Method*
 776   // r4: sender sp
 777 
 778   address entry_point = __ pc();
 779   __ reg_printf("entering generate_native_entry, lr = %p, rfp = %p\n\tRBCP = %p\n", lr, rfp, rbcp);
 780 
 781   const Address constMethod       (rmethod, Method::const_offset());
 782   const Address access_flags      (rmethod, Method::access_flags_offset());
 783   const Address size_of_parameters(r2, ConstMethod::
 784                                        size_of_parameters_offset());
 785 
 786   // get parameter size (always needed)
 787   __ ldr(r2, constMethod);
 788   __ load_unsigned_short(r2, size_of_parameters);
 789 
  790   // native calls don't need the stack size check since they have no
  791   // expression stack, the arguments are already on the stack, and
  792   // we only add a handful of words to the stack
 793 
 794   // rmethod: Method*
 795   // r2: size of parameters
 796   // r4: sender sp
 797 
 798   // for natives the size of locals is zero
 799 
 800   // compute beginning of parameters (rlocals)
 801   __ add(rlocals, sp, r2, lsl(2));
 802   __ sub(rlocals, rlocals, wordSize);
 803   __ reg_printf("(start of parameters) rlocals = %p, nparams = %d\n", rlocals, r2);
 804 
 805   // initialize fixed part of activation frame
 806   generate_fixed_frame(true);
 807   __ reg_printf("pushed new fixed frame, lr = %p, rfp = %p\n", lr, rfp);
 808 
  809   Register locals_sp = r4; // this overwrites rdispatch; we can restore it at the end
  810   // !! If this changes, change the end of arguments in interpreterRT_aarch32.cpp
 811   //__ mov(r4, sp); //Save top of arguments
 812 
 813   // make sure method is native & not abstract
 814 #ifdef ASSERT
 815   __ ldr(r0, access_flags);
 816   {
 817     Label L;
 818     __ tst(r0, JVM_ACC_NATIVE);
 819     __ b(L, Assembler::NE);
 820     __ stop("tried to execute non-native method as native");
 821     __ bind(L);
 822   }
 823   {
 824     Label L;
 825     __ tst(r0, JVM_ACC_ABSTRACT);
 826     __ b(L, Assembler::EQ);
 827     __ stop("tried to execute abstract method in interpreter");
 828     __ bind(L);
 829   }
 830 #endif
 831 
 832   // Since at this point in the method invocation the exception
 833   // handler would try to exit the monitor of synchronized methods
 834   // which hasn't been entered yet, we set the thread local variable
 835   // _do_not_unlock_if_synchronized to true. The remove_activation
 836   // will check this flag.
 837 
 838    const Address do_not_unlock_if_synchronized(rthread,
 839         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
 840   __ mov(rscratch2, true);
 841   __ strb(rscratch2, do_not_unlock_if_synchronized);
 842 
 843   // increment invocation count & check for overflow
 844   Label invocation_counter_overflow;
 845   if (inc_counter) {
 846     generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
 847   }
 848 
 849   Label continue_after_compile;
 850   __ bind(continue_after_compile);
 851 
 852   bang_stack_shadow_pages(true, rscratch1);
 853   // Note rscratch1 will contain zero here due to bang_stack_shadow_pages
 854   // reset the _do_not_unlock_if_synchronized flag
 855   //__ mov(rscratch1, 0);
 856   __ strb(rscratch1, do_not_unlock_if_synchronized);
 857 
 858   // check for synchronized methods
 859   // Must happen AFTER invocation_counter check and stack overflow check,
  860   // so the method is not locked if the counter overflows.
 861   if (synchronized) {
 862     lock_method();
 863   } else {
 864     // no synchronization necessary
 865 #ifdef ASSERT
 866     {
 867       Label L;
 868       __ ldr(r0, access_flags);
 869       __ tst(r0, JVM_ACC_SYNCHRONIZED);
 870       __ b(L, Assembler::EQ);
 871       __ stop("method needs synchronization");
 872       __ bind(L);
 873     }
 874 #endif
 875   }
 876 
 877   // start execution
 878 #ifdef ASSERT
 879   {
 880     Label L;
 881     const Address monitor_block_top(rfp,
 882                  frame::interpreter_frame_monitor_block_top_offset * wordSize);
 883     __ ldr(rscratch1, monitor_block_top);
 884     __ cmp(sp, rscratch1);
 885     __ b(L, Assembler::EQ);
 886     __ stop("broken stack frame setup in interpreter");
 887     __ bind(L);
 888   }
 889 #endif
 890 
 891   // jvmti support
 892   __ notify_method_entry();
 893 
 894   const Register result_handler = rlocals;
  895   // This is recomputed for the new function; result_handler is not written
  896   // until after the function has been called
 897 
 898   // allocate space for parameters
 899   __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 900   __ load_unsigned_short(rscratch1, Address(rscratch1, ConstMethod::size_of_parameters_offset()));
 901 
 902   __ sub(sp, sp, rscratch1, lsl(Interpreter::logStackElementSize + 1));
  903   // The +1 doubles the space allocated for parameters; this is likely far more
  904   // than needed, since in the worst case parameters placed on the stack align as
  905   // LONG | INT | EMPTY | LONG ..., which increases the space used by only half.
 906   __ align_stack();
 907   __ mov(locals_sp, sp);
 908   __ reg_printf("Stack Pointer on arg copy, sp = %p, locals_sp = %p, rlocals = %p\n", sp, locals_sp, rlocals);
 909 
 910   // get signature handler
 911   {
 912     Label L;
 913     __ ldr(rscratch1, Address(rmethod, Method::signature_handler_offset()));
 914     __ cmp(rscratch1, 0);
 915     __ b(L, Assembler::NE);
 916     __ reg_printf("Prepare_native_call, locals_sp = %p, rlocals = %p\n", locals_sp, rlocals);
 917     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
 918                                        InterpreterRuntime::prepare_native_call), rmethod);
 919     __ reg_printf("Finished prepare_native_call, locals_sp = %p, rlocals = %p\n", locals_sp, rlocals);
 920     __ ldr(rscratch1, Address(rmethod, Method::signature_handler_offset()));
 921     __ bind(L);
 922   }
 923 
 924   // call signature handler
 925   assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
 926          "adjust this code");
 927   assert(InterpreterRuntime::SignatureHandlerGenerator::to() == locals_sp,
 928          "adjust this code");
 929   assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
 930           "adjust this code");
 931 
 932   // The generated handlers do not touch rmethod (the method).
 933   // However, large signatures cannot be cached and are generated
 934   // each time here.  The slow-path generator can do a GC on return,
 935   // so we must reload it after the call.
 936   __ reg_printf("**BEFORE**\nrlocals = %p,locals_sp = %p, sp = %p\n", rlocals, locals_sp, sp);
 937   __ reg_printf("About to call the Method::signature_handler = %p\n", rscratch1);
 938   __ bl(rscratch1);
 939   __ reg_printf("**AFTER**\nr0 : %p, r1 : %p, r2 : %p\n", r0, r1, r2);
 940   __ reg_printf("r3 : %p, sp : %p\n", r3, sp);
 941   __ get_method(rmethod);        // slow path can do a GC, reload rmethod
 942 
 943 
 944 
 945   // result handler is in r0
 946   // set result handler
 947   __ mov(result_handler, r0);
 948   // pass mirror handle if static call
 949   {
 950     Label L;
 951     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 952     __ ldr(rscratch1, Address(rmethod, Method::access_flags_offset()));
 953     __ tst(rscratch1, JVM_ACC_STATIC);
 954     __ b(L, Assembler::EQ);
 955     // get mirror
 956     __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
 957     __ ldr(rscratch1, Address(rscratch1, ConstMethod::constants_offset()));
 958     __ ldr(rscratch1, Address(rscratch1, ConstantPool::pool_holder_offset_in_bytes()));
 959     __ ldr(rscratch1, Address(rscratch1, mirror_offset));
 960     // copy mirror into activation frame
 961     __ str(rscratch1, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
 962     // pass handle to mirror
 963     __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
 964     __ bind(L);
 965   }
 966 
 967   // get native function entry point in r14
 968   Register native_entry_point = r14;
 969 
 970   {
 971     Label L;
 972     __ ldr(native_entry_point, Address(rmethod, Method::native_function_offset()));
 973     address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
 974     __ mov(rscratch2, unsatisfied);
 975     __ ldr(rscratch2, rscratch2);
 976     __ reg_printf("QWERTY native_entry_point = %p, unsatisfied_link_entry_point = %p\n", native_entry_point, rscratch2);
 977     __ cmp(native_entry_point, rscratch2);
 978     __ b(L, Assembler::NE);
 979     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
 980                                        InterpreterRuntime::prepare_native_call), rmethod);
 981     __ get_method(rmethod);
 982     __ ldr(native_entry_point, Address(rmethod, Method::native_function_offset()));
 983     __ bind(L);
 984   }
 985 
 986   // pass JNIEnv
 987   __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
 988 
 989   // It is enough that the pc() points into the right code
 990   // segment. It does not have to be the correct return pc.
 991   __ set_last_Java_frame(sp, rfp, (address)NULL, rscratch1);
 992 
 993   // change thread state
 994 #ifdef ASSERT
 995   {
 996     Label L;
 997     __ ldr(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
 998     __ cmp(rscratch1, _thread_in_Java);
 999     __ b(L, Assembler::EQ);
1000     __ stop("Wrong thread state in native stub");
1001     __ bind(L);
1002   }
1003 #endif
1004 
1005   // Change state to native
1006   __ mov(rscratch1, _thread_in_native);
1007   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1008   __ dmb(Assembler::ISH);
1009   __ str(rscratch1, Address(rscratch2));
1010 
1011   __ reg_printf("Calling native method, lr = %p & rmethod = %p\n", lr, rmethod);
1012   // Call the native method.
1013   /*__ reg_printf("**ONCALL**\nr0 : %p\nr1 : %p\nr2 : %p\n", r0, r1, r2);
1014   __ reg_printf("r3 : %p\n\nr4 : %p\nrloc : %p\n", r3, r4, rlocals);*/
1015   __ reg_printf("Stack Pointer on entry to native, sp = %p\n", sp);
1016   __ bl(native_entry_point);
1017   __ reg_printf("Returned from native, lr = %p, r1 = %p, r0 = %p\n", lr, r1, r0);
1018   __ maybe_isb();
1019   __ get_method(rmethod);
 1020   // result potentially in r0, <r0:r1> or d0
1021 
1022   // make room for the pushes we're about to do
1023   //__ sub(rscratch1, sp, 4 * wordSize);
1024   //__ bic(sp, rscratch1, 0xf);
1025   // NOTE: The order of these pushes is known to frame::interpreter_frame_result
1026   // in order to extract the result of a method call. If the order of these
1027   // pushes change or anything else is added to the stack then the code in
1028   // interpreter_frame_result must also change.
1029   __ reg_printf("Before push dtos, ltos. sp = %p\n", sp);
1030   __ push(dtos);
1031   __ push(ltos);
1032 
1033   // change thread state
1034   __ mov(rscratch1, _thread_in_native_trans);
1035   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1036   __ dmb(Assembler::ISH);
1037   __ str(rscratch1, Address(rscratch2));
1038   __ reg_printf("before os::is_MP\n");
1039   if (os::is_MP()) {
1040     if (UseMembar) {
1041       // Force this write out before the read below
1042       __ dsb(Assembler::SY);
1043     } else {
1044       // Write serialization page so VM thread can do a pseudo remote membar.
1045       // We use the current thread pointer to calculate a thread specific
1046       // offset to write to within the page. This minimizes bus traffic
1047       // due to cache line collision.
1048       __ serialize_memory(rthread, rscratch2);
1049     }
1050   }
1051   __ reg_printf("after os::is_MP\n");
1052   // check for safepoint operation in progress and/or pending suspend requests
1053   {
1054     Label Continue;
1055     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1056     assert(SafepointSynchronize::_not_synchronized == 0,
1057            "SafepointSynchronize::_not_synchronized");
1058     __ ldr(rscratch2, rscratch2);
1059     Label L;
1060     __ cbnz(rscratch2, L);
1061     __ ldr(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
1062     __ cbz(rscratch2, Continue);
1063     __ bind(L);
1064 
1065     // Don't use call_VM as it will see a possible pending exception
1066     // and forward it and never return here preventing us from
1067     // clearing _last_native_pc down below. So we do a runtime call by
1068     // hand.
1069     //
1070     __ mov(c_rarg0, rthread);
1071     __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1072     //__ blrt(rscratch2, 1, 0, 0);
1073     __ bl(rscratch2);
1074     __ maybe_isb();
1075     __ get_method(rmethod);
1076     __ bind(Continue);
1077   }
1078   __ reg_printf("finished safepoint check\n");
1079   // change thread state
1080   __ mov(rscratch1, _thread_in_Java);
1081   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1082   __ dmb(Assembler::ISH);
1083   __ str(rscratch1, Address(rscratch2));
1084 
1085   // reset_last_Java_frame
1086   __ reset_last_Java_frame(true, true);
1087 
1088   // reset handle block
1089   __ ldr(rscratch2, Address(rthread, JavaThread::active_handles_offset()));
1090   __ mov(rscratch1, 0);
1091   __ str(rscratch1, Address(rscratch2, JNIHandleBlock::top_offset_in_bytes()));
1092 
 1093   // If the result is an oop, unbox it and store it in the frame where the GC
 1094   // will see it and the result handler will pick it up
1095   __ reg_printf("finished checking last_Java_frame\n");
1096   {
1097     Label no_oop, store_result;
1098     //__ bkpt(345);
1099     //__ adr(rscratch2, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1100     __ mov(rscratch2, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
1101     __ reg_printf("Comparing rscratch2 = %p and result_handler = %p\n", rscratch2, result_handler);
1102 
1103     __ cmp(rscratch2, result_handler);
1104     __ b(no_oop, Assembler::NE);
1105     __ reg_printf("It's an oop.\n");
1106     // retrieve result
1107     __ pop(ltos);
1108     __ cbz(r0, store_result);
1109     __ ldr(r0, Address(r0));
1110     __ bind(store_result);
1111     __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
1112     // keep stack depth as expected by pushing oop which will eventually be discarded
1113     __ push(ltos);
1114     __ bind(no_oop);
1115   }
1116 
1117   {
1118     Label no_reguard;
1119     __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
1120     __ ldrb(rscratch1, Address(rscratch1));
1121     __ cmp(rscratch1, JavaThread::stack_guard_yellow_disabled);
1122     __ b(no_reguard, Assembler::NE);
1123 
1124     __ pusha(); // XXX only save smashed registers
1125     __ mov(c_rarg0, rthread);
1126     __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
1127     __ bl(rscratch2);
1128     __ popa(); // XXX only restore smashed registers
1129     __ bind(no_reguard);
1130   }
1131   __ reg_printf("Restoring java-ish things\n");
1132   // The method register is junk from after the thread_in_native transition
1133   // until here.  Also can't call_VM until the bcp has been
1134   // restored.  Need bcp for throwing exception below so get it now.
1135   __ get_method(rmethod);
1136   __ get_dispatch(); // used to save sp in for args
1137   // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
1138   // rbcp == code_base()
1139   __ ldr(rbcp, Address(rmethod, Method::const_offset()));   // get ConstMethod*
1140   __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));          // get codebase
1141   // handle exceptions (exception handling will handle unlocking!)
1142   {
1143     Label L;
1144     __ reg_printf("Checking pending exceptions\n");
1145     __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
1146     __ cbz(rscratch1, L);
1147     // Note: At some point we may want to unify this with the code
1148     // used in call_VM_base(); i.e., we should use the
1149     // StubRoutines::forward_exception code. For now this doesn't work
1150     // here because the rsp is not correctly set at this point.
1151     __ reg_printf("Calling vm to throw_pending_exception\n");
1152 
1153     // Need to restore lr? - introduced on aarch32 port
1154     //__ ldr(lr, Address(rfp, frame::return_addr_offset));
1155 
1156     __ MacroAssembler::call_VM(noreg,
1157                                CAST_FROM_FN_PTR(address,
1158                                InterpreterRuntime::throw_pending_exception));
1159     __ should_not_reach_here();
1160     __ bind(L);
1161   }
1162 
1163   // do unlocking if necessary
1164   {
1165     Label L;
1166     __ reg_printf("testing if we need to unlock\n");
1167     __ ldr(rscratch1, Address(rmethod, Method::access_flags_offset()));
1168     __ tst(rscratch1, JVM_ACC_SYNCHRONIZED);
1169     __ b(L, Assembler::EQ);
1170     // the code below should be shared with interpreter macro
1171     // assembler implementation
1172     {
1173       Label unlock;
1174       // BasicObjectLock will be first in list, since this is a
1175       // synchronized method. However, need to check that the object
1176       // has not been unlocked by an explicit monitorexit bytecode.
1177 
 1178       // monitor expected in c_rarg1 for slow unlock path
1179       __ lea (c_rarg1, Address(rfp,   // address of first monitor
1180                                (intptr_t)(frame::interpreter_frame_initial_sp_offset *
1181                                           wordSize - sizeof(BasicObjectLock))));
1182 
1183       __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
1184       __ reg_printf("Checking if we are already unlocked\n");
1185       __ cbnz(rscratch1, unlock);
1186 
1187       // Entry already unlocked, need to throw exception
1188       __ MacroAssembler::call_VM(noreg,
1189                                  CAST_FROM_FN_PTR(address,
1190                    InterpreterRuntime::throw_illegal_monitor_state_exception));
1191       __ should_not_reach_here();
1192 
1193       __ bind(unlock);
1194       __ reg_printf("Doing unlock\n");
1195       __ unlock_object(c_rarg1);
1196     }
1197     __ bind(L);
1198   }
1199   __ reg_printf("finished unlocking\n");
1200   // jvmti support
1201   // Note: This must happen _after_ handling/throwing any exceptions since
1202   //       the exception handler code notifies the runtime of method exits
1203   //       too. If this happens before, method entry/exit notifications are
1204   //       not properly paired (was bug - gri 11/22/99).
1205   __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
1206 
1207   // restore potential result in r0:d0, call result handler to
1208   // restore potential result in ST0 & handle result
1209   __ reg_printf("Before pop dtos, ltos. sp = %p\n", sp);
1210   __ pop(ltos);
1211   __ pop(dtos);
1212 
1213   __ reg_printf("Calling result handler, r1 = %p, r0 = %p\n", r1, r0);
1214   __ bl(result_handler);
1215   __ reg_printf("Finished result_handler\n RFP NOW = %p, r0 = %p\n", rfp, r0);
1216 
 1217   // remove activation; restore sp to sender_sp
1218   __ ldr(rscratch1, Address(rfp,
1219                     frame::interpreter_frame_sender_sp_offset *
1220                     wordSize)); // get sender sp
1221   // remove frame anchor & restore sp
1222   __ leave();
1223 
1224   __ mov(sp, rscratch1); // Native frame so two extra fields
1225   __ reg_printf("Returning to Java execution, restored frame = %p, lr = %p\n\tRBCP = %p\n", rfp, lr, rbcp);
1226   __ b(lr);
1227 
1228   if (inc_counter) {
1229     // Handle overflow of counter and compile method
1230     __ bind(invocation_counter_overflow);
1231     generate_counter_overflow(&continue_after_compile);
1232   }
1233 
1234   return entry_point;
1235 }
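     // Thread-state transitions performed by the native entry above:
     //   _thread_in_Java         -> _thread_in_native        (before the call)
     //   _thread_in_native       -> _thread_in_native_trans  (after the call)
     //   _thread_in_native_trans -> _thread_in_Java          (after the
     //                              safepoint/suspend check)
     // Each state store is preceded by a dmb(ISH) so the VM thread observes
     // the transition in order.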
1236 
1237 address InterpreterGenerator::generate_CRC32_update_entry() {
1238   if (UseCRC32Intrinsics) {
1239     address entry = __ pc();
1240 
1241     // rmethod: Method*
1242     // sp: args
1243 
1244     Label slow_path;
1245     // If we need a safepoint check, generate full interpreter entry.
1246     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1247     assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
1248     __ ldr(rscratch2, Address(rscratch2));
1249     __ cbnz(rscratch2, slow_path);
1250 
 1251     // We don't generate a local frame and don't align the stack, because we
 1252     // call stub code and there is no safepoint on this path.
1253 
1254     // Load parameters
1255     const Register crc = c_rarg0;  // crc
1256     const Register val = c_rarg1;  // source java byte value
1257     const Register tbl = c_rarg2;  // scratch
1258 
1259     // Arguments are reversed on java expression stack
1260     __ ldr(val, Address(sp, 0));              // byte value
1261     __ ldr(crc, Address(sp, wordSize));       // Initial CRC
1262 
1263     __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
1264     __ inv(crc, crc);
1265     __ update_byte_crc32(crc, val, tbl);
1266     __ inv(crc, crc); // result in c_rarg0
1267 
1268     __ mov(sp, r4);
1269     __ ret(lr);
1270 
1271     // generate a vanilla native entry as the slow path
1272     __ bind(slow_path);
1273 
1274     (void) generate_native_entry(false);
1275 
1276     return entry;
1277   }
1278   return generate_native_entry(false);
1279 }
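     // The entry above appears to intrinsify the JDK's private static
     // java.util.zip.CRC32.update(int crc, int b). Java expression stack on
     // entry, as loaded above:
     //   sp[0] = b (byte value), sp[1] = crc
     // The CRC is bit-inverted, run through the table-driven update, inverted
     // again and returned in c_rarg0.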
1280 
1281 address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
1282   if (UseCRC32Intrinsics) {
1283     address entry = __ pc();
1284 
 1285     // rmethod: Method*
 1286     // sp: senderSP, must be preserved for slow path
1287 
1288     Label slow_path;
1289     // If we need a safepoint check, generate full interpreter entry.
1290     __ lea(rscratch2, ExternalAddress(SafepointSynchronize::address_of_state()));
1291     assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
1292     __ ldr(rscratch2, Address(rscratch2));
1293     __ cbnz(rscratch2, slow_path);
1294 
 1295     // We don't generate a local frame and don't align the stack, because we
 1296     // call stub code and there is no safepoint on this path.
1297 
1298     // Load parameters
1299     const Register crc = c_rarg0;  // crc
1300     const Register buf = c_rarg1;  // source java byte array address
1301     const Register len = c_rarg2;  // length
 1302     const Register off = len;     // offset (aliases 'len'; safe because 'len' is loaded only after 'off' is dead)
1303 
1304     // Arguments are reversed on java expression stack
1305     // Calculate address of start element
1306     if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
1307       __ ldr(buf, Address(sp, 2*wordSize)); // long buf
1308       __ ldr(off, Address(sp, wordSize)); // offset
1309       __ add(buf, buf, off); // + offset
1310       __ ldr(crc, Address(sp, 4*wordSize)); // Initial CRC
1311     } else {
1312       __ ldr(buf, Address(sp, 2*wordSize)); // byte[] array
1313       __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
1314       __ ldr(off, Address(sp, wordSize)); // offset
1315       __ add(buf, buf, off); // + offset
1316       __ ldr(crc, Address(sp, 3*wordSize)); // Initial CRC
1317     }
1318     // Can now load 'len' since we're finished with 'off'
1319     __ ldr(len, Address(sp)); // Length
1320 
1321     __ mov(sp, r4); // Restore the caller's SP
1322 
1323     // We are frameless so we can just jump to the stub.
1324     __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
1325 
1326     // generate a vanilla native entry as the slow path
1327     __ bind(slow_path);
1328 
1329     (void) generate_native_entry(false);
1330 
1331     return entry;
1332   }
1333   return generate_native_entry(false);
1334 }
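     // Java expression stack on entry to the updateBytes entry above
     // (arguments in reverse push order; a sketch derived from the loads):
     //   updateBytes:      sp[0] = len, sp[1] = off, sp[2] = byte[] buf,
     //                     sp[3] = crc
     //   updateByteBuffer: sp[0] = len, sp[1] = off, sp[2..3] = long addr,
     //                     sp[4] = crc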
1335 
1336 //
1337 // Generic interpreted method entry to (asm) interpreter
1338 //
1339 address InterpreterGenerator::generate_normal_entry(bool synchronized) {
1340   // determine code generation flags
1341   bool inc_counter = UseCompiler || CountCompiledCalls;
1342 
1343   // r4: sender sp
1344   address entry_point = __ pc();
1345 
1346   const Address constMethod(rmethod, Method::const_offset());
1347   const Address access_flags(rmethod, Method::access_flags_offset());
1348   const Address size_of_parameters(r3,
1349                                    ConstMethod::size_of_parameters_offset());
1350   const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
1351 
1352   // get parameter size (always needed)
1353   // need to load the const method first
1354   __ ldr(r3, constMethod);
1355   __ load_unsigned_short(r2, size_of_parameters);
1356 
1357   // r2: size of parameters
1358 
1359   __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
1360   __ sub(r3, r3, r2); // r3 = no. of additional locals
1361 
1362   // see if we've got enough room on the stack for locals plus overhead.
1363   generate_stack_overflow_check();
1364 
1365   // compute beginning of parameters (rlocals)
1366   __ add(rlocals, sp, r2, lsl(2));
1367   __ sub(rlocals, rlocals, wordSize);
1368 
1369   // Make room for locals
1370   __ sub(rscratch1, sp, r3, lsl(2));
1371   // Align the sp value
1372   __ bic(sp, rscratch1, StackAlignmentInBytes-1);
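       // bic with (StackAlignmentInBytes - 1) rounds sp down to the required
       // alignment (8 bytes under the AAPCS, assuming StackAlignmentInBytes
       // is 8 on this port).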
1373 
1374   // r3 - # of additional locals
1375   // allocate space for locals
1376   // explicitly initialize locals
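       // Locals are zeroed eagerly so that, should a GC occur before the
       // method writes them, the collector never sees a stale stack word
       // where an oop is expected.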
1377   {
1378     Label exit, loop;
1379     __ mov(rscratch2, 0);
1380     __ cmp(r3, 0);
1381     __ b(exit, Assembler::LE); // do nothing if r3 <= 0
1382     __ bind(loop);
1383     __ str(rscratch2, Address(__ post(rscratch1, wordSize)));
1384     __ subs(r3, r3, 1); // until everything initialized
1385     __ b(loop, Assembler::NE);
1386     __ bind(exit);
1387   }
1388   __ reg_printf("Done locals space\n", r2);
1389 
1390   // initialize fixed part of activation frame
1391   __ reg_printf("About to do fixed frame\n", r2);
1392   generate_fixed_frame(false);
1393   // And the base dispatch table
1394   __ get_dispatch();
1395   // make sure method is not native & not abstract
1396   __ reg_printf("Just done generate_fixed_frame; rmethod = %p\n", rmethod);
1397 #ifdef ASSERT
1398   __ ldr(r0, access_flags);
1399   {
1400     Label L;
1401     __ tst(r0, JVM_ACC_NATIVE);
1402     __ b(L, Assembler::EQ);
1403     __ stop("tried to execute native method as non-native");
1404     __ bind(L);
1405   }
1406   {
1407     Label L;
1408     __ tst(r0, JVM_ACC_ABSTRACT);
1409     __ b(L, Assembler::EQ);
1410     __ stop("tried to execute abstract method in interpreter");
1411     __ bind(L);
1412   }
1413 #endif
1414 
1415   // At this point in the method invocation the exception handler
1416   // would try to exit the monitor of a synchronized method which
1417   // has not been entered yet, so we set the thread-local variable
1418   // _do_not_unlock_if_synchronized to true. The remove_activation
1419   // code will check this flag.
1420 
1421   const Address do_not_unlock_if_synchronized(rthread,
1422         in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
1423   __ mov(rscratch2, true);
1424   __ strb(rscratch2, do_not_unlock_if_synchronized);
1425 
1426   // increment invocation count & check for overflow
1427   Label invocation_counter_overflow;
1428   Label profile_method;
1429   Label profile_method_continue;
1430   if (inc_counter) {
1431     generate_counter_incr(&invocation_counter_overflow,
1432                           &profile_method,
1433                           &profile_method_continue);
1434     if (ProfileInterpreter) {
1435       __ bind(profile_method_continue);
1436     }
1437   }
1438 
1439   Label continue_after_compile;
1440   __ bind(continue_after_compile);
1441 
1442   bang_stack_shadow_pages(false, rscratch1);
1443   // Note rscratch1 will contain zero here
1444   // reset the _do_not_unlock_if_synchronized flag
1445   __ strb(rscratch1, do_not_unlock_if_synchronized);
1446 
1447   // check for synchronized methods
1448   // Must happen AFTER invocation_counter check and stack overflow check,
1449   // so the method is not locked if the counter overflows.
1450   if (synchronized) {
1451     // Allocate monitor and lock method
1452     lock_method();
1453   } else {
1454     // no synchronization necessary
1455 #ifdef ASSERT
1456     {
1457       Label L;
1458       __ reg_printf("Checking synchronization, rmethod = %p\n", rmethod);
1459       __ ldr(r0, access_flags);
1460       __ tst(r0, JVM_ACC_SYNCHRONIZED);
1461       __ b(L, Assembler::EQ);
1462       __ stop("method needs synchronization");
1463       __ bind(L);
1464     }
1465 #endif
1466   }
1467 
1468   // start execution
1469 #ifdef ASSERT
1470   {
1471     Label L;
1472     const Address monitor_block_top(rfp,
1473         frame::interpreter_frame_monitor_block_top_offset * wordSize);
1474     __ ldr(rscratch1, monitor_block_top);
1475     __ cmp(sp, rscratch1);
1476     __ b(L, Assembler::EQ);
1477     __ stop("broken stack frame setup in interpreter");
1478     __ bind(L);
1479   }
1480 #endif
1481 
1482   // jvmti support
1483   __ notify_method_entry();
1484   __ reg_printf("About to dispatch, rmethod = %p, rlocals = %p\n", rmethod, rlocals);
1485   __ dispatch_next(vtos);
1486   __ reg_printf("Finished dispatch? rmethod = %p\n", rmethod);
1487   // invocation counter overflow
1488   if (inc_counter) {
1489     if (ProfileInterpreter) {
1490       // We have decided to profile this method in the interpreter
1491       __ bind(profile_method);
1492       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1493       __ set_method_data_pointer_for_bcp();
1494       // reload the Method* into r1 (probably redundant here)
1495       __ get_method(r1);
1496       __ b(profile_method_continue);
1497     }
1498     // Handle overflow of counter and compile method
1499     __ bind(invocation_counter_overflow);
1500     generate_counter_overflow(&continue_after_compile);
1501   }
1502 
1503   __ reg_printf("Just completed normal entry, rmethod = %p\n", rmethod);
1504   return entry_point;
1505 }
1506 
1507 address AbstractInterpreterGenerator::generate_method_entry(
1508     AbstractInterpreter::MethodKind kind) {
1509   bool synchronized = false;
1510   address entry_point = NULL;
1511   InterpreterGenerator* ig = (InterpreterGenerator*) this;
1512 
1513   switch (kind) {
1514   case Interpreter::zerolocals:
1515     break;
1516   case Interpreter::zerolocals_synchronized:
1517     synchronized = true;
1518     break;
1519   case Interpreter::native:
1520     entry_point = ig->generate_native_entry(false);
1521     break;
1522   case Interpreter::native_synchronized:
1523     entry_point = ig->generate_native_entry(true);
1524     break;
1525   case Interpreter::empty:
1526     entry_point = ig->generate_empty_entry();
1527     break;
1528   case Interpreter::accessor:
1529     entry_point = ig->generate_accessor_entry();
1530     break;
1531   case Interpreter::abstract:
1532     entry_point = ig->generate_abstract_entry();
1533     break;
1534   case Interpreter::java_lang_math_sin:
1535   case Interpreter::java_lang_math_cos:
1536   case Interpreter::java_lang_math_tan:
1537   case Interpreter::java_lang_math_abs:
1538   case Interpreter::java_lang_math_sqrt:
1539   case Interpreter::java_lang_math_log:
1540   case Interpreter::java_lang_math_log10:
1541   case Interpreter::java_lang_math_pow:
1542   case Interpreter::java_lang_math_exp:
1543     entry_point = ig->generate_math_entry(kind);
1544     break;
1545   case Interpreter::java_lang_ref_reference_get:
1546     entry_point = ig->generate_Reference_get_entry();
1547     break;
1548   case Interpreter::java_util_zip_CRC32_update:
1549     entry_point = ig->generate_CRC32_update_entry();
1550     break;
1551   case Interpreter::java_util_zip_CRC32_updateBytes:
1552   case Interpreter::java_util_zip_CRC32_updateByteBuffer:
1553     entry_point = ig->generate_CRC32_updateBytes_entry(kind);
1554     break;
1555   default:
1556     ShouldNotReachHere();
1557     break;
1558   }
1559 
1560   if (entry_point != NULL) {
1561     return entry_point;
1562   }
1563 
1564   return ig->generate_normal_entry(synchronized);
1565 }
1566 
1567 // These should never be compiled since the interpreter will prefer
1568 // the compiled version to the intrinsic version.
1569 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1570   switch (method_kind(m)) {
1571     case Interpreter::java_lang_math_sin     : // fall thru
1572     case Interpreter::java_lang_math_cos     : // fall thru
1573     case Interpreter::java_lang_math_tan     : // fall thru
1574     case Interpreter::java_lang_math_abs     : // fall thru
1575     case Interpreter::java_lang_math_log     : // fall thru
1576     case Interpreter::java_lang_math_log10   : // fall thru
1577     case Interpreter::java_lang_math_sqrt    : // fall thru
1578     case Interpreter::java_lang_math_pow     : // fall thru
1579     case Interpreter::java_lang_math_exp     :
1580       return false;
1581     default:
1582       return true;
1583   }
1584 }
1585 
1586 // How much stack a method activation needs in words.
1587 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
1588   const int entry_size = frame::interpreter_frame_monitor_size();
1589 
1590   // total overhead size: entry_size + (saved rfp thru expr stack
1591   // bottom).  be sure to change this if you add/subtract anything
1592   // to/from the overhead area
1593   const int overhead_size =
1594     -(frame::interpreter_frame_initial_sp_offset) + entry_size;
1595 
1596   const int stub_code = frame::entry_frame_after_call_words;
1597   const int method_stack = (method->max_locals() + method->max_stack()) *
1598                            Interpreter::stackElementWords;
1599   return (overhead_size + method_stack + stub_code);
1600 }
1601 
1602 // asm based interpreter deoptimization helpers
1603 int AbstractInterpreter::size_activation(int max_stack,
1604                                          int temps,
1605                                          int extra_args,
1606                                          int monitors,
1607                                          int callee_params,
1608                                          int callee_locals,
1609                                          bool is_top_frame) {
1610   // Note: This calculation must exactly parallel the frame setup
1611   // in AbstractInterpreterGenerator::generate_method_entry.
1612 
1613   // fixed size of an interpreter frame:
1614   int overhead = frame::sender_sp_offset -
1615                  frame::interpreter_frame_initial_sp_offset;
1616   // Our locals were accounted for by the caller (or last_frame_adjust
1617   // on the transition). Since the callee parameters already account
1618   // for the callee's params, we only need to account for the extra
1619   // locals.
1620   int size = overhead +
1621          (callee_locals - callee_params) * Interpreter::stackElementWords +
1622          monitors * frame::interpreter_frame_monitor_size() +
1623          temps * Interpreter::stackElementWords + extra_args;
1624 
1625   // On AArch32 we always keep the stack pointer 8-byte aligned, so we
1626   // must round up here.
1627   size = round_to(size, 2);
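       // e.g. a 13-word frame is rounded up to 14 words (56 bytes with a
       // 4-byte word).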
1628 
1629   return size;
1630 }
1631 
1632 void AbstractInterpreter::layout_activation(Method* method,
1633                                             int tempcount,
1634                                             int popframe_extra_args,
1635                                             int moncount,
1636                                             int caller_actual_parameters,
1637                                             int callee_param_count,
1638                                             int callee_locals,
1639                                             frame* caller,
1640                                             frame* interpreter_frame,
1641                                             bool is_top_frame,
1642                                             bool is_bottom_frame) {
1643   // The frame interpreter_frame is guaranteed to be the right size,
1644   // as determined by a previous call to the size_activation() method.
1645   // It is also guaranteed to be walkable even though it is in a
1646   // skeletal state.
1647 
1648   int max_locals = method->max_locals() * Interpreter::stackElementWords;
1649   int extra_locals = (method->max_locals() - method->size_of_parameters()) *
1650     Interpreter::stackElementWords;
1651 
1652 #ifdef ASSERT
1653   assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
1654 #endif
1655 
1656   interpreter_frame->interpreter_frame_set_method(method);
1657   // NOTE the difference between sender_sp and
1658   // interpreter_frame_sender_sp: interpreter_frame_sender_sp is
1659   // the original sp of the caller (the unextended_sp), while
1660   // sender_sp is fp + 8 on this 32-bit port.
1661   intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
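       // locals points at slot 0, the highest-addressed local; slot i of the
       // method's locals sits i words below it.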
1662 
1663 #ifdef ASSERT
1664   if (caller->is_interpreted_frame()) {
1665     assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement");
1666   }
1667 #endif
1668 
1669   interpreter_frame->interpreter_frame_set_locals(locals);
1670   BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
1671   BasicObjectLock* monbot = montop - moncount;
1672   interpreter_frame->interpreter_frame_set_monitor_end(monbot);
1673 
1674   // Set last_sp
1675   intptr_t*  last_sp = (intptr_t*) monbot -
1676     tempcount*Interpreter::stackElementWords -
1677     popframe_extra_args;
1678   interpreter_frame->interpreter_frame_set_last_sp(last_sp);
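       // last_sp marks the expression-stack top so that stack walkers see a
       // consistent extent for this still-skeletal frame.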
1679 
1680   // All frames but the initial (oldest) interpreter frame we fill in have
1681   // a value for sender_sp that allows walking the stack but isn't
1682   // truly correct. Correct the value here.
1683   if (extra_locals != 0 &&
1684       interpreter_frame->sender_sp() ==
1685       interpreter_frame->interpreter_frame_sender_sp()) {
1686     interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() +
1687                                                        extra_locals);
1688   }
1689   *interpreter_frame->interpreter_frame_cache_addr() =
1690     method->constants()->cache();
1691 }
1692 
1693 
1694 //-----------------------------------------------------------------------------
1695 // Exceptions
1696 
1697 void TemplateInterpreterGenerator::generate_throw_exception() {
1698   // Entry point in previous activation (i.e., if the caller was
1699   // interpreted)
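       // This is reached when a callee's remove_activation rethrows an
       // unhandled exception into this (interpreted) activation.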
1700   Interpreter::_rethrow_exception_entry = __ pc();
1701   __ reg_printf("rethrow_exception_entry\n");
1702 
1703   // Restore sp to interpreter_frame_last_sp even though we are going
1704   // to empty the expression stack for the exception processing.
1705   __ mov(rscratch1, 0);
1706   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1707   // r0: exception
1708   // r3: return address/pc that threw exception
1709   __ restore_bcp();    // rbcp points to call/send
1710   __ restore_locals();
1711   __ restore_constant_pool_cache();
1712   __ get_dispatch();
1713 
1714   // Entry point for exceptions thrown within interpreter code
1715   Interpreter::_throw_exception_entry = __ pc();
1716   __ reg_printf("throw_exception_entry\n");
1717   // If we came here via a NullPointerException on the receiver of a
1718   // method, rmethod may be corrupt.
1719   __ get_method(rmethod);
1720   // expression stack is undefined here
1721   // r0: exception
1722   // rbcp: exception bcp
1723   __ verify_oop(r0);
1724   __ mov(c_rarg1, r0);
1725 
1726   // expression stack must be empty before entering the VM in case of
1727   // an exception
1728   __ empty_expression_stack();
1729   // find exception handler address and preserve exception oop
1730   __ call_VM(r3,
1731              CAST_FROM_FN_PTR(address,
1732                           InterpreterRuntime::exception_handler_for_exception),
1733              c_rarg1);
1734 
1735   // Calculate stack limit
1736   /*__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
1737   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
1738   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
1739   __ ldr(rscratch2,
1740          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
1741   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
1742   __ bic(sp, rscratch1, 0xf);*/
1743   // Don't do this as we don't have a stack pointer
1744 
1745   // r0: exception handler entry point
1746   // r3: preserved exception oop
1747   // rbcp: bcp for exception handler
1748   __ push_ptr(r3); // push exception which is now the only value on the stack
1749   __ b(r0); // jump to exception handler (may be _remove_activation_entry!)
1750 
1751   // If the exception is not handled in the current frame the frame is
1752   // removed and the exception is rethrown (i.e. exception
1753   // continuation is _rethrow_exception).
1754   //
1755   // Note: at this point the bci still refers to the instruction
1756   // which caused the exception, and the expression stack is
1757   // empty. Thus, for any VM calls at this point, GC will find a legal
1758   // oop map (with empty expression stack).
1759 
1760   //
1761   // JVMTI PopFrame support
1762   //
1763 
1764   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1765   __ print_method_exit(false);
1766   __ reg_printf("remove_activation_preserving_args_entry\n");
1767   __ empty_expression_stack();
1768   // Set the popframe_processing bit in pending_popframe_condition
1769   // indicating that we are currently handling popframe, so that
1770   // call_VMs that may happen later do not trigger new popframe
1771   // handling cycles.
1772   __ ldr(r3, Address(rthread, JavaThread::popframe_condition_offset()));
1773   __ orr(r3, r3, JavaThread::popframe_processing_bit);
1774   __ str(r3, Address(rthread, JavaThread::popframe_condition_offset()));
1775 
1776   {
1777     // Check to see whether we are returning to a deoptimized frame.
1778     // (The PopFrame call ensures that the caller of the popped frame is
1779     // either interpreted or compiled and deoptimizes it if compiled.)
1780     // In this case, we can't call dispatch_next() after the frame is
1781     // popped, but instead must save the incoming arguments and restore
1782     // them after deoptimization has occurred.
1783     //
1784     // Note that we don't compare the return PC against the
1785     // deoptimization blob's unpack entry because of the presence of
1786     // adapter frames in C2.
1787     Label caller_not_deoptimized;
1788     __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
1789     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1790                                InterpreterRuntime::interpreter_contains), c_rarg1);
1791     __ cbnz(r0, caller_not_deoptimized);
1792 
1793     // Compute size of arguments for saving when returning to
1794     // deoptimized caller
1795     __ get_method(r0);
1796     __ ldr(r0, Address(r0, Method::const_offset()));
1797     __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
1798                                                     size_of_parameters_offset())));
1799     __ lsl(r0, r0, Interpreter::logStackElementSize);
1800     __ restore_locals(); // XXX do we need this?
1801     __ sub(rlocals, rlocals, r0);
1802     __ add(rlocals, rlocals, wordSize);
1803     // Save these arguments
1804     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1805                                            Deoptimization::
1806                                            popframe_preserve_args),
1807                           rthread, r0, rlocals);
1808 
1809     __ remove_activation(vtos,
1810                          /* throw_monitor_exception */ false,
1811                          /* install_monitor_exception */ false,
1812                          /* notify_jvmdi */ false);
1813 
1814     // Inform deoptimization that it is responsible for restoring
1815     // these arguments
1816     __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
1817     __ str(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
1818 
1819     // Continue in deoptimization handler
1820     __ b(lr);
1821 
1822     __ bind(caller_not_deoptimized);
1823   }
1824 
1825   __ remove_activation(vtos,
1826                        /* throw_monitor_exception */ false,
1827                        /* install_monitor_exception */ false,
1828                        /* notify_jvmdi */ false);
1829 
1830   // Restore the last_sp and null it out
1831   __ ldr(sp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1832   __ mov(rscratch1, 0);
1833   __ str(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
1834   // note: remove_activation may already have restored sp, which would
1835   // make the reload above redundant
1835 
1836   __ restore_bcp();
1837   __ restore_locals();
1838   __ restore_constant_pool_cache();
1839   __ get_method(rmethod);
1840   __ get_dispatch();
1841 
1842   // The method data pointer was incremented already during
1843   // call profiling. We have to restore the mdp for the current bcp.
1844   if (ProfileInterpreter) {
1845     __ set_method_data_pointer_for_bcp();
1846   }
1847 
1848   // Clear the popframe condition flag
1849   __ mov(rscratch1, JavaThread::popframe_inactive);
1850   __ str(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
1851   assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
1852 
1853 #if INCLUDE_JVMTI
1854   {
1855     Label L_done;
1856     __ ldrb(rscratch1, Address(rbcp, 0));
1857     __ cmp(rscratch1, Bytecodes::_invokestatic);
1858     __ b(L_done, Assembler::EQ);
1859 
1860     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1861     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1862 
1863     __ ldr(c_rarg0, Address(rlocals, 0));
1864     __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
1865 
1866     __ cbz(r0, L_done);
1867 
1868     __ str(r0, Address(sp, 0));
1869     __ bind(L_done);
1870   }
1871 #endif // INCLUDE_JVMTI
1872 
1873   // Restore machine SP
1874   /*__ ldr(rscratch1, Address(rmethod, Method::const_offset()));
1875   __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
1876   __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
1877   __ ldr(rscratch2,
1878          Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
1879   __ sub(rscratch1, rscratch2, rscratch1, lsl(2));
1880   __ bic(sp, rscratch1, 0xf);*/
1881 
1882   __ dispatch_next(vtos);
1883   // end of PopFrame support
1884 
1885   Interpreter::_remove_activation_entry = __ pc();
1886   __ print_method_exit(false);
1887   __ reg_printf("remove_activation_entry\n");
1888 
1889   // preserve exception over this code sequence
1890   __ pop_ptr(r0);
1891   __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
1892   // remove the activation (without doing throws on illegalMonitorExceptions)
1893   __ remove_activation(vtos, false, true, false);
1894   // restore exception
1896   __ get_vm_result(r0, rthread);
1897 
1898   // In between activations - previous activation type unknown yet
1899   // compute continuation point - the continuation point expects the
1900   // following registers set up:
1901   //
1902   // r0: exception
1903   // lr: return address/pc that threw exception
1904   // sp: expression stack of caller
1905   // rfp: fp of caller
1906   // FIXME: There's no point saving LR here because VM calls don't trash it
1907   __ strd(r0, lr, Address(__ pre(sp, -2 * wordSize)));  // save exception & return address
1908   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
1909                           SharedRuntime::exception_handler_for_return_address),
1910                         rthread, lr);
1911   __ mov(r1, r0);                               // save exception handler
1912   __ ldrd(r0, lr, Address(__ post(sp, 2 * wordSize)));  // restore exception & return address
1913   // We might be returning to a deopt handler that expects r3 to
1914   // contain the exception pc
1915   __ mov(r3, lr);
1916   // Note that an "issuing PC" is actually the next PC after the call
1917   __ b(r1);                                    // jump to exception
1918                                                 // handler of caller
1919 }
1920 
1921 
1922 //
1923 // JVMTI ForceEarlyReturn support
1924 //
1925 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1926   address entry = __ pc();
1927   __ restore_bcp();
1928   __ restore_locals();
1929   __ empty_expression_stack();
1930   __ load_earlyret_value(state);
1931 
1932   __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
1933   Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
1934 
1935   // Clear the earlyret state
1936   assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
1937   __ mov(rscratch2, 0);
1938   __ str(rscratch2, cond_addr);
1939 
1940   __ remove_activation(state,
1941                        false, /* throw_monitor_exception */
1942                        false, /* install_monitor_exception */
1943                        true); /* notify_jvmdi */
1944   __ b(lr);
1945 
1946   return entry;
1947 } // end of ForceEarlyReturn support
1948 
1949 
1950 
1951 //-----------------------------------------------------------------------------
1952 // Helper for vtos entry point generation
1953 
1954 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
1955                                                          address& bep,
1956                                                          address& cep,
1957                                                          address& sep,
1958                                                          address& aep,
1959                                                          address& iep,
1960                                                          address& lep,
1961                                                          address& fep,
1962                                                          address& dep,
1963                                                          address& vep) {
1964   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
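       // Each non-void entry flushes its TOS register(s) onto the Java
       // expression stack, then joins the common vtos entry point at L.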
1965   Label L;
1966   aep = __ pc();  __ push_ptr();  __ b(L);
1967   fep = __ pc();  __ push_f();    __ b(L);
1968   dep = __ pc();  __ push_d();    __ b(L);
1969   lep = __ pc();  __ push_l();    __ b(L);
1970   bep = cep = sep =
1971   iep = __ pc();  __ push_i();
1972   vep = __ pc();
1973   __ bind(L);
1974   generate_and_dispatch(t);
1975 }
1976 
1977 //-----------------------------------------------------------------------------
1978 // Generation of individual instructions
1979 
1980 // helpers for generate_and_dispatch
1981 
1982 
1983 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1984   : TemplateInterpreterGenerator(code) {
1985    generate_all(); // down here so it can be "virtual"
1986 }
1987 
1988 //-----------------------------------------------------------------------------
1989 
1990 // Non-product code
1991 #ifndef PRODUCT
1992 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1993   address entry = __ pc();
1994 
1995   __ push(lr);
1996   __ push(state);
1997   __ push(RegSet::range(r0, r12), sp);
1998   __ mov(c_rarg2, r0);  // Pass itos
1999   __ mov(c_rarg3, r1);  // Pass ltos/dtos high part
2000   __ call_VM(noreg,
2001              CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
2002              c_rarg1, c_rarg2, c_rarg3);
2003   __ pop(RegSet::range(r0, r12), sp);
2004   __ pop(state);
2005   __ pop(lr);
2006   __ b(lr);                                   // return from result handler
2007 
2008   return entry;
2009 }
2010 
2011 void TemplateInterpreterGenerator::count_bytecode() {
2012   __ push(c_rarg0);
2013   __ push(rscratch1);
2014   __ push(rscratch2);
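       // Atomically increment BytecodeCounter::_counter_value using an
       // LDREX/STREX retry loop; the STREX status lands in c_rarg0 and is
       // zero only on success.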
2015   Label L;
2016   __ mov(rscratch2, (address) &BytecodeCounter::_counter_value);
2017   __ bind(L);
2018   __ ldrex(rscratch1, rscratch2);
2019   __ add(rscratch1, rscratch1, 1);
2020   // strex stores the 2nd arg to the destination addressed by the 3rd arg,
2021   // and stores the status to the 1st arg, so the 1st and 2nd should differ.
2022   __ strex(c_rarg0, rscratch1, rscratch2);
2023   __ cmp(c_rarg0, 0);
2024   __ b(L, Assembler::NE);
2025   __ pop(rscratch2);
2026   __ pop(rscratch1);
2027   __ pop(c_rarg0);
2028 }
2029 
2030 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
2031 
2032 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
2033 
2034 
2035 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2036   // Call a little run-time stub to avoid blow-up for each bytecode.
2037   // The run-time stub saves the right registers, depending on
2038   // the tosca in-state for the given template.
2039 
2040   assert(Interpreter::trace_code(t->tos_in()) != NULL,
2041          "entry must have been generated");
2042   __ bl(Interpreter::trace_code(t->tos_in()));
2043 }
2044 
2045 
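     // Emits a check that hits a breakpoint (bkpt) once the dynamic bytecode
     // count reaches the develop flag StopInterpreterAt, to aid debugging.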
2046 void TemplateInterpreterGenerator::stop_interpreter_at() {
2047   Label L;
2048   __ push(rscratch1);
2049   __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
2050   __ ldr(rscratch1, Address(rscratch1));
2051   __ mov(rscratch2, StopInterpreterAt);
2052   __ cmp(rscratch1, rscratch2);
2053   __ b(L, Assembler::NE);
2054   __ bkpt(0);
2055   __ bind(L);
2056   __ pop(rscratch1);
2057 }
2058 
2059 #endif // !PRODUCT
2060 #endif // ! CC_INTERP
--- EOF ---