1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2013 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/assembler.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "ci/ciMethod.hpp"
  30 #include "interpreter/bytecodeHistogram.hpp"
  31 #include "interpreter/cppInterpreter.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterGenerator.hpp"
  34 #include "interpreter/interpreterRuntime.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/methodData.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/arguments.hpp"
  42 #include "runtime/deoptimization.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/interfaceSupport.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/stubRoutines.hpp"
  47 #include "runtime/synchronizer.hpp"
  48 #include "runtime/timer.hpp"
  49 #include "runtime/vframeArray.hpp"
  50 #include "utilities/debug.hpp"
  51 #ifdef SHARK
  52 #include "shark/shark_globals.hpp"
  53 #endif
  54 
  55 #ifdef CC_INTERP
  56 
  57 #define __ _masm->
  58 
  59 // Contains is used for identifying interpreter frames during a stack-walk.
  60 // A frame with a PC in InterpretMethod must be identified as a normal C frame.
  61 bool CppInterpreter::contains(address pc) {
  62   return _code->contains(pc);
  63 }
  64 
  65 #ifdef PRODUCT
  66 #define BLOCK_COMMENT(str) // nothing
  67 #else
  68 #define BLOCK_COMMENT(str) __ block_comment(str)
  69 #endif
  70 
  71 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  72 
  73 static address interpreter_frame_manager        = NULL;
  74 static address frame_manager_specialized_return = NULL;
  75 static address native_entry                     = NULL;
  76 
  77 static address interpreter_return_address       = NULL;
  78 
  79 static address unctrap_frame_manager_entry      = NULL;
  80 
  81 static address deopt_frame_manager_return_atos  = NULL;
  82 static address deopt_frame_manager_return_btos  = NULL;
  83 static address deopt_frame_manager_return_itos  = NULL;
  84 static address deopt_frame_manager_return_ltos  = NULL;
  85 static address deopt_frame_manager_return_ftos  = NULL;
  86 static address deopt_frame_manager_return_dtos  = NULL;
  87 static address deopt_frame_manager_return_vtos  = NULL;
  88 
  89 // A result handler converts/unboxes a native call result into
  90 // a java interpreter/compiler result. The current frame is an
  91 // interpreter frame.
  92 address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  93   return AbstractInterpreterGenerator::generate_result_handler_for(type);
  94 }
  95 
  96 // Convert a tosca-based (register) result into a C++ interpreter stack-based result.
  97 address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  98   //
  99   // A result is in the native abi result register from a native
 100   // method call. We need to return this result to the interpreter by
 101   // pushing the result on the interpreter's stack.
 102   //
 103   // Registers alive:
 104   //   R3_ARG1(R3_RET)/F1_ARG1(F1_RET) - result to move
 105   //   R4_ARG2                         - address of tos
 106   //   LR
 107   //
 108   // Registers updated:
 109   //   R3_RET(R3_ARG1)   - address of new tos (== R17_tos for T_VOID)
 110   //
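       //
       // Illustrative stack effect (a sketch only, not generated code), with tos
       // denoting the first free slot on entry:
       //
       //   1-slot results (int, float, oop, ...): *tos           = result; new tos = tos - 1 slot
       //   2-slot results (long, double):         *(tos - 1 slot) = result; *tos = dummy;
       //                                          new tos = tos - 2 slots
       //   void:                                  new tos = tos
       //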
 111 
 112   int number_of_used_slots = 1;
 113 
 114   const Register tos = R4_ARG2;
 115   Label done;
 116   Label is_false;
 117 
 118   address entry = __ pc();
 119 
 120   switch (type) {
 121   case T_BOOLEAN:
 122     __ cmpwi(CCR0, R3_RET, 0);
 123     __ beq(CCR0, is_false);
 124     __ li(R3_RET, 1);
 125     __ stw(R3_RET, 0, tos);
 126     __ b(done);
 127     __ bind(is_false);
 128     __ li(R3_RET, 0);
 129     __ stw(R3_RET, 0, tos);
 130     break;
 131   case T_BYTE:
 132   case T_CHAR:
 133   case T_SHORT:
 134   case T_INT:
 135     __ stw(R3_RET, 0, tos);
 136     break;
 137   case T_LONG:
 138     number_of_used_slots = 2;
 139     // mark unused slot for debugging
 140     // long goes to topmost slot
 141     __ std(R3_RET, -BytesPerWord, tos);
 142     __ li(R3_RET, 0);
 143     __ std(R3_RET, 0, tos);
 144     break;
 145   case T_OBJECT:
 146     __ verify_oop(R3_RET);
 147     __ std(R3_RET, 0, tos);
 148     break;
 149   case T_FLOAT:
 150     __ stfs(F1_RET, 0, tos);
 151     break;
 152   case T_DOUBLE:
 153     number_of_used_slots = 2;
 154     // mark unused slot for debugging
 155     __ li(R3_RET, 0);
 156     __ std(R3_RET, 0, tos);
 157     // double goes to topmost slot
 158     __ stfd(F1_RET, -BytesPerWord, tos);
 159     break;
 160   case T_VOID:
 161     number_of_used_slots = 0;
 162     break;
 163   default:
 164     ShouldNotReachHere();
 165   }
 166 
 167   __ BIND(done);
 168 
 169   // new expression stack top
 170   __ addi(R3_RET, tos, -BytesPerWord * number_of_used_slots);
 171 
 172   __ blr();
 173 
 174   return entry;
 175 }
 176 
 177 address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
 178   //
 179   // Copy the result from the callee's stack to the caller's stack,
 180   // caller and callee both being interpreted.
 181   //
 182   // Registers alive
 183   //   R3_ARG1        - address of callee's tos + BytesPerWord
 184   //   R4_ARG2        - address of caller's tos [i.e. free location]
 185   //   LR
 186   //
 187   //   stack grows upwards, memory grows downwards.
 188   //
 189   //   [      free         ]  <-- callee's tos
 190   //   [  optional result  ]  <-- R3_ARG1
 191   //   [  optional dummy   ]
 192   //          ...
 193   //   [      free         ]  <-- caller's tos, R4_ARG2
 194   //          ...
 195   // Registers updated
 196   //   R3_RET(R3_ARG1) - address of caller's new tos
 197   //
 198   //   stack grows upwards, memory grows downwards.
 199   //
 200   //   [      free         ]  <-- current tos, R3_RET
 201   //   [  optional result  ]
 202   //   [  optional dummy   ]
 203   //          ...
 204   //
 205 
 206   const Register from = R3_ARG1;
 207   const Register ret  = R3_ARG1;
 208   const Register tos  = R4_ARG2;
 209   const Register tmp1 = R21_tmp1;
 210   const Register tmp2 = R22_tmp2;
 211 
 212   address entry = __ pc();
 213 
 214   switch (type) {
 215   case T_BOOLEAN:
 216   case T_BYTE:
 217   case T_CHAR:
 218   case T_SHORT:
 219   case T_INT:
 220   case T_FLOAT:
 221     __ lwz(tmp1, 0, from);
 222     __ stw(tmp1, 0, tos);
 223     // New expression stack top.
 224     __ addi(ret, tos, - BytesPerWord);
 225     break;
 226   case T_LONG:
 227   case T_DOUBLE:
 228     // Move both entries for debug purposes even though only one is live.
 229     __ ld(tmp1, BytesPerWord, from);
 230     __ ld(tmp2, 0, from);
 231     __ std(tmp1, 0, tos);
 232     __ std(tmp2, -BytesPerWord, tos);
 233     // New expression stack top.
 234     __ addi(ret, tos, - 2 * BytesPerWord); // two slots
 235     break;
 236   case T_OBJECT:
 237     __ ld(tmp1, 0, from);
 238     __ verify_oop(tmp1);
 239     __ std(tmp1, 0, tos);
 240     // New expression stack top.
 241     __ addi(ret, tos, - BytesPerWord);
 242     break;
 243   case T_VOID:
 244     // New expression stack top.
 245     __ mr(ret, tos);
 246     break;
 247   default:
 248     ShouldNotReachHere();
 249   }
 250 
 251   __ blr();
 252 
 253   return entry;
 254 }
 255 
 256 address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
 257   //
 258   // Load a result from the callee's stack into the caller's expected
 259   // return register, callee being interpreted, caller being call stub
 260   // or jit code.
 261   //
 262   // Registers alive
 263   //   R3_ARG1   - callee expression tos + BytesPerWord
 264   //   LR
 265   //
 266   //   stack grows upwards, memory grows downwards.
 267   //
 268   //   [      free         ]  <-- callee's tos
 269   //   [  optional result  ]  <-- R3_ARG1
 270   //   [  optional dummy   ]
 271   //          ...
 272   //
 273   // Registers updated
 274   //   R3_RET(R3_ARG1)/F1_RET - result
 275   //
 276 
 277   const Register from = R3_ARG1;
 278   const Register ret = R3_ARG1;
 279   const FloatRegister fret = F1_ARG1;
 280 
 281   address entry = __ pc();
 282 
 283   // Implemented uniformly for both kinds of endianness. The interpreter
 284   // implements boolean, byte, char, and short as jint (4 bytes).
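       // Each such value occupies the low 32 bits of a 64-bit stack slot, so the
       // unsigned types (boolean, char) are widened with lwz (zero extension) and
       // the signed ones (byte, short, int) with lwa (sign extension) below.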
 285   switch (type) {
 286   case T_BOOLEAN:
 287   case T_CHAR:
 288     // zero extension
 289     __ lwz(ret, 0, from);
 290     break;
 291   case T_BYTE:
 292   case T_SHORT:
 293   case T_INT:
 294     // sign extension
 295     __ lwa(ret, 0, from);
 296     break;
 297   case T_LONG:
 298     __ ld(ret, 0, from);
 299     break;
 300   case T_OBJECT:
 301     __ ld(ret, 0, from);
 302     __ verify_oop(ret);
 303     break;
 304   case T_FLOAT:
 305     __ lfs(fret, 0, from);
 306     break;
 307   case T_DOUBLE:
 308     __ lfd(fret, 0, from);
 309     break;
 310   case T_VOID:
 311     break;
 312   default:
 313     ShouldNotReachHere();
 314   }
 315 
 316   __ blr();
 317 
 318   return entry;
 319 }
 320 
 321 address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
 322   assert(interpreter_return_address != NULL, "Not initialized");
 323   return interpreter_return_address;
 324 }
 325 
 326 address CppInterpreter::deopt_entry(TosState state, int length) {
 327   address ret = NULL;
 328   if (length != 0) {
 329     switch (state) {
 330       case atos: ret = deopt_frame_manager_return_atos; break;
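           // btos, ctos and stos values are kept as jint on the expression stack,
           // so they share the itos deopt entry.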
 331       case btos: ret = deopt_frame_manager_return_itos; break;
 332       case ctos:
 333       case stos:
 334       case itos: ret = deopt_frame_manager_return_itos; break;
 335       case ltos: ret = deopt_frame_manager_return_ltos; break;
 336       case ftos: ret = deopt_frame_manager_return_ftos; break;
 337       case dtos: ret = deopt_frame_manager_return_dtos; break;
 338       case vtos: ret = deopt_frame_manager_return_vtos; break;
 339       default: ShouldNotReachHere();
 340     }
 341   } else {
 342     ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap, popframe)
 343   }
 344   assert(ret != NULL, "Not initialized");
 345   return ret;
 346 }
 347 
 348 //
 349 // Helpers for commoning out cases in the various types of method entries.
 350 //
 351 
 352 //
 353 // Registers alive
 354 //   R16_thread      - JavaThread*
 355 //   R1_SP           - old stack pointer
 356 //   R19_method      - callee's Method
 357 //   R17_tos         - address of caller's tos (prepushed)
 358 //   R15_prev_state  - address of caller's BytecodeInterpreter or 0
 359 //   return_pc in R21_tmp15 (only when called within generate_native_entry)
 360 //
 361 // Registers updated
 362 //   R14_state       - address of callee's interpreter state
 363 //   R1_SP           - new stack pointer
 364 //   CCR4_is_synced  - current method is synchronized
 365 //
 366 void CppInterpreterGenerator::generate_compute_interpreter_state(Label& stack_overflow_return) {
 367   //
 368   // Stack layout at this point:
 369   //
 370   //   F1      [TOP_IJAVA_FRAME_ABI]              <-- R1_SP
 371   //           alignment (optional)
 372   //           [F1's outgoing Java arguments]     <-- R17_tos
 373   //           ...
 374   //   F2      [PARENT_IJAVA_FRAME_ABI]
 375   //            ...
 376 
 377   //=============================================================================
 378   // Allocate space for locals other than the parameters, the
 379   // interpreter state, monitors, and the expression stack.
 380 
 381   const Register local_count        = R21_tmp1;
 382   const Register parameter_count    = R22_tmp2;
 383   const Register max_stack          = R23_tmp3;
 384   // Must not be overwritten within this method!
 385   // const Register return_pc         = R29_tmp9;
 386 
 387   const ConditionRegister is_synced = CCR4_is_synced;
 388   const ConditionRegister is_native = CCR6;
 389   const ConditionRegister is_static = CCR7;
 390 
 391   assert(is_synced != is_native, "condition code registers must be distinct");
 392   assert(is_synced != is_static, "condition code registers must be distinct");
 393   assert(is_native != is_static, "condition code registers must be distinct");
 394 
 395   {
 396 
 397   // Local registers
 398   const Register top_frame_size     = R24_tmp4;
 399   const Register access_flags       = R25_tmp5;
 400   const Register state_offset       = R26_tmp6;
 401   Register mem_stack_limit          = R27_tmp7;
 402   const Register page_size          = R28_tmp8;
 403 
 404   BLOCK_COMMENT("compute_interpreter_state {");
 405 
 406   // access_flags = method->access_flags();
 407   // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
 408   __ lwa(access_flags, method_(access_flags));
 409 
 410   // parameter_count = method->constMethod->size_of_parameters();
 411   // TODO: PPC port: assert(2 == ConstMethod::sz_size_of_parameters(), "unexpected field size");
 412   __ ld(max_stack, in_bytes(Method::const_offset()), R19_method);   // Max_stack holds constMethod for a while.
 413   __ lhz(parameter_count, in_bytes(ConstMethod::size_of_parameters_offset()), max_stack);
 414 
 415   // local_count = method->constMethod()->max_locals();
 416   // TODO: PPC port: assert(2 == ConstMethod::sz_max_locals(), "unexpected field size");
 417   __ lhz(local_count, in_bytes(ConstMethod::size_of_locals_offset()), max_stack);
 418 
 419   // max_stack = method->constMethod()->max_stack();
 420   // TODO: PPC port: assert(2 == ConstMethod::sz_max_stack(), "unexpected field size");
 421   __ lhz(max_stack, in_bytes(ConstMethod::max_stack_offset()), max_stack);
 422 
 423   if (EnableInvokeDynamic) {
 424     // Take into account 'extra_stack_entries' needed by method handles (see method.hpp).
 425     __ addi(max_stack, max_stack, Method::extra_stack_entries());
 426   }
 427 
 428   // mem_stack_limit = thread->stack_limit();
 429   __ ld(mem_stack_limit, thread_(stack_overflow_limit));
 430 
 431   // Point locals at the first argument. Method's locals are the
 432   // parameters on top of caller's expression stack.
 433 
 434   // tos points past last Java argument
 435   __ sldi(R18_locals, parameter_count, Interpreter::logStackElementSize);
 436   __ add(R18_locals, R17_tos, R18_locals);
 437 
 438   // R18_locals - i*BytesPerWord points to i-th Java local (i starts at 0)
 439 
 440   // Set is_native, is_synced, is_static - will be used later.
 441   __ testbitdi(is_native, R0, access_flags, JVM_ACC_NATIVE_BIT);
 442   __ testbitdi(is_synced, R0, access_flags, JVM_ACC_SYNCHRONIZED_BIT);
 443   assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
 444   __ testbitdi(is_static, R0, access_flags, JVM_ACC_STATIC_BIT);
 445 
 446   // PARENT_IJAVA_FRAME_ABI
 447   //
 448   // frame_size =
 449   //   round_to((local_count - parameter_count)*BytesPerWord +
 450   //              2*BytesPerWord +
 451   //              alignment +
 452   //              frame::interpreter_frame_cinterpreterstate_size_in_bytes()
 453   //              sizeof(PARENT_IJAVA_FRAME_ABI)
 454   //              method->is_synchronized() ? sizeof(BasicObjectLock) : 0 +
 455   //              max_stack*BytesPerWord,
 456   //            16)
 457   //
 458   // Note that this calculation is exactly mirrored by
 459   // AbstractInterpreter::layout_activation_impl() [ and
 460   // AbstractInterpreter::size_activation() ], which is used by
 461   // deoptimization so that it can allocate a properly sized
 462   // frame. This only happens for interpreted frames, so the extra
 463   // notes below about max_stack are not important. The other
 464   // thing to note is that for interpreter frames other than the
 465   // current activation the size of the stack is the size of the live
 466   // portion of the stack at the particular bcp and NOT the maximum
 467   // stack that the method might use.
 468   //
 469   // If we're calling a native method, we replace max_stack (which is
 470   // zero) with space for the worst-case signature handler varargs
 471   // vector, which is:
 472   //
 473   //   max_stack = max(Argument::n_register_parameters, parameter_count+2);
 474   //
 475   // We add two slots to the parameter_count, one for the jni
 476   // environment and one for a possible native mirror.  We allocate
 477   // space for at least the number of ABI registers, even though
 478   // InterpreterRuntime::slow_signature_handler won't write more than
 479   // parameter_count+2 words when it creates the varargs vector at the
 480   // top of the stack.  The generated slow signature handler will just
 481   // load trash into registers beyond the necessary number.  We're
 482   // still going to cut the stack back by the ABI register parameter
 483   // count so as to get SP+16 pointing at the ABI outgoing parameter
 484   // area, so we need to allocate at least that much even though we're
 485   // going to throw it away.
 486   //
 487 
 488   // Adjust max_stack for native methods:
 489   Label skip_native_calculate_max_stack;
 490   __ bfalse(is_native, skip_native_calculate_max_stack);
 491   // if (is_native) {
 492   //  max_stack = max(Argument::n_register_parameters, parameter_count+2);
 493   __ addi(max_stack, parameter_count, 2*Interpreter::stackElementWords);
 494   __ cmpwi(CCR0, max_stack, Argument::n_register_parameters);
 495   __ bge(CCR0, skip_native_calculate_max_stack);
 496   __ li(max_stack,  Argument::n_register_parameters);
 497   // }
 498   __ bind(skip_native_calculate_max_stack);
 499   // max_stack is now in bytes
 500   __ slwi(max_stack, max_stack, Interpreter::logStackElementSize);
 501 
 502   // Calculate number of non-parameter locals (in slots):
 503   Label not_java;
 504   __ btrue(is_native, not_java);
 505   // if (!is_native) {
 506   //   local_count = non-parameter local count
 507   __ sub(local_count, local_count, parameter_count);
 508   // } else {
 509   //   // nothing to do: method->max_locals() == 0 for native methods
 510   // }
 511   __ bind(not_java);
 512 
 513 
 514   // Calculate top_frame_size and parent_frame_resize.
 515   {
 516   const Register parent_frame_resize = R12_scratch2;
 517 
 518   BLOCK_COMMENT("Compute top_frame_size.");
 519   // top_frame_size = TOP_IJAVA_FRAME_ABI
 520   //                  + size of interpreter state
 521   __ li(top_frame_size, frame::top_ijava_frame_abi_size
 522                         + frame::interpreter_frame_cinterpreterstate_size_in_bytes());
 523   //                  + max_stack
 524   __ add(top_frame_size, top_frame_size, max_stack);
 525   //                  + stack slots for a BasicObjectLock for synchronized methods
 526   {
 527     Label not_synced;
 528     __ bfalse(is_synced, not_synced);
 529     __ addi(top_frame_size, top_frame_size, frame::interpreter_frame_monitor_size_in_bytes());
 530     __ bind(not_synced);
 531   }
 532   // align
 533   __ round_to(top_frame_size, frame::alignment_in_bytes);
 534 
 535 
 536   BLOCK_COMMENT("Compute parent_frame_resize.");
 537   // parent_frame_resize = R1_SP - R17_tos
 538   __ sub(parent_frame_resize, R1_SP, R17_tos);
 539   //__ li(parent_frame_resize, 0);
 540   //                       + PARENT_IJAVA_FRAME_ABI
 541   //                       + extra two slots for the no-parameter/no-locals
 542   //                         method result
 543   __ addi(parent_frame_resize, parent_frame_resize,
 544                                       frame::parent_ijava_frame_abi_size
 545                                     + 2*Interpreter::stackElementSize);
 546   //                       + (locals_count - params_count)
 547   __ sldi(R0, local_count, Interpreter::logStackElementSize);
 548   __ add(parent_frame_resize, parent_frame_resize, R0);
 549   // align
 550   __ round_to(parent_frame_resize, frame::alignment_in_bytes);
 551 
 552   //
 553   // Stack layout at this point:
 554   //
 555   // The new frame F0 hasn't yet been pushed, F1 is still the top frame.
 556   //
 557   //   F0      [TOP_IJAVA_FRAME_ABI]
 558   //           alignment (optional)
 559   //           [F0's full operand stack]
 560   //           [F0's monitors] (optional)
 561   //           [F0's BytecodeInterpreter object]
 562   //   F1      [PARENT_IJAVA_FRAME_ABI]
 563   //           alignment (optional)
 564   //           [F0's Java result]
 565   //           [F0's non-arg Java locals]
 566   //           [F1's outgoing Java arguments]     <-- R17_tos
 567   //           ...
 568   //   F2      [PARENT_IJAVA_FRAME_ABI]
 569   //            ...
 570 
 571 
 572   // Calculate new R14_state
 573   // and
 574   // test that the new memory stack pointer is above the limit,
 575   // throw a StackOverflowError otherwise.
 576   __ sub(R11_scratch1/*F1's SP*/,  R1_SP, parent_frame_resize);
 577   __ addi(R14_state, R11_scratch1/*F1's SP*/,
 578               -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
 579   __ sub(R11_scratch1/*F0's SP*/,
 580              R11_scratch1/*F1's SP*/, top_frame_size);
 581 
 582   BLOCK_COMMENT("Test for stack overflow:");
 583   __ cmpld(CCR0/*is_stack_overflow*/, R11_scratch1, mem_stack_limit);
 584   __ blt(CCR0/*is_stack_overflow*/, stack_overflow_return);
 585 
 586 
 587   //=============================================================================
 588   // Frame_size doesn't overflow the stack. Allocate new frame and
 589   // initialize interpreter state.
 590 
 591   // Register state
 592   //
 593   //   R21_tmp1       - local_count
 594   //   R22_tmp2       - parameter_count
 595   //   R23_tmp3       - max_stack
 596   //
 597   //   R24_tmp4       - top_frame_size
 598   //   R25_tmp5       - access_flags
 599   //   CCR4_is_synced - is_synced
 600   //
 601   //   R14_state      - pointer to the uninitialized new BytecodeInterpreter.
 602 
 603   // _last_Java_pc just needs to be close enough that we can identify
 604   // the frame as an interpreted frame. It does not need to be the
 605   // exact return address from either calling
 606   // BytecodeInterpreter::InterpretMethod or the call to a jni native method.
 607   // So we can initialize it here with the address of an instruction in this
 608   // code fragment. We only do this initialization for java frames
 609   // where InterpretMethod needs a way to get a good pc value to
 610   // store in the thread state. For interpreter frames used to call
 611   // jni native code we just zero the value in the state and move an
 612   // ip as needed in the native entry code.
 613   //
 614   // const Register last_Java_pc_addr     = GR24_SCRATCH;  // QQQ 27
 615   // const Register last_Java_pc          = GR26_SCRATCH;
 616 
 617   // Must reference stack before setting new SP since Windows
 618   // will not be able to deliver the exception on a bad SP.
 619   // Windows also insists that we bang each page one at a time in order
 620   // for the OS to map in the reserved pages. If we bang only
 621   // the final page, Windows stops delivering exceptions to our
 622   // VectoredExceptionHandler and terminates our program.
 623   // Linux only requires a single bang but it's rare to have
 624   // to bang more than 1 page so the code is enabled for both OS's.
 625 
 626   // BANG THE STACK
 627   //
 628   // Nothing to do for PPC, because updating the SP will automatically
 629   // bang the page.
 630 
 631   // Up to here we have calculated the delta for the new C-frame and
 632   // checked for a stack-overflow. Now we can safely update SP and
 633   // resize the C-frame.
 634 
 635   // R14_state has already been calculated.
 636   __ push_interpreter_frame(top_frame_size, parent_frame_resize,
 637                             R25_tmp5, R26_tmp6, R27_tmp7, R28_tmp8);
 638 
 639   }
 640 
 641   //
 642   // Stack layout at this point:
 643   //
 644   //   F0 has now been pushed!
 645   //
 646   //   F0      [TOP_IJAVA_FRAME_ABI]              <-- R1_SP
 647   //           alignment (optional)               (now it's here, if required)
 648   //           [F0's full operand stack]
 649   //           [F0's monitors] (optional)
 650   //           [F0's BytecodeInterpreter object]
 651   //   F1      [PARENT_IJAVA_FRAME_ABI]
 652   //           alignment (optional)               (now it's here, if required)
 653   //           [F0's Java result]
 654   //           [F0's non-arg Java locals]
 655   //           [F1's outgoing Java arguments]
 656   //           ...
 657   //   F2      [PARENT_IJAVA_FRAME_ABI]
 658   //           ...
 659   //
 660   // R14_state points to F0's BytecodeInterpreter object.
 661   //
 662 
 663   }
 664 
 665   //=============================================================================
 666   // new BytecodeInterpreter-object is safe, let's initialize it:
 667   BLOCK_COMMENT("New BytecodeInterpreter-object is safe.");
 668 
 669   {
 670   // Locals
 671   const Register bytecode_addr = R24_tmp4;
 672   const Register constants     = R25_tmp5;
 673   const Register tos           = R26_tmp6;
 674   const Register stack_base    = R27_tmp7;
 675   const Register local_addr    = R28_tmp8;
 676   {
 677     Label L;
 678     __ btrue(is_native, L);
 679     // if (!is_native) {
 680       // bytecode_addr = constMethod->codes();
 681       __ ld(bytecode_addr, method_(const));
 682       __ addi(bytecode_addr, bytecode_addr, in_bytes(ConstMethod::codes_offset()));
 683     // }
 684     __ bind(L);
 685   }
 686 
 687   __ ld(constants, in_bytes(Method::const_offset()), R19_method);
 688   __ ld(constants, in_bytes(ConstMethod::constants_offset()), constants);
 689 
 690   // state->_prev_link = prev_state;
 691   __ std(R15_prev_state, state_(_prev_link));
 692 
 693   // For assertions only.
 694   // TODO: not needed anyway because it coincides with `_monitor_base'. remove!
 695   // state->_self_link = state;
 696   DEBUG_ONLY(__ std(R14_state, state_(_self_link));)
 697 
 698   // state->_thread = thread;
 699   __ std(R16_thread, state_(_thread));
 700 
 701   // state->_method = method;
 702   __ std(R19_method, state_(_method));
 703 
 704   // state->_locals = locals;
 705   __ std(R18_locals, state_(_locals));
 706 
 707   // state->_oop_temp = NULL;
 708   __ li(R0, 0);
 709   __ std(R0, state_(_oop_temp));
 710 
 711   // state->_last_Java_fp = *R1_SP // Use *R1_SP as fp
 712   __ ld(R0, _abi(callers_sp), R1_SP);
 713   __ std(R0, state_(_last_Java_fp));
 714 
 715   BLOCK_COMMENT("load Stack base:");
 716   {
 717     // Stack_base.
 718     // if (!method->synchronized()) {
 719     //   stack_base = state;
 720     // } else {
 721     //   stack_base = (uintptr_t)state - sizeof(BasicObjectLock);
 722     // }
 723     Label L;
 724     __ mr(stack_base, R14_state);
 725     __ bfalse(is_synced, L);
 726     __ addi(stack_base, stack_base, -frame::interpreter_frame_monitor_size_in_bytes());
 727     __ bind(L);
 728   }
 729 
 730   // state->_mdx = NULL;
 731   __ li(R0, 0);
 732   __ std(R0, state_(_mdx));
 733 
 734   {
 735     // if (method->is_native()) state->_bcp = NULL;
 736     // else state->_bcp = bytecode_addr;
 737     Label label1, label2;
 738     __ bfalse(is_native, label1);
 739     __ std(R0, state_(_bcp));
 740     __ b(label2);
 741     __ bind(label1);
 742     __ std(bytecode_addr, state_(_bcp));
 743     __ bind(label2);
 744   }
 745 
 746 
 747   // state->_result._to_call._callee = NULL;
 748   __ std(R0, state_(_result._to_call._callee));
 749 
 750   // state->_monitor_base = state;
 751   __ std(R14_state, state_(_monitor_base));
 752 
 753   // state->_msg = BytecodeInterpreter::method_entry;
 754   __ li(R0, BytecodeInterpreter::method_entry);
 755   __ stw(R0, state_(_msg));
 756 
 757   // state->_last_Java_sp = R1_SP;
 758   __ std(R1_SP, state_(_last_Java_sp));
 759 
 760   // state->_stack_base = stack_base;
 761   __ std(stack_base, state_(_stack_base));
 762 
 763   // tos = stack_base - 1 slot (prepushed);
 764   // state->_stack.Tos(tos);
 765   __ addi(tos, stack_base, - Interpreter::stackElementSize);
 766   __ std(tos,  state_(_stack));
 767 
 768 
 769   {
 770     BLOCK_COMMENT("get last_Java_pc:");
 771     // if (!is_native) state->_last_Java_pc = <some_ip_in_this_code_buffer>;
 772     // else state->_last_Java_pc = NULL; (just for neatness)
 773     Label label1, label2;
 774     __ btrue(is_native, label1);
 775     __ get_PC_trash_LR(R0);
 776     __ std(R0, state_(_last_Java_pc));
 777     __ b(label2);
 778     __ bind(label1);
 779     __ li(R0, 0);
 780     __ std(R0, state_(_last_Java_pc));
 781     __ bind(label2);
 782   }
 783 
 784 
 785   // stack_limit = tos - max_stack;
 786   __ sub(R0, tos, max_stack);
 787   // state->_stack_limit = stack_limit;
 788   __ std(R0, state_(_stack_limit));
 789 
 790 
 791   // cache = method->constants()->cache();
 792   __ ld(R0, ConstantPool::cache_offset_in_bytes(), constants);
 793   // state->_constants = method->constants()->cache();
 794   __ std(R0, state_(_constants));
 795 
 796 
 797 
 798   //=============================================================================
 799   // synchronized method, allocate and initialize method object lock.
 800   // if (!method->is_synchronized()) goto fill_locals_with_0x0s;
 801   Label fill_locals_with_0x0s;
 802   __ bfalse(is_synced, fill_locals_with_0x0s);
 803 
 804   //   pool_holder = method->constants()->pool_holder();
 805   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
 806   {
 807     Label label1, label2;
 808     // lockee = NULL; for java methods, correct value will be inserted in BytecodeInterpretMethod.hpp
 809     __ li(R0, 0);
 810     __ bfalse(is_native, label2);
 811 
 812     __ bfalse(is_static, label1);
 813     // if (method->is_static()) lockee =
 814     // pool_holder->klass_part()->java_mirror();
 815     __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(), constants);
 816     __ ld(R0/*lockee*/, mirror_offset, R11_scratch1/*pool_holder*/);
 817     __ b(label2);
 818 
 819     __ bind(label1);
 820     // else lockee = *(oop*)locals;
 821     __ ld(R0/*lockee*/, 0, R18_locals);
 822     __ bind(label2);
 823 
 824     // monitor->set_obj(lockee);
 825     __ std(R0/*lockee*/, BasicObjectLock::obj_offset_in_bytes(), stack_base);
 826   }
 827 
 828   // See if we need to zero the locals
 829   __ BIND(fill_locals_with_0x0s);
 830 
 831 
 832   //=============================================================================
 833   // fill locals with 0x0s
 834   Label locals_zeroed;
 835   __ btrue(is_native, locals_zeroed);
 836 
 837   if (true /* zerolocals */ || ClearInterpreterLocals) {
 838     // local_count is already num_locals_slots - num_param_slots
 839     __ sldi(R0, parameter_count, Interpreter::logStackElementSize);
 840     __ sub(local_addr, R18_locals, R0);
 841     __ cmpdi(CCR0, local_count, 0);
 842     __ ble(CCR0, locals_zeroed);
 843 
 844     __ mtctr(local_count);
 845     //__ ld_const_addr(R0, (address) 0xcafe0000babe);
 846     __ li(R0, 0);
 847 
 848     Label zero_slot;
 849     __ bind(zero_slot);
 850 
 851     // first local is at local_addr
 852     __ std(R0, 0, local_addr);
 853     __ addi(local_addr, local_addr, -BytesPerWord);
 854     __ bdnz(zero_slot);
 855   }
 856 
 857   __ BIND(locals_zeroed);
 858 
 859   }
 860   BLOCK_COMMENT("} compute_interpreter_state");
 861 }
 862 
 863 // Generate code to initiate compilation on invocation counter overflow.
 864 void CppInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
 865   // Registers alive
 866   //   R14_state
 867   //   R16_thread
 868   //
 869   // Registers updated
 870   //   R14_state
 871   //   R3_ARG1 (=R3_RET)
 872   //   R4_ARG2
 873 
 874   // After entering the vm we remove the activation and retry the
 875   // entry point in case the compilation is complete.
 876 
 877   // InterpreterRuntime::frequency_counter_overflow takes one argument
 878   // that indicates if the counter overflow occurs at a backwards
 879   // branch (NULL bcp). We pass zero. The call returns the address
 880   // of the verified entry point for the method or NULL if the
 881   // compilation did not complete (either went background or bailed
 882   // out).
 883   __ li(R4_ARG2, 0);
 884 
 885   // Pass false to call_VM so it doesn't check for pending exceptions,
 886   // since at this point in the method invocation the exception
 887   // handler would try to exit the monitor of synchronized methods
 888   // which haven't been entered yet.
 889   //
 890   // Returns verified_entry_point or NULL, we don't care which.
 891   //
 892   // Do not use the variant `frequency_counter_overflow' that returns
 893   // a structure, because this will change the argument list by a
 894   // hidden parameter (gcc 4.1).
 895 
 896   __ call_VM(noreg,
 897              CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
 898              R4_ARG2,
 899              false);
 900   // Returns verified_entry_point or NULL, we don't care which as we ignore it
 901   // and run interpreted.
 902 
 903   // Reload method, it may have moved.
 904   __ ld(R19_method, state_(_method));
 905 
 906   // We jump now to the label "continue_after_compile".
 907   __ b(continue_entry);
 908 }
 909 
 910 // Increment invocation count and check for overflow.
 911 //
 912 // R19_method must contain Method* of method to profile.
 913 void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
 914   Label done;
 915   const Register Rcounters             = R12_scratch2;
 916   const Register iv_be_count           = R11_scratch1;
 917   const Register invocation_limit      = R12_scratch2;
 918   const Register invocation_limit_addr = invocation_limit;
 919 
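       // Roughly, in C terms (a sketch only, not the actual runtime code):
       //   MethodCounters* mcs = method->method_counters();   // allocated on demand;
       //                                                       // branch to done if allocation fails
       //   int count = /* bumped invocation (+ backedge) counter value */;
       //   if (count >= InvocationCounter::InterpreterInvocationLimit) goto overflow;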
 920   // Load and, if necessary, allocate the MethodCounters object.
 921   __ get_method_counters(R19_method, Rcounters, done);
 922 
 923   // Update standard invocation counters.
 924   __ increment_invocation_counter(Rcounters, iv_be_count, R0);
 925 
 926   // Compare against limit.
 927   BLOCK_COMMENT("Compare counter against limit:");
 928   assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit),
 929          "must be 4 bytes");
 930   __ load_const(invocation_limit_addr, (address)&InvocationCounter::InterpreterInvocationLimit);
 931   __ lwa(invocation_limit, 0, invocation_limit_addr);
 932   __ cmpw(CCR0, iv_be_count, invocation_limit);
 933   __ bge(CCR0, overflow);
 934   __ bind(done);
 935 }
 936 
 937 //
 938 // Call a JNI method.
 939 //
 940 // Interpreter stub for calling a native method. (C++ interpreter)
 941 // This sets up a somewhat different looking stack for calling the native method
 942 // than the typical interpreter frame setup.
 943 //
 944 address CppInterpreterGenerator::generate_native_entry(void) {
 945   if (native_entry != NULL) return native_entry;
 946   address entry = __ pc();
 947 
 948   // Read
 949   //   R16_thread
 950   //   R15_prev_state  - address of caller's BytecodeInterpreter, if this snippet
 951   //                     gets called by the frame manager.
 952   //   R19_method      - callee's Method
 953   //   R17_tos         - address of caller's tos
 954   //   R1_SP           - caller's stack pointer
 955   //   R21_sender_SP   - initial caller sp
 956   //
 957   // Update
 958   //   R14_state       - address of caller's BytecodeInterpreter
 959   //   R3_RET          - integer result, if any.
 960   //   F1_RET          - float result, if any.
 961   //
 962   //
 963   // Stack layout at this point:
 964   //
 965   //    0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
 966   //            alignment (optional)
 967   //            [outgoing Java arguments]     <-- R17_tos
 968   //            ...
 969   //    PARENT  [PARENT_IJAVA_FRAME_ABI]
 970   //            ...
 971   //
 972 
 973   const bool inc_counter = UseCompiler || CountCompiledCalls;
 974 
 975   const Register signature_handler_fd   = R21_tmp1;
 976   const Register pending_exception      = R22_tmp2;
 977   const Register result_handler_addr    = R23_tmp3;
 978   const Register native_method_fd       = R24_tmp4;
 979   const Register access_flags           = R25_tmp5;
 980   const Register active_handles         = R26_tmp6;
 981   const Register sync_state             = R27_tmp7;
 982   const Register sync_state_addr        = sync_state;     // Address is dead after use.
 983   const Register suspend_flags          = R24_tmp4;
 984 
 985   const Register return_pc              = R28_tmp8;       // Register will be locked for some time.
 986 
 987   const ConditionRegister is_synced     = CCR4_is_synced; // Live-on-exit from compute_interpreter_state.
 988 
 989 
 990   // R1_SP still points to caller's SP at this point.
 991 
 992   // Save initial_caller_sp to caller's abi. The caller frame must be
 993   // resized before returning to get rid of the c2i arguments (if
 994   // any).
 995   // Override the saved SP with the senderSP so we can pop c2i
 996   // arguments (if any) off when we return
 997   __ std(R21_sender_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
 998 
 999   // Save LR to caller's frame. We don't use _abi(lr) here, because it is not safe.
1000   __ mflr(return_pc);
1001   __ std(return_pc, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
1002 
1003   assert(return_pc->is_nonvolatile(), "return_pc must be a non-volatile register");
1004 
1005   __ verify_method_ptr(R19_method);
1006 
1007   //=============================================================================
1008 
1009   // If this snippet gets called by the frame manager (at label
1010   // `call_special'), then R15_prev_state is valid. If this snippet
1011   // is not called by the frame manager, but e.g. by the call stub or
1012   // by compiled code, then R15_prev_state is invalid.
1013   {
1014     // Set R15_prev_state to 0 if we don't return to the frame
1015     // manager; we will return to the call_stub or to compiled code
1016     // instead. If R15_prev_state is 0 there will be only one
1017     // interpreter frame (we will set this up later) in this C frame!
1018     // So we must take care about retrieving prev_state_(_prev_link)
1019     // and restoring R1_SP when popping that interpreter.
1020     Label prev_state_is_valid;
1021 
1022     __ load_const(R11_scratch1/*frame_manager_returnpc_addr*/, (address)&frame_manager_specialized_return);
1023     __ ld(R12_scratch2/*frame_manager_returnpc*/, 0, R11_scratch1/*frame_manager_returnpc_addr*/);
1024     __ cmpd(CCR0, return_pc, R12_scratch2/*frame_manager_returnpc*/);
1025     __ beq(CCR0, prev_state_is_valid);
1026 
1027     __ li(R15_prev_state, 0);
1028 
1029     __ BIND(prev_state_is_valid);
1030   }
1031 
1032   //=============================================================================
1033   // Allocate new frame and initialize interpreter state.
1034 
1035   Label exception_return;
1036   Label exception_return_sync_check;
1037   Label stack_overflow_return;
1038 
1039   // Generate new interpreter state and jump to stack_overflow_return in case of
1040   // a stack overflow.
1041   generate_compute_interpreter_state(stack_overflow_return);
1042 
1043   //=============================================================================
1044   // Increment invocation counter. On overflow, entry to JNI method
1045   // will be compiled.
1046   Label invocation_counter_overflow;
1047   if (inc_counter) {
1048     generate_counter_incr(invocation_counter_overflow);
1049   }
1050 
1051   Label continue_after_compile;
1052   __ BIND(continue_after_compile);
1053 
1054   // access_flags = method->access_flags();
1055   // Load access flags.
1056   assert(access_flags->is_nonvolatile(),
1057          "access_flags must be in a non-volatile register");
1058   // Type check.
1059   // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
1060   __ lwz(access_flags, method_(access_flags));
1061 
1062   // We don't want to reload R19_method and access_flags after calls
1063   // to some helper functions.
1064   assert(R19_method->is_nonvolatile(), "R19_method must be a non-volatile register");
1065 
1066   // Check for synchronized methods. Must happen AFTER invocation counter
1067   // check, so method is not locked if counter overflows.
1068 
1069   {
1070     Label method_is_not_synced;
1071     // Is_synced is still alive.
1072     assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
1073     __ bfalse(is_synced, method_is_not_synced);
1074 
1075     lock_method();
1076     // Reload method, it may have moved.
1077     __ ld(R19_method, state_(_method));
1078 
1079     __ BIND(method_is_not_synced);
1080   }
1081 
1082   // jvmti/jvmpi support
1083   __ notify_method_entry();
1084 
1085   // Reload method, it may have moved.
1086   __ ld(R19_method, state_(_method));
1087 
1088   //=============================================================================
1089   // Get and call the signature handler
1090 
1091   __ ld(signature_handler_fd, method_(signature_handler));
1092   Label call_signature_handler;
1093 
1094   __ cmpdi(CCR0, signature_handler_fd, 0);
1095   __ bne(CCR0, call_signature_handler);
1096 
1097   // Method has never been called. Either generate a specialized
1098   // handler or point to the slow one.
1099   //
1100   // Pass parameter 'false' to avoid exception check in call_VM.
1101   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
1102 
1103   // Check for an exception while looking up the target method. If we
1104   // incurred one, bail.
1105   __ ld(pending_exception, thread_(pending_exception));
1106   __ cmpdi(CCR0, pending_exception, 0);
1107   __ bne(CCR0, exception_return_sync_check); // has pending exception
1108 
1109   // reload method
1110   __ ld(R19_method, state_(_method));
1111 
1112   // Reload the signature handler, it may have been created/assigned in the meantime.
1113   __ ld(signature_handler_fd, method_(signature_handler));
1114 
1115   __ BIND(call_signature_handler);
1116 
1117   // Before we call the signature handler we push a new frame to
1118   // protect the interpreter frame volatile registers when we return
1119   // from jni but before we can get back to Java.
1120 
1121   // First set the frame anchor while the SP/FP registers are
1122   // convenient and the slow signature handler can use this same frame
1123   // anchor.
1124 
1125   // We have a TOP_IJAVA_FRAME here, which belongs to us.
1126   __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
1127 
1128   // Now the interpreter frame (and its call chain) have been
1129   // invalidated and flushed. We are now protected against eager
1130   // being enabled in native code. Even if it goes eager the
1131   // registers will be reloaded as clean and we will invalidate after
1132   // the call so no spurious flush should be possible.
1133 
1134   // Call signature handler and pass locals address.
1135   //
1136   // Our signature handlers copy required arguments to the C stack
1137   // (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to
1138   // F13_ARG13.
1139   __ mr(R3_ARG1, R18_locals);
1140   __ ld(signature_handler_fd, 0, signature_handler_fd);
1141   __ call_stub(signature_handler_fd);
1142   // reload method
1143   __ ld(R19_method, state_(_method));
1144 
1145   // Remove the register parameter varargs slots we allocated in
1146   // compute_interpreter_state. SP+16 ends up pointing to the ABI
1147   // outgoing argument area.
1148   //
1149   // Not needed on PPC64.
1150   //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
1151 
1152   assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
1153   // Save across call to native method.
1154   __ mr(result_handler_addr, R3_RET);
1155 
1156   // Set up fixed parameters and call the native method.
1157   // If the method is static, get mirror into R4_ARG2.
1158 
1159   {
1160     Label method_is_not_static;
1161     // access_flags is non-volatile and still valid; no need to restore it.
1162 
1163     // Test the static bit in the access flags.
1164     __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
1165     __ bfalse(CCR0, method_is_not_static);
1166 
1167     // constants = method->constants();
1168     __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1169     __ ld(R11_scratch1/*constants*/, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
1170     // pool_holder = method->constants()->pool_holder();
1171     __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
1172           R11_scratch1/*constants*/);
1173 
1174     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
1175 
1176     // mirror = pool_holder->klass_part()->java_mirror();
1177     __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
1178     // state->_native_mirror = mirror;
1179     __ std(R0/*mirror*/, state_(_oop_temp));
1180     // R4_ARG2 = &state->_oop_temp;
1181     __ addir(R4_ARG2, state_(_oop_temp));
1182 
1183     __ BIND(method_is_not_static);
1184   }
1185 
1186   // At this point, arguments have been copied off the stack into
1187   // their JNI positions. Oops are boxed in-place on the stack, with
1188   // handles copied to arguments. The result handler address is in a
1189   // register.
1190 
1191   // pass JNIEnv address as first parameter
1192   __ addir(R3_ARG1, thread_(jni_environment));
1193 
1194   // Load the native_method entry before we change the thread state.
1195   __ ld(native_method_fd, method_(native_function));
1196 
1197   //=============================================================================
1198   // Transition from _thread_in_Java to _thread_in_native. As soon as
1199   // we make this change the safepoint code needs to be certain that
1200   // the last Java frame we established is good. The pc in that frame
1201   // just needs to be near here not an actual return address.
1202 
1203   // We use release_store_fence to update values like the thread state, where
1204   // we don't want the current thread to continue until all our prior memory
1205   // accesses (including the new thread state) are visible to other threads.
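       // (Here this is a release barrier followed by the store, i.e. roughly
       //  OrderAccess::release_store(&thread->_thread_state, _thread_in_native);
       //  the full fence below is only added when UseMembar is set.)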
1206   __ li(R0, _thread_in_native);
1207   __ release();
1208 
1209   // TODO: PPC port: assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
1210   __ stw(R0, thread_(thread_state));
1211 
1212   if (UseMembar) {
1213     __ fence();
1214   }
1215 
1216   //=============================================================================
1217   // Call the native method. Argument registers must not have been
1218   // overwritten since "__ call_stub(signature_handler);" (except for
1219   // ARG1 and ARG2 for static methods)
1220   __ call_c(native_method_fd);
1221 
1222   __ std(R3_RET, state_(_native_lresult));
1223   __ stfd(F1_RET, state_(_native_fresult));
1224 
1225   // The frame_manager_lr field, which we use for setting the last
1226   // java frame, gets overwritten by the signature handler. Restore
1227   // it now.
1228   __ get_PC_trash_LR(R11_scratch1);
1229   __ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
1230 
1231   // Because of GC R19_method may no longer be valid.
1232 
1233   // Block, if necessary, before resuming in _thread_in_Java state.
1234   // In order for GC to work, don't clear the last_Java_sp until after
1235   // blocking.
1236 
1237 
1238 
1239   //=============================================================================
1240   // Switch thread to "native transition" state before reading the
1241   // synchronization state.  This additional state is necessary
1242   // because reading and testing the synchronization state is not
1243   // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
1244   // in _thread_in_native state, loads _not_synchronized and is
1245   // preempted.  VM thread changes sync state to synchronizing and
1246   // suspends threads for GC. Thread A is resumed to finish this
1247   // native method, but doesn't block here since it didn't see any
1248   // synchronization in progress, and escapes.
1249 
1250   // We use release_store_fence to update values like the thread state, where
1251   // we don't want the current thread to continue until all our prior memory
1252   // accesses (including the new thread state) are visible to other threads.
1253   __ li(R0/*thread_state*/, _thread_in_native_trans);
1254   __ release();
1255   __ stw(R0/*thread_state*/, thread_(thread_state));
1256   if (UseMembar) {
1257     __ fence();
1258   }
1259   // Write serialization page so that the VM thread can do a pseudo remote
1260   // membar. We use the current thread pointer to calculate a thread
1261   // specific offset to write to within the page. This minimizes bus
1262   // traffic due to cache line collision.
1263   else {
1264     __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
1265   }
1266 
1267   // Now before we return to java we must look for a current safepoint
1268   // (a new safepoint cannot start since we entered native_trans).
1269   // We must check here because a current safepoint could be modifying
1270   // the caller's registers right this moment.
1271 
1272   // Acquire isn't strictly necessary here because of the fence, but
1273   // sync_state is declared to be volatile, so we do it anyway.
1274   __ load_const(sync_state_addr, SafepointSynchronize::address_of_state());
1275 
1276   // TODO: PPC port: assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
1277   __ lwz(sync_state, 0, sync_state_addr);
1278 
1279   // TODO: PPC port: assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
1280   __ lwz(suspend_flags, thread_(suspend_flags));
1281 
1282   __ acquire();
1283 
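       // In C terms the check below is roughly (a sketch only):
       //   if (SafepointSynchronize::_state != SafepointSynchronize::_not_synchronized ||
       //       thread->_suspend_flags != 0) {
       //     JavaThread::check_special_condition_for_native_trans(thread);
       //   }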
1284   Label sync_check_done;
1285   Label do_safepoint;
1286   // No synchronization in progress nor yet synchronized
1287   __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
1288   // not suspended
1289   __ cmpwi(CCR1, suspend_flags, 0);
1290 
1291   __ bne(CCR0, do_safepoint);
1292   __ beq(CCR1, sync_check_done);
1293   __ bind(do_safepoint);
1294   // Block.  We do the call directly and leave the current
1295   // last_Java_frame setup undisturbed.  We must save any possible
1296   // native result across the call. No oop is present.
1297 
1298   __ mr(R3_ARG1, R16_thread);
1299   __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
1300             relocInfo::none);
1301   __ bind(sync_check_done);
1302 
1303   //=============================================================================
1304   // <<<<<< Back in Interpreter Frame >>>>>
1305 
1306   // We are in thread_in_native_trans here and back in the normal
1307   // interpreter frame. We don't have to do anything special about
1308   // safepoints and we can switch to Java mode anytime we are ready.
1309 
1310   // Note: frame::interpreter_frame_result has a dependency on how the
1311   // method result is saved across the call to post_method_exit. For
1312   // native methods it assumes that the non-FPU/non-void result is
1313   // saved in _native_lresult and a FPU result in _native_fresult. If
1314   // this changes then the interpreter_frame_result implementation
1315   // will need to be updated too.
1316 
1317   // On PPC64, we have stored the result directly after the native call.
1318 
1319   //=============================================================================
1320   // back in Java
1321 
1322   // We use release_store_fence to update values like the thread state, where
1323   // we don't want the current thread to continue until all our prior memory
1324   // accesses (including the new thread state) are visible to other threads.
1325   __ li(R0/*thread_state*/, _thread_in_Java);
1326   __ release();
1327   __ stw(R0/*thread_state*/, thread_(thread_state));
1328   if (UseMembar) {
1329     __ fence();
1330   }
1331 
1332   __ reset_last_Java_frame();
1333 
1334   // Reload R19_method, the call killed it. We can't look at
1335   // state->_method until we're back in java state because in java
1336   // state gc can't happen until we get to a safepoint.
1337   //
1338   // We've set thread_state to _thread_in_Java already, so restoring
1339   // R19_method from R14_state works; R19_method is invalid, because
1340   // GC may have happened.
1341   __ ld(R19_method, state_(_method)); // reload method, may have moved
1342 
1343   // jvmdi/jvmpi support. Whether we've got an exception pending or
1344   // not, and whether unlocking throws an exception or not, we notify
1345   // on native method exit. If we do have an exception, we'll end up
1346   // in the caller's context to handle it, so if we don't do the
1347   // notify here, we'll drop it on the floor.
1348 
1349   __ notify_method_exit(true/*native method*/,
1350                         ilgl /*illegal state (not used for native methods)*/);
1351 
1352 
1353 
1354   //=============================================================================
1355   // Handle exceptions
1356 
1357   // See if we must unlock.
1358   //
1359   {
1360     Label method_is_not_synced;
1361     // is_synced is still alive
1362     assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
1363     __ bfalse(is_synced, method_is_not_synced);
1364 
1365     unlock_method();
1366 
1367     __ bind(method_is_not_synced);
1368   }
1369 
1370   // Reset active handles after returning from native.
1371   // thread->active_handles()->clear();
1372   __ ld(active_handles, thread_(active_handles));
1373   // JNIHandleBlock::_top is an int.
1374   // TODO:  PPC port: assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
1375   __ li(R0, 0);
1376   __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
1377 
1378   Label no_pending_exception_from_native_method;
1379   __ ld(R0/*pending_exception*/, thread_(pending_exception));
1380   __ cmpdi(CCR0, R0/*pending_exception*/, 0);
1381   __ beq(CCR0, no_pending_exception_from_native_method);
1382 
1383 
1384   //-----------------------------------------------------------------------------
1385   // An exception is pending. We call into the runtime only if the
1386   // caller was not interpreted. If it was interpreted the
1387   // interpreter will do the correct thing. If it isn't interpreted
1388   // (call stub/compiled code) we will change our return and continue.
1389   __ BIND(exception_return);
1390 
1391   Label return_to_initial_caller_with_pending_exception;
1392   __ cmpdi(CCR0, R15_prev_state, 0);
1393   __ beq(CCR0, return_to_initial_caller_with_pending_exception);
1394 
1395   // We are returning to an interpreter activation, just pop the state,
1396   // pop our frame, leave the exception pending, and return.
1397   __ pop_interpreter_state(/*prev_state_may_be_0=*/false);
1398   __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
1399   __ mtlr(R21_tmp1);
1400   __ blr();
1401 
1402   __ BIND(exception_return_sync_check);
1403 
1404   assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
1405   __ bfalse(is_synced, exception_return);
1406   unlock_method();
1407   __ b(exception_return);
1408 
1409 
1410   __ BIND(return_to_initial_caller_with_pending_exception);
1411   // We are returning to a c2i-adapter / call-stub, get the address of the
1412   // exception handler, pop the frame and return to the handler.
1413 
1414   // First, pop to caller's frame.
1415   __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1  /* set to return pc */, R22_tmp2);
1416 
1417   __ push_frame_abi112(0, R11_scratch1);
1418   // Get the address of the exception handler.
1419   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1420                   R16_thread,
1421                   R21_tmp1 /* return pc */);
1422   __ pop_frame();
1423 
  // Load the PC of the exception handler into LR.
1425   __ mtlr(R3_RET);
1426 
1427   // Load exception into R3_ARG1 and clear pending exception in thread.
1428   __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
1429   __ li(R4_ARG2, 0);
1430   __ std(R4_ARG2, thread_(pending_exception));
1431 
1432   // Load the original return pc into R4_ARG2.
1433   __ mr(R4_ARG2/*issuing_pc*/, R21_tmp1);
1434 
1435   // Resize frame to get rid of a potential extension.
1436   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
1437 
1438   // Return to exception handler.
1439   __ blr();
1440 
1441 
1442   //-----------------------------------------------------------------------------
1443   // No exception pending.
1444   __ BIND(no_pending_exception_from_native_method);
1445 
1446   // Move native method result back into proper registers and return.
1447   // Invoke result handler (may unbox/promote).
1448   __ ld(R3_RET, state_(_native_lresult));
1449   __ lfd(F1_RET, state_(_native_fresult));
1450   __ call_stub(result_handler_addr);
1451 
  // We have created a new BytecodeInterpreter object; now we must destroy it.
1453   //
1454   // Restore previous R14_state and caller's SP.  R15_prev_state may
1455   // be 0 here, because our caller may be the call_stub or compiled
1456   // code.
1457   __ pop_interpreter_state(/*prev_state_may_be_0=*/true);
1458   __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
1459   // Resize frame to get rid of a potential extension.
1460   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
1461 
1462   // Must use the return pc which was loaded from the caller's frame
1463   // as the VM uses return-pc-patching for deoptimization.
1464   __ mtlr(R21_tmp1);
1465   __ blr();
1466 
1467 
1468 
1469   //=============================================================================
1470   // We encountered an exception while computing the interpreter
1471   // state, so R14_state isn't valid. Act as if we just returned from
1472   // the callee method with a pending exception.
1473   __ BIND(stack_overflow_return);
1474 
1475   //
1476   // Register state:
1477   //   R14_state         invalid; trashed by compute_interpreter_state
1478   //   R15_prev_state    valid, but may be 0
1479   //
1480   //   R1_SP             valid, points to caller's SP; wasn't yet updated by
1481   //                     compute_interpreter_state
1482   //
1483 
1484   // Create exception oop and make it pending.
1485 
1486   // Throw the exception via RuntimeStub "throw_StackOverflowError_entry".
1487   //
1488   // Previously, we called C-Code directly. As a consequence, a
1489   // possible GC tried to process the argument oops of the top frame
1490   // (see RegisterMap::clear, which sets the corresponding flag to
  // true). This led to crashes because:
  //   1. The top register map did not contain locations for the argument registers.
  //   2. The arguments are dead anyway and could already have been overwritten in the worst case.
  // Solution: Call via a special runtime stub that pushes its own
  // frame. This runtime stub has the flag "CodeBlob::caller_must_gc_arguments()"
  // set to "false", which prevents the dead arguments from getting GC'd.
1497   //
1498   // 2 cases exist:
1499   // 1. We were called by the c2i adapter / call stub
1500   // 2. We were called by the frame manager
1501   //
1502   // Both cases are handled by this code:
1503   // 1. - initial_caller_sp was saved in both cases on entry, so it's safe to load it back even if it was not changed.
1504   //    - control flow will be:
1505   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->excp_blob of caller method
1506   // 2. - control flow will be:
1507   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->rethrow_excp_entry of frame manager->resume_method
1508   //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
1509   //      registers using the stack and resume the calling method with a pending excp.
1510 
1511   // Pop any c2i extension from the stack, restore LR just to be sure
1512   __ ld(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
1513   __ mtlr(R0);
1514   // Resize frame to get rid of a potential extension.
1515   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
1516 
1517   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
1518   // Load target address of the runtime stub.
1519   __ load_const(R12_scratch2, (StubRoutines::throw_StackOverflowError_entry()));
1520   __ mtctr(R12_scratch2);
1521   __ bctr();
1522 
1523 
1524   //=============================================================================
1525   // Counter overflow.
1526 
1527   if (inc_counter) {
1528     // Handle invocation counter overflow
1529     __ bind(invocation_counter_overflow);
1530 
1531     generate_counter_overflow(continue_after_compile);
1532   }
1533 
1534   native_entry = entry;
1535   return entry;
1536 }
1537 
1538 bool AbstractInterpreter::can_be_compiled(methodHandle m) {
1539   // No special entry points that preclude compilation.
1540   return true;
1541 }
1542 
1543 // Unlock the current method.
1544 //
1545 void CppInterpreterGenerator::unlock_method(void) {
1546   // Find preallocated monitor and unlock method. Method monitor is
1547   // the first one.
1548 
1549   // Registers alive
1550   //   R14_state
1551   //
1552   // Registers updated
1553   //   volatiles
1554   //
1555   const Register monitor = R4_ARG2;
1556 
1557   // Pass address of initial monitor we allocated.
1558   //
1559   // First monitor.
1560   __ addi(monitor, R14_state, -frame::interpreter_frame_monitor_size_in_bytes());
1561 
1562   // Unlock method
1563   __ unlock_object(monitor);
1564 }
1565 
1566 // Lock the current method.
1567 //
1568 void CppInterpreterGenerator::lock_method(void) {
1569   // Find preallocated monitor and lock method. Method monitor is the
1570   // first one.
1571 
1572   //
1573   // Registers alive
1574   //   R14_state
1575   //
1576   // Registers updated
1577   //   volatiles
1578   //
1579 
1580   const Register monitor = R4_ARG2;
1581   const Register object  = R5_ARG3;
1582 
1583   // Pass address of initial monitor we allocated.
1584   __ addi(monitor, R14_state, -frame::interpreter_frame_monitor_size_in_bytes());
1585 
1586   // Pass object address.
1587   __ ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
1588 
1589   // Lock method.
1590   __ lock_object(monitor, object);
1591 }
1592 
1593 // Generate code for handling resuming a deopted method.
1594 void CppInterpreterGenerator::generate_deopt_handling(Register result_index) {
1595 
1596   //=============================================================================
1597   // Returning from a compiled method into a deopted method. The
1598   // bytecode at the bcp has completed. The result of the bytecode is
1599   // in the native abi (the tosca for the template based
1600   // interpreter). Any stack space that was used by the bytecode that
1601   // has completed has been removed (e.g. parameters for an invoke) so
1602   // all that we have to do is place any pending result on the
1603   // expression stack and resume execution on the next bytecode.
1604 
1605   Label return_from_deopt_common;
1606 
1607   // R3_RET and F1_RET are live here! Load the array index of the
1608   // required result stub address and continue at return_from_deopt_common.
1609 
1610   // Deopt needs to jump to here to enter the interpreter (return a result).
1611   deopt_frame_manager_return_atos = __ pc();
1612   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_OBJECT));
1613   __ b(return_from_deopt_common);
1614 
1615   deopt_frame_manager_return_btos = __ pc();
1616   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_BOOLEAN));
1617   __ b(return_from_deopt_common);
1618 
1619   deopt_frame_manager_return_itos = __ pc();
1620   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_INT));
1621   __ b(return_from_deopt_common);
1622 
1623   deopt_frame_manager_return_ltos = __ pc();
1624   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_LONG));
1625   __ b(return_from_deopt_common);
1626 
1627   deopt_frame_manager_return_ftos = __ pc();
1628   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_FLOAT));
1629   __ b(return_from_deopt_common);
1630 
1631   deopt_frame_manager_return_dtos = __ pc();
1632   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_DOUBLE));
1633   __ b(return_from_deopt_common);
1634 
1635   deopt_frame_manager_return_vtos = __ pc();
1636   __ li(result_index, AbstractInterpreter::BasicType_as_index(T_VOID));
1637   // Last one, fall-through to return_from_deopt_common.
1638 
1639   // Deopt return common. An index is present that lets us move any
  // possible result being returned to the interpreter's stack.
1641   //
1642   __ BIND(return_from_deopt_common);
1643 
1644 }
1645 
1646 // Generate the code to handle a more_monitors message from the c++ interpreter.
1647 void CppInterpreterGenerator::generate_more_monitors() {
1648 
1649   //
1650   // Registers alive
1651   //   R16_thread      - JavaThread*
1652   //   R15_prev_state  - previous BytecodeInterpreter or 0
1653   //   R14_state       - BytecodeInterpreter* address of receiver's interpreter state
1654   //   R1_SP           - old stack pointer
1655   //
1656   // Registers updated
1657   //   R1_SP          - new stack pointer
1658   //
1659 
1660   // Very-local scratch registers.
1661   const Register old_tos         = R21_tmp1;
1662   const Register new_tos         = R22_tmp2;
1663   const Register stack_base      = R23_tmp3;
1664   const Register stack_limit     = R24_tmp4;
1665   const Register slot            = R25_tmp5;
1666   const Register n_slots         = R25_tmp5;
1667 
1668   // Interpreter state fields.
1669   const Register msg             = R24_tmp4;
1670 
1671   // Load up relevant interpreter state.
1672 
1673   __ ld(stack_base, state_(_stack_base));                // Old stack_base
1674   __ ld(old_tos, state_(_stack));                        // Old tos
1675   __ ld(stack_limit, state_(_stack_limit));              // Old stack_limit
1676 
1677   // extracted monitor_size
1678   int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
1679   assert(Assembler::is_aligned((unsigned int)monitor_size,
1680                                (unsigned int)frame::alignment_in_bytes),
1681          "size of a monitor must respect alignment of SP");
1682 
1683   // Save and restore top LR
1684   __ ld(R12_scratch2, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  __ resize_frame(-monitor_size, R11_scratch1); // Allocate space for new monitor
  __ std(R12_scratch2, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  // Initial_caller_sp is used as unextended_sp for non-initial callers.
  __ std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
1689   __ addi(stack_base, stack_base, -monitor_size);        // New stack_base
1690   __ addi(new_tos, old_tos, -monitor_size);              // New tos
1691   __ addi(stack_limit, stack_limit, -monitor_size);      // New stack_limit
1692 
1693   __ std(R1_SP, state_(_last_Java_sp));                  // Update frame_bottom
1694 
1695   __ std(stack_base, state_(_stack_base));               // Update stack_base
1696   __ std(new_tos, state_(_stack));                       // Update tos
1697   __ std(stack_limit, state_(_stack_limit));             // Update stack_limit
1698 
1699   __ li(msg, BytecodeInterpreter::got_monitors);         // Tell interpreter we allocated the lock
1700   __ stw(msg, state_(_msg));
1701 
1702   // Shuffle expression stack down. Recall that stack_base points
1703   // just above the new expression stack bottom. Old_tos and new_tos
  // are used to scan through the old and new expression stacks.
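  //
  // Illustration only (not generated code): the copy loop below is the
  // moral equivalent of
  //   while (n_slots-- > 0) { *++new_tos = *++old_tos; }
  // i.e. every live expression stack slot moves down by monitor_size bytes.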
1705 
1706   Label copy_slot, copy_slot_finished;
1707   __ sub(n_slots, stack_base, new_tos);
1708   __ srdi_(n_slots, n_slots, LogBytesPerWord);           // compute number of slots to copy
  assert(LogBytesPerWord == 3, "conflicts with assembler instructions");
1710   __ beq(CCR0, copy_slot_finished);                       // nothing to copy
1711 
1712   __ mtctr(n_slots);
1713 
1714   // loop
1715   __ bind(copy_slot);
1716   __ ldu(slot, BytesPerWord, old_tos);                   // slot = *++old_tos;
1717   __ stdu(slot, BytesPerWord, new_tos);                  // *++new_tos = slot;
1718   __ bdnz(copy_slot);
1719 
1720   __ bind(copy_slot_finished);
1721 
1722   // Restart interpreter
1723   __ li(R0, 0);
1724   __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base);  // Mark lock as unused
1725 }
1726 
1727 address CppInterpreterGenerator::generate_normal_entry(void) {
1728   if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
1729 
1730   address entry = __ pc();
1731 
1732   address return_from_native_pc = (address) NULL;
1733 
1734   // Initial entry to frame manager (from call_stub or c2i_adapter)
1735 
1736   //
1737   // Registers alive
1738   //   R16_thread               - JavaThread*
1739   //   R19_method               - callee's Method (method to be invoked)
1740   //   R17_tos                  - address of sender tos (prepushed)
1741   //   R1_SP                    - SP prepared by call stub such that caller's outgoing args are near top
1742   //   LR                       - return address to caller (call_stub or c2i_adapter)
1743   //   R21_sender_SP            - initial caller sp
1744   //
1745   // Registers updated
1746   //   R15_prev_state           - 0
1747   //
1748   // Stack layout at this point:
1749   //
1750   //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
1751   //           alignment (optional)
1752   //           [outgoing Java arguments]     <-- R17_tos
1753   //           ...
1754   //   PARENT  [PARENT_IJAVA_FRAME_ABI]
1755   //           ...
1756   //
1757 
1758   // Save initial_caller_sp to caller's abi.
1759   // The caller frame must be resized before returning to get rid of
1760   // the c2i part on top of the calling compiled frame (if any).
1761   // R21_tmp1 must match sender_sp in gen_c2i_adapter.
1762   // Now override the saved SP with the senderSP so we can pop c2i
1763   // arguments (if any) off when we return.
1764   __ std(R21_sender_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
1765 
1766   // Save LR to caller's frame. We don't use _abi(lr) here,
1767   // because it is not safe.
1768   __ mflr(R0);
1769   __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
1770 
1771   // If we come here, it is the first invocation of the frame manager.
1772   // So there is no previous interpreter state.
1773   __ li(R15_prev_state, 0);
1774 
1775 
1776   // Fall through to where "recursive" invocations go.
1777 
1778   //=============================================================================
1779   // Dispatch an instance of the interpreter. Recursive activations
1780   // come here.
1781 
1782   Label re_dispatch;
1783   __ BIND(re_dispatch);
1784 
1785   //
1786   // Registers alive
1787   //    R16_thread        - JavaThread*
1788   //    R19_method        - callee's Method
1789   //    R17_tos           - address of caller's tos (prepushed)
1790   //    R15_prev_state    - address of caller's BytecodeInterpreter or 0
1791   //    R1_SP             - caller's SP trimmed such that caller's outgoing args are near top.
1792   //
1793   // Stack layout at this point:
1794   //
1795   //   0       [TOP_IJAVA_FRAME_ABI]
1796   //           alignment (optional)
1797   //           [outgoing Java arguments]
1798   //           ...
1799   //   PARENT  [PARENT_IJAVA_FRAME_ABI]
1800   //           ...
1801 
1802   // fall through to interpreted execution
1803 
1804   //=============================================================================
1805   // Allocate a new Java frame and initialize the new interpreter state.
1806 
1807   Label stack_overflow_return;
1808 
1809   // Create a suitable new Java frame plus a new BytecodeInterpreter instance
1810   // in the current (frame manager's) C frame.
1811   generate_compute_interpreter_state(stack_overflow_return);
1812 
1813   // fall through
1814 
1815   //=============================================================================
1816   // Interpreter dispatch.
1817 
1818   Label call_interpreter;
1819   __ BIND(call_interpreter);
1820 
1821   //
1822   // Registers alive
1823   //   R16_thread       - JavaThread*
1824   //   R15_prev_state   - previous BytecodeInterpreter or 0
1825   //   R14_state        - address of receiver's BytecodeInterpreter
1826   //   R1_SP            - receiver's stack pointer
1827   //
1828 
1829   // Thread fields.
1830   const Register pending_exception = R21_tmp1;
1831 
1832   // Interpreter state fields.
1833   const Register msg               = R24_tmp4;
1834 
  // Method fields.
1836   const Register parameter_count   = R25_tmp5;
1837   const Register result_index      = R26_tmp6;
1838 
1839   const Register dummy             = R28_tmp8;
1840 
1841   // Address of various interpreter stubs.
1842   // R29_tmp9 is reserved.
1843   const Register stub_addr         = R27_tmp7;
1844 
1845   // Uncommon trap needs to jump to here to enter the interpreter
1846   // (re-execute current bytecode).
1847   unctrap_frame_manager_entry  = __ pc();
1848 
1849   // If we are profiling, store our fp (BSP) in the thread so we can
1850   // find it during a tick.
1851   if (Arguments::has_profile()) {
1852     // On PPC64 we store the pointer to the current BytecodeInterpreter,
1853     // instead of the bsp of ia64. This should suffice to be able to
1854     // find all interesting information.
1855     __ std(R14_state, thread_(last_interpreter_fp));
1856   }
1857 
1858   // R16_thread, R14_state and R15_prev_state are nonvolatile
1859   // registers. There is no need to save these. If we needed to save
1860   // some state in the current Java frame, this could be a place to do
1861   // so.
1862 
1863   // Call Java bytecode dispatcher passing "BytecodeInterpreter* istate".
1864   __ call_VM_leaf(CAST_FROM_FN_PTR(address,
1865                                    JvmtiExport::can_post_interpreter_events()
1866                                    ? BytecodeInterpreter::runWithChecks
1867                                    : BytecodeInterpreter::run),
1868                   R14_state);
1869 
1870   interpreter_return_address  = __ last_calls_return_pc();
1871 
1872   // R16_thread, R14_state and R15_prev_state have their values preserved.
1873 
1874   // If we are profiling, clear the fp in the thread to tell
1875   // the profiler that we are no longer in the interpreter.
1876   if (Arguments::has_profile()) {
1877     __ li(R11_scratch1, 0);
1878     __ std(R11_scratch1, thread_(last_interpreter_fp));
1879   }
1880 
1881   // Load message from bytecode dispatcher.
1882   // TODO: PPC port: guarantee(4 == BytecodeInterpreter::sz_msg(), "unexpected field size");
1883   __ lwz(msg, state_(_msg));
1884 
1885 
1886   Label more_monitors;
1887   Label return_from_native;
1888   Label return_from_native_common;
1889   Label return_from_native_no_exception;
1890   Label return_from_interpreted_method;
1891   Label return_from_recursive_activation;
1892   Label unwind_recursive_activation;
1893   Label resume_interpreter;
1894   Label return_to_initial_caller;
1895   Label unwind_initial_activation;
1896   Label unwind_initial_activation_pending_exception;
1897   Label call_method;
1898   Label call_special;
1899   Label retry_method;
1900   Label retry_method_osr;
1901   Label popping_frame;
1902   Label throwing_exception;
1903 
1904   // Branch according to the received message
1905 
1906   __ cmpwi(CCR1, msg, BytecodeInterpreter::call_method);
1907   __ cmpwi(CCR2, msg, BytecodeInterpreter::return_from_method);
1908 
1909   __ beq(CCR1, call_method);
1910   __ beq(CCR2, return_from_interpreted_method);
1911 
1912   __ cmpwi(CCR3, msg, BytecodeInterpreter::more_monitors);
1913   __ cmpwi(CCR4, msg, BytecodeInterpreter::throwing_exception);
1914 
1915   __ beq(CCR3, more_monitors);
1916   __ beq(CCR4, throwing_exception);
1917 
1918   __ cmpwi(CCR5, msg, BytecodeInterpreter::popping_frame);
1919   __ cmpwi(CCR6, msg, BytecodeInterpreter::do_osr);
1920 
1921   __ beq(CCR5, popping_frame);
1922   __ beq(CCR6, retry_method_osr);
1923 
1924   __ stop("bad message from interpreter");
1925 
1926 
1927   //=============================================================================
1928   // Add a monitor just below the existing one(s). State->_stack_base
1929   // points to the lowest existing one, so we insert the new one just
  // below it and shuffle the expression stack down. Referring to the
  // stack layout picture above, we must update _stack_base, _stack,
  // _stack_limit and _last_Java_sp in the interpreter state.
1933 
1934   __ BIND(more_monitors);
1935 
1936   generate_more_monitors();
1937   __ b(call_interpreter);
1938 
1939   generate_deopt_handling(result_index);
1940 
1941   // Restoring the R14_state is already done by the deopt_blob.
1942 
1943   // Current tos includes no parameter slots.
1944   __ ld(R17_tos, state_(_stack));
1945   __ li(msg, BytecodeInterpreter::deopt_resume);
1946   __ b(return_from_native_common);
1947 
1948   // We are sent here when we are unwinding from a native method or
1949   // adapter with an exception pending. We need to notify the interpreter
1950   // that there is an exception to process.
  // We also arrive here if the frame manager called an (interpreted) target
  // which returns with a StackOverflow exception.
  // The control flow in this case is:
1954   // frame_manager->throw_excp_stub->forward_excp->rethrow_excp_entry
1955 
1956   AbstractInterpreter::_rethrow_exception_entry = __ pc();
1957 
1958   // Restore R14_state.
1959   __ ld(R14_state, 0, R1_SP);
1960   __ addi(R14_state, R14_state,
1961               -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
1962 
1963   // Store exception oop into thread object.
1964   __ std(R3_RET, thread_(pending_exception));
1965   __ li(msg, BytecodeInterpreter::method_resume /*rethrow_exception*/);
1966   //
  // NOTE: the interpreter frame as set up by deopt does NOT include
  // any parameter slots (a good thing, since we have no callee here
  // and couldn't remove them), so we don't have to do any calculations
  // here to figure it out.
1971   //
1972   __ ld(R17_tos, state_(_stack));
1973   __ b(return_from_native_common);
1974 
1975 
1976   //=============================================================================
1977   // Returning from a native method.  Result is in the native abi
1978   // location so we must move it to the java expression stack.
1979 
1980   __ BIND(return_from_native);
1981   guarantee(return_from_native_pc == (address) NULL, "precondition");
1982   return_from_native_pc = __ pc();
1983 
1984   // Restore R14_state.
1985   __ ld(R14_state, 0, R1_SP);
1986   __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
1987 
1988   //
1989   // Registers alive
1990   //   R16_thread
1991   //   R14_state    - address of caller's BytecodeInterpreter.
1992   //   R3_RET       - integer result, if any.
1993   //   F1_RET       - float result, if any.
1994   //
1995   // Registers updated
1996   //   R19_method   - callee's Method
1997   //   R17_tos      - caller's tos, with outgoing args popped
1998   //   result_index - index of result handler.
1999   //   msg          - message for resuming interpreter.
2000   //
2001 
2002   // Very-local scratch registers.
2003 
2004   const ConditionRegister have_pending_exception = CCR0;
2005 
2006   // Load callee Method, gc may have moved it.
2007   __ ld(R19_method, state_(_result._to_call._callee));
2008 
  // Load address of caller's tos; it includes parameter slots.
2010   __ ld(R17_tos, state_(_stack));
2011 
2012   // Pop callee's parameters.
2013 
2014   __ ld(parameter_count, in_bytes(Method::const_offset()), R19_method);
2015   __ lhz(parameter_count, in_bytes(ConstMethod::size_of_parameters_offset()), parameter_count);
2016   __ sldi(parameter_count, parameter_count, Interpreter::logStackElementSize);
2017   __ add(R17_tos, R17_tos, parameter_count);
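
  // Illustration only: the instruction sequence above amounts roughly to
  //   R17_tos += method->size_of_parameters() << Interpreter::logStackElementSize;
  // i.e. the callee's incoming arguments are popped off the caller's
  // expression stack.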
2018 
2019   // Result stub address array index
2020   // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
2021   __ lwa(result_index, method_(result_index));
2022 
2023   __ li(msg, BytecodeInterpreter::method_resume);
2024 
2025   //
2026   // Registers alive
2027   //   R16_thread
2028   //   R14_state    - address of caller's BytecodeInterpreter.
2029   //   R17_tos      - address of caller's tos with outgoing args already popped
2030   //   R3_RET       - integer return value, if any.
2031   //   F1_RET       - float return value, if any.
2032   //   result_index - index of result handler.
2033   //   msg          - message for resuming interpreter.
2034   //
2035   // Registers updated
2036   //   R3_RET       - new address of caller's tos, including result, if any
2037   //
2038 
2039   __ BIND(return_from_native_common);
2040 
2041   // Check for pending exception
2042   __ ld(pending_exception, thread_(pending_exception));
2043   __ cmpdi(CCR0, pending_exception, 0);
2044   __ beq(CCR0, return_from_native_no_exception);
2045 
2046   // If there's a pending exception, we really have no result, so
2047   // R3_RET is dead. Resume_interpreter assumes the new tos is in
2048   // R3_RET.
2049   __ mr(R3_RET, R17_tos);
2050   // `resume_interpreter' expects R15_prev_state to be alive.
2051   __ ld(R15_prev_state, state_(_prev_link));
2052   __ b(resume_interpreter);
2053 
2054   __ BIND(return_from_native_no_exception);
2055 
2056   // No pending exception, copy method result from native ABI register
2057   // to tos.
2058 
2059   // Address of stub descriptor address array.
2060   __ load_const(stub_addr, CppInterpreter::tosca_result_to_stack());
2061 
2062   // Pass address of tos to stub.
2063   __ mr(R4_ARG2, R17_tos);
2064 
2065   // Address of stub descriptor address.
2066   __ sldi(result_index, result_index, LogBytesPerWord);
2067   __ add(stub_addr, stub_addr, result_index);
2068 
2069   // Stub descriptor address.
2070   __ ld(stub_addr, 0, stub_addr);
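
  // Illustration only: the address arithmetic above amounts to
  //   stub_addr = ((address*)CppInterpreter::tosca_result_to_stack())[result_index];
  // i.e. we pick the stack-conversion stub matching the result type.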
2071 
2072   // TODO: don't do this via a call, do it in place!
2073   //
2074   // call stub via descriptor
2075   // in R3_ARG1/F1_ARG1: result value (R3_RET or F1_RET)
2076   __ call_stub(stub_addr);
2077 
2078   // new tos = result of call in R3_RET
2079 
2080   // `resume_interpreter' expects R15_prev_state to be alive.
2081   __ ld(R15_prev_state, state_(_prev_link));
2082   __ b(resume_interpreter);
2083 
2084   //=============================================================================
2085   // We encountered an exception while computing the interpreter
2086   // state, so R14_state isn't valid. Act as if we just returned from
2087   // the callee method with a pending exception.
2088   __ BIND(stack_overflow_return);
2089 
2090   //
2091   // Registers alive
2092   //   R16_thread        - JavaThread*
2093   //   R1_SP             - old stack pointer
2094   //   R19_method        - callee's Method
2095   //   R17_tos           - address of caller's tos (prepushed)
2096   //   R15_prev_state    - address of caller's BytecodeInterpreter or 0
2097   //   R18_locals        - address of callee's locals array
2098   //
2099   // Registers updated
2100   //   R3_RET           - address of resuming tos, if recursive unwind
2101 
2102   Label Lskip_unextend_SP;
2103 
2104   {
2105   const ConditionRegister is_initial_call = CCR0;
2106   const Register tos_save = R21_tmp1;
2107   const Register tmp = R22_tmp2;
2108 
2109   assert(tos_save->is_nonvolatile(), "need a nonvolatile");
2110 
2111   // Is the exception thrown in the initial Java frame of this frame
2112   // manager frame?
2113   __ cmpdi(is_initial_call, R15_prev_state, 0);
2114   __ bne(is_initial_call, Lskip_unextend_SP);
2115 
  // Pop any c2i extension from the stack. This is necessary in the
  // non-recursive case (that is, we were called by the c2i adapter,
  // meaning we have no prev state). In this case we entered the frame
  // manager through a special entry which pushes the original
  // unextended SP to the stack. Here we load it back.
2121   __ ld(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
2122   __ mtlr(R0);
2123   // Resize frame to get rid of a potential extension.
2124   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
2125 
2126   // Fall through
2127 
2128   __ bind(Lskip_unextend_SP);
2129 
2130   // Throw the exception via RuntimeStub "throw_StackOverflowError_entry".
2131   //
2132   // Previously, we called C-Code directly. As a consequence, a
2133   // possible GC tried to process the argument oops of the top frame
2134   // (see RegisterMap::clear, which sets the corresponding flag to
  // true). This led to crashes because:
  // 1. The top register map did not contain locations for the argument registers.
  // 2. The arguments are dead anyway and could already have been overwritten in the worst case.
  // Solution: Call via a special runtime stub that pushes its own frame. This runtime stub has the flag
  // "CodeBlob::caller_must_gc_arguments()" set to "false", which prevents the dead arguments from getting GC'd.
2140   //
2141   // 2 cases exist:
2142   // 1. We were called by the c2i adapter / call stub
2143   // 2. We were called by the frame manager
2144   //
2145   // Both cases are handled by this code:
2146   // 1. - initial_caller_sp was saved on stack => Load it back and we're ok
2147   //    - control flow will be:
2148   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->excp_blob of calling method
2149   // 2. - control flow will be:
2150   //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->
2151   //        ->rethrow_excp_entry of frame manager->resume_method
2152   //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
2153   //      registers using the stack and resume the calling method with a pending excp.
2154 
2155   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
2156   __ load_const(R3_ARG1, (StubRoutines::throw_StackOverflowError_entry()));
2157   __ mtctr(R3_ARG1);
2158   __ bctr();
2159   }
2160   //=============================================================================
  // We have popped a frame from an interpreted call. The popframe abi
  // assures us that we are returning to an interpreted call. There is
  // no return value; all we have to do is pop the current frame and
  // then make sure that the top of stack (of the caller) gets set to
  // where it was when we entered the callee (i.e. the args are still
  // in place). The only complication is that the caller may instead be
  // a deoptimized (compiled) frame, in which case the usual assumption
  // of a recursive return does not hold; see the prev_state == 0 check
  // below.
2171 
2172   __ BIND(popping_frame);
2173 
2174   // Registers alive
2175   //   R14_state
2176   //   R15_prev_state
2177   //   R17_tos
2178   //
2179   // Registers updated
2180   //   R19_method
2181   //   R3_RET
2182   //   msg
2183   {
2184     Label L;
2185 
2186     // Reload callee method, gc may have moved it.
2187     __ ld(R19_method, state_(_method));
2188 
2189     // We may be returning to a deoptimized frame in which case the
2190     // usual assumption of a recursive return is not true.
2191 
2192     // not equal = is recursive call
2193     __ cmpdi(CCR0, R15_prev_state, 0);
2194 
2195     __ bne(CCR0, L);
2196 
2197     // Pop_frame capability.
    // The pop_frame api says that the underlying frame is a Java frame; in this case
    // (prev_state == null) it must be a compiled frame:
2200     //
2201     // Stack at this point: I, C2I + C, ...
2202     //
2203     // The outgoing arguments of the call have just been copied (popframe_preserve_args).
2204     // By the pop_frame api, we must end up in an interpreted frame. So the compiled frame
2205     // will be deoptimized. Deoptimization will restore the outgoing arguments from
2206     // popframe_preserve_args, adjust the tos such that it includes the popframe_preserve_args,
2207     // and adjust the bci such that the call will be executed again.
2208     // We have no results, just pop the interpreter frame, resize the compiled frame to get rid
2209     // of the c2i extension and return to the deopt_handler.
2210     __ b(unwind_initial_activation);
2211 
2212     // is recursive call
2213     __ bind(L);
2214 
2215     // Resume_interpreter expects the original tos in R3_RET.
2216     __ ld(R3_RET, prev_state_(_stack));
2217 
2218     // We're done.
2219     __ li(msg, BytecodeInterpreter::popping_frame);
2220 
2221     __ b(unwind_recursive_activation);
2222   }
2223 
2224 
2225   //=============================================================================
2226 
2227   // We have finished an interpreted call. We are either returning to
2228   // native (call_stub/c2) or we are returning to the interpreter.
2229   // When returning to native, we must extract the result (if any)
2230   // from the java expression stack and store it in the location the
2231   // native abi expects. When returning to the interpreter we must
2232   // simply do a stack to stack move as we unwind.
2233 
2234   __ BIND(return_from_interpreted_method);
2235 
2236   //
2237   // Registers alive
2238   //   R16_thread     - JavaThread*
2239   //   R15_prev_state - address of caller's BytecodeInterpreter or 0
2240   //   R14_state      - address of callee's interpreter state
2241   //   R1_SP          - callee's stack pointer
2242   //
2243   // Registers updated
2244   //   R19_method     - callee's method
  //   R3_RET         - address of result (new caller's tos),
  //                    if returning to interpreted
  //   msg            - message for interpreter,
  //                    if returning to interpreted
  //
2251 
2252   // Check if this is the initial invocation of the frame manager.
2253   // If so, R15_prev_state will be null.
2254   __ cmpdi(CCR0, R15_prev_state, 0);
2255 
2256   // Reload callee method, gc may have moved it.
2257   __ ld(R19_method, state_(_method));
2258 
2259   // Load the method's result type.
2260   __ lwz(result_index, method_(result_index));
2261 
2262   // Go to return_to_initial_caller if R15_prev_state is null.
2263   __ beq(CCR0, return_to_initial_caller);
2264 
2265   // Copy callee's result to caller's expression stack via inline stack-to-stack
2266   // converters.
2267   {
2268     Register new_tos   = R3_RET;
2269     Register from_temp = R4_ARG2;
2270     Register from      = R5_ARG3;
2271     Register tos       = R6_ARG4;
2272     Register tmp1      = R7_ARG5;
2273     Register tmp2      = R8_ARG6;
2274 
2275     ConditionRegister result_type_is_void   = CCR1;
2276     ConditionRegister result_type_is_long   = CCR2;
2277     ConditionRegister result_type_is_double = CCR3;
2278 
2279     Label stack_to_stack_void;
2280     Label stack_to_stack_double_slot; // T_LONG, T_DOUBLE
2281     Label stack_to_stack_single_slot; // T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT, T_FLOAT, T_OBJECT
2282     Label stack_to_stack_done;
2283 
2284     // Pass callee's address of tos + BytesPerWord
2285     __ ld(from_temp, state_(_stack));
2286 
2287     // result type: void
2288     __ cmpwi(result_type_is_void, result_index, AbstractInterpreter::BasicType_as_index(T_VOID));
2289 
2290     // Pass caller's tos == callee's locals address
2291     __ ld(tos, state_(_locals));
2292 
2293     // result type: long
2294     __ cmpwi(result_type_is_long, result_index, AbstractInterpreter::BasicType_as_index(T_LONG));
2295 
2296     __ addi(from, from_temp, Interpreter::stackElementSize);
2297 
2298     // !! don't branch above this line !!
2299 
2300     // handle void
2301     __ beq(result_type_is_void,   stack_to_stack_void);
2302 
2303     // result type: double
2304     __ cmpwi(result_type_is_double, result_index, AbstractInterpreter::BasicType_as_index(T_DOUBLE));
2305 
2306     // handle long or double
2307     __ beq(result_type_is_long, stack_to_stack_double_slot);
2308     __ beq(result_type_is_double, stack_to_stack_double_slot);
2309 
2310     // fall through to single slot types (incl. object)
2311 
2312     {
2313       __ BIND(stack_to_stack_single_slot);
2314       // T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT, T_FLOAT, T_OBJECT
2315 
2316       __ ld(tmp1, 0, from);
2317       __ std(tmp1, 0, tos);
2318       // New expression stack top
2319       __ addi(new_tos, tos, - BytesPerWord);
2320 
2321       __ b(stack_to_stack_done);
2322     }
2323 
2324     {
2325       __ BIND(stack_to_stack_double_slot);
2326       // T_LONG, T_DOUBLE
2327 
2328       // Move both entries for debug purposes even though only one is live
2329       __ ld(tmp1, BytesPerWord, from);
2330       __ ld(tmp2, 0, from);
2331       __ std(tmp1, 0, tos);
2332       __ std(tmp2, -BytesPerWord, tos);
2333 
2334       // new expression stack top
2335       __ addi(new_tos, tos, - 2 * BytesPerWord); // two slots
2336       __ b(stack_to_stack_done);
2337     }
2338 
2339     {
2340       __ BIND(stack_to_stack_void);
2341       // T_VOID
2342 
2343       // new expression stack top
2344       __ mr(new_tos, tos);
2345       // fall through to stack_to_stack_done
2346     }
2347 
2348     __ BIND(stack_to_stack_done);
2349   }
2350 
2351   // new tos = R3_RET
2352 
2353   // Get the message for the interpreter
2354   __ li(msg, BytecodeInterpreter::method_resume);
2355 
2356   // And fall thru
2357 
2358 
2359   //=============================================================================
2360   // Restore caller's interpreter state and pass pointer to caller's
2361   // new tos to caller.
2362 
2363   __ BIND(unwind_recursive_activation);
2364 
2365   //
2366   // Registers alive
2367   //   R15_prev_state   - address of caller's BytecodeInterpreter
2368   //   R3_RET           - address of caller's tos
2369   //   msg              - message for caller's BytecodeInterpreter
2370   //   R1_SP            - callee's stack pointer
2371   //
2372   // Registers updated
2373   //   R14_state        - address of caller's BytecodeInterpreter
2374   //   R15_prev_state   - address of its parent or 0
2375   //
2376 
2377   // Pop callee's interpreter and set R14_state to caller's interpreter.
2378   __ pop_interpreter_state(/*prev_state_may_be_0=*/false);
2379 
2380   // And fall thru
2381 
2382 
2383   //=============================================================================
2384   // Resume the (calling) interpreter after a call.
2385 
2386   __ BIND(resume_interpreter);
2387 
2388   //
2389   // Registers alive
2390   //   R14_state        - address of resuming BytecodeInterpreter
2391   //   R15_prev_state   - address of its parent or 0
2392   //   R3_RET           - address of resuming tos
2393   //   msg              - message for resuming interpreter
2394   //   R1_SP            - callee's stack pointer
2395   //
2396   // Registers updated
2397   //   R1_SP            - caller's stack pointer
2398   //
2399 
2400   // Restore C stack pointer of caller (resuming interpreter),
2401   // R14_state already points to the resuming BytecodeInterpreter.
2402   __ pop_interpreter_frame_to_state(R14_state, R21_tmp1, R11_scratch1, R12_scratch2);
2403 
2404   // Store new address of tos (holding return value) in interpreter state.
2405   __ std(R3_RET, state_(_stack));
2406 
2407   // Store message for interpreter.
2408   __ stw(msg, state_(_msg));
2409 
2410   __ b(call_interpreter);
2411 
2412   //=============================================================================
2413   // Interpreter returning to native code (call_stub/c1/c2) from
2414   // initial activation. Convert stack result and unwind activation.
2415 
2416   __ BIND(return_to_initial_caller);
2417 
2418   //
2419   // Registers alive
2420   //   R19_method       - callee's Method
2421   //   R14_state        - address of callee's interpreter state
2422   //   R16_thread       - JavaThread
2423   //   R1_SP            - callee's stack pointer
2424   //
2425   // Registers updated
2426   //   R3_RET/F1_RET - result in expected output register
2427   //
2428 
2429   // If we have an exception pending we have no result and we
2430   // must figure out where to really return to.
2431   //
2432   __ ld(pending_exception, thread_(pending_exception));
2433   __ cmpdi(CCR0, pending_exception, 0);
2434   __ bne(CCR0, unwind_initial_activation_pending_exception);
2435 
2436   __ lwa(result_index, method_(result_index));
2437 
2438   // Address of stub descriptor address array.
2439   __ load_const(stub_addr, CppInterpreter::stack_result_to_native());
2440 
2441   // Pass address of callee's tos + BytesPerWord.
2442   // Will then point directly to result.
2443   __ ld(R3_ARG1, state_(_stack));
2444   __ addi(R3_ARG1, R3_ARG1, Interpreter::stackElementSize);
2445 
2446   // Address of stub descriptor address
2447   __ sldi(result_index, result_index, LogBytesPerWord);
2448   __ add(stub_addr, stub_addr, result_index);
2449 
2450   // Stub descriptor address
2451   __ ld(stub_addr, 0, stub_addr);
2452 
2453   // TODO: don't do this via a call, do it in place!
2454   //
2455   // call stub via descriptor
2456   __ call_stub(stub_addr);
2457 
2458   __ BIND(unwind_initial_activation);
2459 
2460   // Unwind from initial activation. No exception is pending.
2461 
2462   //
2463   // Stack layout at this point:
2464   //
2465   //    0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
2466   //            ...
2467   //    CALLER  [PARENT_IJAVA_FRAME_ABI]
2468   //            ...
2469   //    CALLER  [unextended ABI]
2470   //            ...
2471   //
2472   //  The CALLER frame has a C2I adapter or is an entry-frame.
2473   //
2474 
2475   // An interpreter frame exists, we may pop the TOP_IJAVA_FRAME and
2476   // turn the caller's PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
2477   // But, we simply restore the return pc from the caller's frame and
2478   // use the caller's initial_caller_sp as the new SP which pops the
2479   // interpreter frame and "resizes" the caller's frame to its "unextended"
2480   // size.
2481 
2482   // get rid of top frame
2483   __ pop_frame();
2484 
2485   // Load return PC from parent frame.
2486   __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);
2487 
2488   // Resize frame to get rid of a potential extension.
2489   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
2490 
2491   // update LR
2492   __ mtlr(R21_tmp1);
2493 
2494   // return
2495   __ blr();
2496 
2497   //=============================================================================
2498   // Unwind from initial activation. An exception is pending
2499 
2500   __ BIND(unwind_initial_activation_pending_exception);
2501 
2502   //
2503   // Stack layout at this point:
2504   //
2505   //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
2506   //           ...
2507   //   CALLER  [PARENT_IJAVA_FRAME_ABI]
2508   //           ...
2509   //   CALLER  [unextended ABI]
2510   //           ...
2511   //
2512   // The CALLER frame has a C2I adapter or is an entry-frame.
2513   //
2514 
2515   // An interpreter frame exists, we may pop the TOP_IJAVA_FRAME and
2516   // turn the caller's PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
2517   // But, we just pop the current TOP_IJAVA_FRAME and fall through
2518 
2519   __ pop_frame();
2520   __ ld(R3_ARG1, _top_ijava_frame_abi(lr), R1_SP);
2521 
2522   //
2523   // Stack layout at this point:
2524   //
2525   //   CALLER  [PARENT_IJAVA_FRAME_ABI]      <-- R1_SP
2526   //           ...
2527   //   CALLER  [unextended ABI]
2528   //           ...
2529   //
2530   // The CALLER frame has a C2I adapter or is an entry-frame.
2531   //
2532   // Registers alive
2533   //   R16_thread
2534   //   R3_ARG1 - return address to caller
2535   //
2536   // Registers updated
2537   //   R3_ARG1 - address of pending exception
2538   //   R4_ARG2 - issuing pc = return address to caller
2539   //   LR      - address of exception handler stub
2540   //
2541 
2542   // Resize frame to get rid of a potential extension.
2543   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
2544 
2545   __ mr(R14, R3_ARG1);   // R14 := ARG1
2546   __ mr(R4_ARG2, R3_ARG1);  // ARG2 := ARG1
2547 
2548   // Find the address of the "catch_exception" stub.
2549   __ push_frame_abi112(0, R11_scratch1);
2550   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
2551                   R16_thread,
2552                   R4_ARG2);
2553   __ pop_frame();
2554 
2555   // Load continuation address into LR.
2556   __ mtlr(R3_RET);
2557 
2558   // Load address of pending exception and clear it in thread object.
2559   __ ld(R3_ARG1/*R3_RET*/, thread_(pending_exception));
2560   __ li(R4_ARG2, 0);
2561   __ std(R4_ARG2, thread_(pending_exception));
2562 
2563   // re-load issuing pc
2564   __ mr(R4_ARG2, R14);
2565 
2566   // Branch to found exception handler.
2567   __ blr();
2568 
2569   //=============================================================================
2570   // Call a new method. Compute new args and trim the expression stack
2571   // to only what we are currently using and then recurse.
2572 
2573   __ BIND(call_method);
2574 
2575   //
2576   //  Registers alive
2577   //    R16_thread
2578   //    R14_state      - address of caller's BytecodeInterpreter
2579   //    R1_SP          - caller's stack pointer
2580   //
2581   //  Registers updated
2582   //    R15_prev_state - address of caller's BytecodeInterpreter
2583   //    R17_tos        - address of caller's tos
2584   //    R19_method     - callee's Method
2585   //    R1_SP          - trimmed back
2586   //
2587 
2588   // Very-local scratch registers.
2589 
2590   const Register offset = R21_tmp1;
2591   const Register tmp    = R22_tmp2;
2592   const Register self_entry  = R23_tmp3;
2593   const Register stub_entry  = R24_tmp4;
2594 
2595   const ConditionRegister cr = CCR0;
2596 
2597   // Load the address of the frame manager.
2598   __ load_const(self_entry, &interpreter_frame_manager);
2599   __ ld(self_entry, 0, self_entry);
2600 
2601   // Load BytecodeInterpreter._result._to_call._callee (callee's Method).
2602   __ ld(R19_method, state_(_result._to_call._callee));
2603   // Load BytecodeInterpreter._stack (outgoing tos).
2604   __ ld(R17_tos, state_(_stack));
2605 
2606   // Save address of caller's BytecodeInterpreter.
2607   __ mr(R15_prev_state, R14_state);
2608 
2609   // Load the callee's entry point.
2610   // Load BytecodeInterpreter._result._to_call._callee_entry_point.
2611   __ ld(stub_entry, state_(_result._to_call._callee_entry_point));
2612 
2613   // Check whether stub_entry is equal to self_entry.
2614   __ cmpd(cr, self_entry, stub_entry);
2615   // if (self_entry == stub_entry)
2616   //   do a re-dispatch
2617   __ beq(cr, re_dispatch);
2618   // else
2619   //   call the specialized entry (adapter for jni or compiled code)
2620   __ BIND(call_special);
2621 
2622   //
2623   // Call the entry generated by `InterpreterGenerator::generate_native_entry'.
2624   //
2625   // Registers alive
2626   //   R16_thread
2627   //   R15_prev_state    - address of caller's BytecodeInterpreter
2628   //   R19_method        - callee's Method
2629   //   R17_tos           - address of caller's tos
2630   //   R1_SP             - caller's stack pointer
2631   //
2632 
2633   // Mark return from specialized entry for generate_native_entry.
2634   guarantee(return_from_native_pc != (address) NULL, "precondition");
2635   frame_manager_specialized_return = return_from_native_pc;
2636 
2637   // Set sender_SP in case we call interpreter native wrapper which
2638   // will expect it. Compiled code should not care.
2639   __ mr(R21_sender_SP, R1_SP);
2640 
2641   // Do a tail call here, and let the link register point to
2642   // frame_manager_specialized_return which is return_from_native_pc.
2643   __ load_const(tmp, frame_manager_specialized_return);
2644   __ call_stub_and_return_to(stub_entry,  tmp /* return_pc=tmp */);
2645 
2646 
2647   //=============================================================================
2648   //
2649   // InterpretMethod triggered OSR compilation of some Java method M
2650   // and now asks to run the compiled code.  We call this code the
2651   // `callee'.
2652   //
  // This is our current idea of what OSR should look like on PPC64:
2654   //
2655   // While interpreting a Java method M the stack is:
2656   //
2657   //  (InterpretMethod (M), IJAVA_FRAME (M), ANY_FRAME, ...).
2658   //
2659   // After having OSR compiled M, `InterpretMethod' returns to the
2660   // frame manager, sending the message `retry_method_osr'.  The stack
2661   // is:
2662   //
2663   //  (IJAVA_FRAME (M), ANY_FRAME, ...).
2664   //
2665   // The compiler will have generated an `nmethod' suitable for
2666   // continuing execution of M at the bytecode index at which OSR took
2667   // place.  So now the frame manager calls the OSR entry.  The OSR
2668   // entry sets up a JIT_FRAME for M and continues execution of M with
2669   // initial state determined by the IJAVA_FRAME.
2670   //
2671   //  (JIT_FRAME (M), IJAVA_FRAME (M), ANY_FRAME, ...).
2672   //
2673 
2674   __ BIND(retry_method_osr);
2675   {
2676   //
2677   // Registers alive
2678   //   R16_thread
2679   //   R15_prev_state     - address of caller's BytecodeInterpreter
2680   //   R14_state          - address of callee's BytecodeInterpreter
2681   //   R1_SP              - callee's SP before call to InterpretMethod
2682   //
2683   // Registers updated
2684   //   R17                - pointer to callee's locals array
2685   //                       (declared via `interpreter_arg_ptr_reg' in the AD file)
2686   //   R19_method         - callee's Method
2687   //   R1_SP              - callee's SP (will become SP of OSR adapter frame)
2688   //
2689 
2690   // Provide a debugger breakpoint in the frame manager if breakpoints
2691   // in osr'd methods are requested.
2692 #ifdef COMPILER2
2693   NOT_PRODUCT( if (OptoBreakpointOSR) { __ illtrap(); } )
2694 #endif
2695 
2696   // Load callee's pointer to locals array from callee's state.
2697   //  __ ld(R17, state_(_locals));
2698 
2699   // Load osr entry.
2700   __ ld(R12_scratch2, state_(_result._osr._osr_entry));
2701 
2702   // Load address of temporary osr buffer to arg1.
2703   __ ld(R3_ARG1, state_(_result._osr._osr_buf));
2704   __ mtctr(R12_scratch2);
2705 
2706   // Load method oop, gc may move it during execution of osr'd method.
2707   __ ld(R22_tmp2, state_(_method));
2708   // Load message 'call_method'.
2709   __ li(R23_tmp3, BytecodeInterpreter::call_method);
2710 
2711   {
2712     // Pop the IJAVA frame of the method which we are going to call osr'd.
2713     Label no_state, skip_no_state;
2714     __ pop_interpreter_state(/*prev_state_may_be_0=*/true);
    __ cmpdi(CCR0, R14_state, 0);
2716     __ beq(CCR0, no_state);
2717     // return to interpreter
2718     __ pop_interpreter_frame_to_state(R14_state, R11_scratch1, R12_scratch2, R21_tmp1);
2719 
2720     // Init _result._to_call._callee and tell gc that it contains a valid oop
2721     // by setting _msg to 'call_method'.
2722     __ std(R22_tmp2, state_(_result._to_call._callee));
2723     // TODO: PPC port: assert(4 == BytecodeInterpreter::sz_msg(), "unexpected field size");
2724     __ stw(R23_tmp3, state_(_msg));
2725 
2726     __ load_const(R21_tmp1, frame_manager_specialized_return);
2727     __ b(skip_no_state);
2728     __ bind(no_state);
2729 
2730     // Return to initial caller.
2731 
2732     // Get rid of top frame.
2733     __ pop_frame();
2734 
2735     // Load return PC from parent frame.
2736     __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);
2737 
2738     // Resize frame to get rid of a potential extension.
2739     __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
2740 
2741     __ bind(skip_no_state);
2742 
2743     // Update LR with return pc.
2744     __ mtlr(R21_tmp1);
2745   }
2746   // Jump to the osr entry point.
2747   __ bctr();
2748 
2749   }
2750 
2751   //=============================================================================
2752   // Interpreted method "returned" with an exception, pass it on.
2753   // Pass no result, unwind activation and continue/return to
2754   // interpreter/call_stub/c2.
2755 
2756   __ BIND(throwing_exception);
2757 
2758   // Check if this is the initial invocation of the frame manager.  If
2759   // so, previous interpreter state in R15_prev_state will be null.
2760 
2761   // New tos of caller is callee's first parameter address, that is
2762   // callee's incoming arguments are popped.
2763   __ ld(R3_RET, state_(_locals));
2764 
2765   // Check whether this is an initial call.
2766   __ cmpdi(CCR0, R15_prev_state, 0);
2767   // Yes, called from the call stub or from generated code via a c2i frame.
2768   __ beq(CCR0, unwind_initial_activation_pending_exception);
2769 
2770   // Send resume message, interpreter will see the exception first.
2771 
2772   __ li(msg, BytecodeInterpreter::method_resume);
2773   __ b(unwind_recursive_activation);
2774 
2775 
2776   //=============================================================================
2777   // Push the last instruction out to the code buffer.
2778 
2779   {
2780     __ unimplemented("end of InterpreterGenerator::generate_normal_entry", 128);
2781   }
2782 
2783   interpreter_frame_manager = entry;
2784   return interpreter_frame_manager;
2785 }
2786 
2787 // Generate code for various sorts of method entries
2788 //
2789 address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
2790   address entry_point = NULL;
2791 
2792   switch (kind) {
2793     case Interpreter::zerolocals                 :                                                                              break;
2794     case Interpreter::zerolocals_synchronized    :                                                                              break;
2795     case Interpreter::native                     : // Fall thru
2796     case Interpreter::native_synchronized        : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry();     break;
2797     case Interpreter::empty                      :                                                                              break;
2798     case Interpreter::accessor                   : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
2799     case Interpreter::abstract                   : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
2800     // These are special interpreter intrinsics which we don't support so far.
2801     case Interpreter::java_lang_math_sin         :                                                                              break;
2802     case Interpreter::java_lang_math_cos         :                                                                              break;
2803     case Interpreter::java_lang_math_tan         :                                                                              break;
2804     case Interpreter::java_lang_math_abs         :                                                                              break;
2805     case Interpreter::java_lang_math_log         :                                                                              break;
2806     case Interpreter::java_lang_math_log10       :                                                                              break;
2807     case Interpreter::java_lang_math_sqrt        :                                                                              break;
2808     case Interpreter::java_lang_math_pow         :                                                                              break;
2809     case Interpreter::java_lang_math_exp         :                                                                              break;
2810     case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
2811     default                                      : ShouldNotReachHere();                                                        break;
2812   }
2813 
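       // Entry kinds without a specialized stub above (zerolocals, empty, and the
       // math intrinsics) fall back to the normal interpreter entry generated below.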
2814   if (entry_point) {
2815     return entry_point;
2816   }
2817   return ((InterpreterGenerator*)this)->generate_normal_entry();
2818 }
2819 
2820 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
2821  : CppInterpreterGenerator(code) {
2822    generate_all(); // down here so it can be "virtual"
2823 }
2824 
2825 // How much stack a topmost interpreter method activation needs in words.
2826 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
2827   // Computation is in bytes not words to match layout_activation
2828   // below, but the return is in words.
2829 
2830   //
2831   //  0       [TOP_IJAVA_FRAME_ABI]                                                    \
2832   //          alignment (optional)                                             \       |
2833   //          [operand stack / Java parameters] > stack                        |       |
2834   //          [monitors] (optional)             > monitors                     |       |
2835   //          [PARENT_IJAVA_FRAME_ABI]                                \        |       |
2836   //          [BytecodeInterpreter object]      > interpreter \       |        |       |
2837   //          alignment (optional)                            | round | parent | round | top
2838   //          [Java result] (2 slots)           > result      |       |        |       |
2839   //          [Java non-arg locals]             \ locals      |       |        |       |
2840   //          [arg locals]                      /             /       /        /       /
2841   //
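       //
       // Illustrative example (hypothetical numbers, for orientation only): for an
       // unsynchronized method with max_locals = 4 and max_stack = 10, locals is
       // 32 bytes and stack is 80 bytes. Assuming an interpreter state of 224 bytes
       // and ABI areas of 112 bytes each, parent = round_to(224 + 16 + 32, 16) + 112
       // = 384 bytes and top = round_to(384 + 0 + 80, 16) + 112 = 576 bytes, i.e.
       // 72 words. The real constants come from the frame:: queries used below.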
2842 
2843   int locals = method->max_locals() * BytesPerWord;
2844   int interpreter = frame::interpreter_frame_cinterpreterstate_size_in_bytes();
2845   int result = 2 * BytesPerWord;
2846 
2847   int parent = round_to(interpreter + result + locals, 16) + frame::parent_ijava_frame_abi_size;
2848 
2849   int stack = method->max_stack() * BytesPerWord;
2850   int monitors = method->is_synchronized() ? frame::interpreter_frame_monitor_size_in_bytes() : 0;
2851   int top = round_to(parent + monitors + stack, 16) + frame::top_ijava_frame_abi_size;
2852 
2853   return (top / BytesPerWord);
2854 }
2855 
2856 void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
2857                                                   frame* caller,
2858                                                   frame* current,
2859                                                   Method* method,
2860                                                   intptr_t* locals,
2861                                                   intptr_t* stack,
2862                                                   intptr_t* stack_base,
2863                                                   intptr_t* monitor_base,
2864                                                   intptr_t* frame_sp,
2865                                                   bool is_top_frame) {
2866   // What about any vtable?
2867   //
2868   to_fill->_thread = JavaThread::current();
2869   // This gets filled in later but make it something recognizable for now.
2870   to_fill->_bcp = method->code_base();
2871   to_fill->_locals = locals;
2872   to_fill->_constants = method->constants()->cache();
2873   to_fill->_method = method;
2874   to_fill->_mdx = NULL;
2875   to_fill->_stack = stack;
2876 
2877   if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution()) {
2878     to_fill->_msg = deopt_resume2;
2879   } else {
2880     to_fill->_msg = method_resume;
2881   }
2882   to_fill->_result._to_call._bcp_advance = 0;
2883   to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
2884   to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
2885   to_fill->_prev_link = NULL;
2886 
2887   if (caller->is_interpreted_frame()) {
2888     interpreterState prev  = caller->get_interpreterState();
2889 
2890     // Support MH calls. Make sure the interpreter will return the right address:
2891     // 1. Caller did an ordinary interpreted->compiled call: Set a prev_state
2892     //    which makes the CPP interpreter return to frame manager "return_from_interpreted_method"
2893     //    entry after finishing execution.
2894     // 2. Caller did a MH call: If the caller has a MethodHandleInvoke in its
2895     //    state (invariant: must be the caller of the bottom vframe) we used the
2896     //    "call_special" entry to do the call, meaning the arguments have not been
2897     //    popped from the stack. Therefore, don't enter a prev state in this case
2898     //    in order to return to the "return_from_native" frame manager entry, which
2899     //    takes care of popping the arguments. Also, don't overwrite the MH.invoke
2900     //    Method in the prev_state in order to be able to figure out the number of
2901     //    arguments to pop.
2902     // The parameter method can represent MethodHandle.invokeExact(...).
2903     // The MethodHandleCompiler generates these synthetic Methods,
2904     // including bytecodes, if an invokedynamic call gets inlined. In
2905     // this case we want to return like from any other interpreted
2906     // Java call, so we set _prev_link.
2907     to_fill->_prev_link = prev;
2908 
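         // Advance the caller's bcp past the invoke on return: invokeinterface and
         // invokedynamic occupy 5 bytes in the bytecode stream, the other invokes 3.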
2909     if (*prev->_bcp == Bytecodes::_invokeinterface || *prev->_bcp == Bytecodes::_invokedynamic) {
2910       prev->_result._to_call._bcp_advance = 5;
2911     } else {
2912       prev->_result._to_call._bcp_advance = 3;
2913     }
2914   }
2915   to_fill->_oop_temp = NULL;
2916   to_fill->_stack_base = stack_base;
2917   // Need +1 here because stack_base points to the word just above the
2918   // first expr stack entry and stack_limit is supposed to point to
2919   // the word just below the last expr stack entry. See
2920   // generate_compute_interpreter_state.
2921   to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
2922   to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
2923 
2924   to_fill->_frame_bottom = frame_sp;
2925 
2926   // PPC64 specific
2927   to_fill->_last_Java_pc = NULL;
2928   to_fill->_last_Java_fp = NULL;
2929   to_fill->_last_Java_sp = frame_sp;
2930 #ifdef ASSERT
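       // Debug-only sanity values: _self_link enables consistency checks, and the
       // native result fields get poison patterns so that use of an uninitialized
       // native result is easy to spot.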
2931   to_fill->_self_link = to_fill;
2932   to_fill->_native_fresult = 123456.789;
2933   to_fill->_native_lresult = CONST64(0xdeafcafedeadc0de);
2934 #endif
2935 }
2936 
2937 void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate,
2938                                                      address last_Java_pc,
2939                                                      intptr_t* last_Java_fp) {
2940   istate->_last_Java_pc = last_Java_pc;
2941   istate->_last_Java_fp = last_Java_fp;
2942 }
2943 
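     // Shared by size_activation and layout_activation below: computes the size of
     // the monitor area and of a topmost interpreter frame (interpreter state +
     // monitors + full expression stack + 2 result slots, rounded to the frame
     // alignment, plus the top ABI area).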
2944 template<class M> static void frame_size_helper(M* method,
2945                                                 int monitors,
2946                                                 int& monitor_size,
2947                                                 int& top_frame_size) {
2948   monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
2949   top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
2950                                 + monitor_size
2951                                 + (method->max_stack() * Interpreter::stackElementWords * BytesPerWord)
2952                                 + 2*BytesPerWord,
2953                                 frame::alignment_in_bytes)
2954                       + frame::top_ijava_frame_abi_size;
2955 }
2956 
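     // Frame size in words: a top frame reserves the full expression stack, an
     // inner frame only the live expression stack slots (temps) plus the part of
     // the callee's locals that extends beyond its incoming parameters.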
2957 template<class M> int AbstractInterpreter::size_activation(M* method,
2958                                                            int temps,
2959                                                            int popframe_args,
2960                                                            int monitors,
2961                                                            int callee_params,
2962                                                            int callee_locals,
2963                                                            bool is_top_frame) {
2964   int monitor_size = 0;
2965   int top_frame_size = 0;
2966   frame_size_helper<M>(method, monitors, monitor_size, top_frame_size);
2967 
2968   int frame_size;
2969   if (is_top_frame) {
2970     frame_size = top_frame_size;
2971   } else {
2972     frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
2973                           + monitor_size
2974                           + ((temps - callee_params + callee_locals) *
2975                              Interpreter::stackElementWords * BytesPerWord)
2976                           + 2*BytesPerWord,
2977                           frame::alignment_in_bytes)
2978                  + frame::parent_ijava_frame_abi_size;
2979     assert(popframe_args==0, "non-zero for top_frame only");
2980   }
2981 
2982   return frame_size/BytesPerWord;
2983 }
2984 
2985 template int AbstractInterpreter::size_activation<Method>(Method* method,
2986                                                           int temps,
2987                                                           int popframe_args,
2988                                                           int monitors,
2989                                                           int callee_params,
2990                                                           int callee_locals,
2991                                                           bool is_top_frame);
2992 
2993 template int AbstractInterpreter::size_activation<ciMethod>(ciMethod* method,
2994                                                             int temps,
2995                                                             int popframe_args,
2996                                                             int monitors,
2997                                                             int callee_params,
2998                                                             int callee_locals,
2999                                                             bool is_top_frame);
3000 
3001 void AbstractInterpreter::layout_activation(Method* method,
3002                                             int temps,        // Number of slots on java expression stack in use.
3003                                             int popframe_args,
3004                                             int monitors,     // Number of active monitors.
3005                                             int caller_actual_parameters,
3006                                             int callee_params,// Number of slots for callee parameters.
3007                                             int callee_locals,// Number of slots for locals.
3008                                             frame* caller,
3009                                             frame* interpreter_frame,
3010                                             bool is_top_frame,
3011                                             bool is_bottom_frame) {
3012 
3013   // NOTE this code must exactly mimic what
3014   // InterpreterGenerator::generate_compute_interpreter_state() does
3015   // as far as allocating an interpreter frame. However there is an
3016   // exception. With the C++ based interpreter only the top most frame
3017   // has a full sized expression stack.  The 16 byte slop factor is
3018   // both the abi scratch area and a place to hold a result from a
3019   // callee on its way to the callers stack.
3020 
3021   int monitor_size = 0;
3022   int top_frame_size = 0;
3023   frame_size_helper<Method>(method, monitors, monitor_size, top_frame_size);
3024 
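       // The word at the new frame's sp is the PPC64 ABI back chain; it must point
       // to the caller's sp, which serves as this frame's fp here.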
3025   intptr_t sp = (intptr_t)interpreter_frame->sp();
3026   intptr_t fp = *(intptr_t *)sp;
3027   assert(fp == (intptr_t)caller->sp(), "fp must match");
3028   interpreterState cur_state =
3029     (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
3030 
3031   // Now fill in the interpreterState object.
3032 
3033   intptr_t* locals;
3034   if (caller->is_interpreted_frame()) {
3035     // Locals must agree with the caller because it will be used to set the
3036     // caller's tos when we return.
3037     interpreterState prev  = caller->get_interpreterState();
3038     // Calculate start of "locals" for MH calls.  For MH calls, the
3039     // current method() (= MH target) and prev->callee() (=
3040     // MH.invoke*()) are different and especially have different
3041     // signatures. To pop the arguments of the caller, we must use
3042     // the prev->callee()->size_of_arguments() because that's what
3043     // the caller actually pushed.  Currently, for synthetic MH
3044     // calls (deoptimized from inlined MH calls), detected by
3045     // is_method_handle_invoke(), we use the callee's arguments
3046     // because here, the caller's and callee's signature match.
3047     if (true /*!caller->is_at_mh_callsite()*/) {
3048       locals = prev->stack() + method->size_of_parameters();
3049     } else {
3050       // Normal MH call.
3051       locals = prev->stack() + prev->callee()->size_of_parameters();
3052     }
3053   } else {
3054     bool is_deopted;
3055     locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
3056                           frame::parent_ijava_frame_abi_size);
3057   }
3058 
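       // Rebuild the frame layout: the monitor area sits directly below the
       // interpreter state, the expression stack below the monitors (see the
       // diagram in size_top_interpreter_activation).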
3059   intptr_t* monitor_base = (intptr_t*) cur_state;
3060   intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
3061 
3062   // Provide pop_frame capability on PPC64, add popframe_args.
3063   // +1 because stack is always prepushed.
3064   intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
3065 
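       // frame_sp of the new frame is top_frame_size bytes below the caller's sp
       // (fp here), matching the top frame size computed by frame_size_helper.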
3066   BytecodeInterpreter::layout_interpreterState(cur_state,
3067                                                caller,
3068                                                interpreter_frame,
3069                                                method,
3070                                                locals,
3071                                                stack,
3072                                                stack_base,
3073                                                monitor_base,
3074                                                (intptr_t*)(((intptr_t)fp)-top_frame_size),
3075                                                is_top_frame);
3076 
3077   BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
3078                                                   interpreter_frame->fp());
3079 }
3080 
3081 #endif // CC_INTERP