/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH
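// Note: the #define/#undef sequence above leaves FAST_DISPATCH undefined for
// this file, so all FAST_DISPATCH-guarded code below is compiled out. The
// initial #define merely documents the (currently disabled) fast-dispatch
// scheme, which dedicates a register (IdispatchTables) to the bytecode
// dispatch table base.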


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}
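// These two helpers bracket VM calls made while a native-method result is
// still live in the return registers: the integer/long result in O0 (O0/O1
// on 32-bit) and the float/double result in F0/F1 are spilled to the
// interpreter frame's l_tmp/d_tmp scratch slots and reloaded afterwards.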

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


// Arguments are: required type in G5_method_type, and
// failing object (or NULL) in G3_method_handle.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             G5_method_type,    // required
             G3_method_handle); // actual
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter returns longs in both G1 and O0/O1 in the 32-bit
  // build, even when returning from interpreted code we just do a little
  // shuffling here.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
  }
#endif // !_LP64 && COMPILER2
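// (On 32-bit, the srl/srlx pair above splits the 64-bit value in G1 into the
// interpreter's long convention: srl(G1, 0, O1) zero-extends the low word
// into O1 and srlx(G1, 32, O0) moves the high word into O0.)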

  __ bind(cont);

  // The callee returns with the stack possibly adjusted by an adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
    __ delayed()->nop();
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                   // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // argument size in bytes
  __ add(Lesp, size, Lesp);                    // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba(false, L_got_cache);
    __ delayed()->nop();
  }
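  // invokedynamic call sites carry a 4-byte ("giant") constant pool cache
  // index, so the out-of-line path above re-reads the cache entry with an
  // index size of sizeof(u4) instead of the default 2-byte index before
  // rejoining the main line at L_got_cache.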

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
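    // The T_BOOLEAN case uses the carry flag to normalize the result:
    // subcc(G0, O0, G0) computes 0 - O0, setting the carry (borrow) exactly
    // when O0 != 0, so addc(G0, 0, Itos_i) produces 1 for any non-zero value
    // and 0 otherwise.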
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)       // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// O0: updated invocation counter (non-tiered path)
//
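// ('Sticky' here means the unsigned limit compare keeps succeeding once the
// counter has crossed the limit, even if the overflow path is not taken
// immediately, so a pending overflow cannot be lost between checks.)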
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
      __ delayed()->nop();
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba(false, done);
      __ delayed()->nop();
    }

    // Increment counter in methodOop
    __ bind(no_mdo);
    Address invocation_counter(Lmethod,
                               in_bytes(methodOopDesc::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G3_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ increment_invocation_counter(O0, G3_scratch);
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
      Address interpreter_invocation_counter(Lmethod, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G3_scratch);
      __ inc(G3_scratch);
      __ st(G3_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp(O0, G3_scratch);
      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
      __ delayed()->nop();

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
    __ delayed()->nop();
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - methodOop
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case
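    // Note the annul bit on the branch above: the delay-slot ld_ptr executes
    // only when the branch is taken (the non-static case), loading the
    // receiver into O0; on the static path it is annulled and O0 is instead
    // filled with the mirror below.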

    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                         Register Rscratch,
                                                         Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set( page_size,   Rscratch );
  __ cmp( Rframe_size, Rscratch );

  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ cmp( Rscratch, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ cmp( Rscratch2, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2,   Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2,   Rscratch );
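  // Rscratch now holds (stack base - stack size) + guard area, i.e. the lowest
  // usable stack address. As an illustrative example (assuming 8K pages with
  // StackRedPages == 1 and StackYellowPages == 2), a 1M stack gives
  // base - 1M + 24K as the limit the new frame must stay above.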

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp( SP, Rscratch );
  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}


//
// Generate a fixed interpreter frame. The setup is identical for interpreted
// methods and for native methods, hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  const Address max_stack         (G5_method, methodOopDesc::max_stack_offset());
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//methodOopDesc::extra_stack_words() +       // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.

  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh( max_stack, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );
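  // A SPARC 'save' with a negative size operand both slides the register
  // window (the caller's outs become this frame's ins) and decrements SP by
  // Gframe_size bytes, so the window switch and the frame allocation above
  // happen in a single instruction.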

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if (UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);  // load the state value, not its address
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved, otherwise drop into
// the vanilla (slow path) entry).

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if (UseFastAccessorMethods && !UseCompressedOops) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // if local 0 == NULL, take the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
    __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
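    // The 32-bit word just loaded holds (big endian) aload_0, getfield, and
    // the two index bytes. Shifting left by 16 discards the two opcodes;
    // shifting right by 16 - log2(cache entry size in bytes) then leaves
    // index * entry_size in G1_scratch, a ready-made byte offset into the
    // constant pool cache.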

    // get constant pool cache
    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
  // the runtime, the exception-handling code (i.e. unlock_if_synchronized_method)
  // checks this thread-local flag.
  // The flag forces an unwind in the topmost interpreter frame without
  // performing an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need; these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame
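  // sub(SP, FP, O3) produces the current frame size as a negative value (SP
  // sits below FP on a downward-growing stack), so save(SP, O3, SP) opens a
  // new register window with an identically sized frame in one step.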

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // Call the signature handler. It will move the args properly since Llocals
  // in the current frame matches that in the outer frame.

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Lmethod, methodOopDesc::constants_offset(), O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
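  // The delay slot above materializes the JNIEnv* argument: the JNIEnv
  // structure is embedded in the JavaThread, so adding jni_environment_offset
  // to the cached thread pointer leaves &thread->jni_environment in O0 just
  // as control transfers to the native code.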

  // Back from jni method; Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further gc
  // until we return, now that we've released the handle it might have been
  // protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);
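    // (Annulled-branch idiom above: addcc(G0, O0, O0) tests the JNI handle
    // for NULL while copying it; a non-zero handle takes the branch and the
    // delay-slot ld_ptr unboxes the oop, whereas a NULL handle annuls the
    // load and falls through to the mov(G0, O0) that produces a NULL result.)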

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);

  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell whether we're returning to
  // interpreted or compiled code, so just be safe.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1
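  // For example, with O0 = 0x12345678 (high word) and O1 = 0x9ABCDEF0 (low
  // word), the sequence above leaves G1 = 0x123456789ABCDEF0: sllx places the
  // high word in bits 63..32 and srl zero-extends O1 so stale upper bits
  // cannot pollute the or3.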

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }



  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  // Note: G5_method is still live here, so this Address could be based on it
  // for consistency with the asserts below.
  const Address access_flags      (Lmethod,   methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for vanilla
  // java entries. It could only be false for the specialized entries (like
  // accessor or empty) which have no extra locals, so the testing was a waste
  // of time and the extra locals were always initialized. We removed this
  // extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
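  // The loop clears exactly the non-parameter locals: O2 starts one word
  // below the lowest local and O1 at the first non-parameter slot (locals
  // grow down from Llocals), and the annulled delay-slot st_ptr stores G0
  // only on taken iterations, so the parameters passed by the caller are
  // never overwritten.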
1298 
1299   const Address do_not_unlock_if_synchronized(G2_thread,
1300     JavaThread::do_not_unlock_if_synchronized_offset());
1301   // Since at this point in the method invocation the exception handler
1302   // would try to exit the monitor of synchronized methods which hasn't
1303   // been entered yet, we set the thread local variable
1304   // _do_not_unlock_if_synchronized to true. If any exception was thrown by
1305   // runtime, exception handling i.e. unlock_if_synchronized_method will
1306   // check this thread local flag.
1307   __ movbool(true, G3_scratch);
1308   __ stbool(G3_scratch, do_not_unlock_if_synchronized);
1309 
1310   // increment invocation counter and check for overflow
1311   //
1312   // Note: checking for negative value instead of overflow
1313   //       so we have a 'sticky' overflow test (may be of
1314   //       importance as soon as we have true MT/MP)
1315   Label invocation_counter_overflow;
1316   Label profile_method;
1317   Label profile_method_continue;
1318   Label Lcontinue;
1319   if (inc_counter) {
1320     generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1321     if (ProfileInterpreter) {
1322       __ bind(profile_method_continue);
1323     }
1324   }
1325   __ bind(Lcontinue);
1326 
1327   bang_stack_shadow_pages(false);
1328 
1329   // reset the _do_not_unlock_if_synchronized flag
1330   __ stbool(G0, do_not_unlock_if_synchronized);
1331 
1332   // check for synchronized methods
1333   // Must happen AFTER invocation_counter check and stack overflow check,
1334   // so method is not locked if overflows.
1335 
1336   if (synchronized) {
1337     lock_method();
1338   } else {
1339 #ifdef ASSERT
1340     { Label ok;
1341       __ ld(access_flags, O0);
1342       __ btst(JVM_ACC_SYNCHRONIZED, O0);
1343       __ br( Assembler::zero, false, Assembler::pt, ok);
1344       __ delayed()->nop();
1345       __ stop("method needs synchronization");
1346       __ bind(ok);
1347     }
1348 #endif // ASSERT
1349   }
1350 
1351   // start execution
1352 
1353   __ verify_thread();
1354 
1355   // jvmti support
1356   __ notify_method_entry();
1357 
1358   // start executing instructions
1359   __ dispatch_next(vtos);
1360 
1361 
1362   if (inc_counter) {
1363     if (ProfileInterpreter) {
1364       // We have decided to profile this method in the interpreter
1365       __ bind(profile_method);
1366 
1367       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1368       __ set_method_data_pointer_for_bcp();
1369       __ ba(false, profile_method_continue);
1370       __ delayed()->nop();
1371     }
1372 
1373     // handle invocation counter overflow
1374     __ bind(invocation_counter_overflow);
1375     generate_counter_overflow(Lcontinue);
1376   }
1377 
1378 
1379   return entry;
1380 }
1381 
1382 
1383 //----------------------------------------------------------------------------------------------------
1384 // Entry points & stack frame layout
1385 //
1386 // Here we generate the various kinds of entries into the interpreter.
1387 // The two main entry types are generic bytecode methods and native call methods.
1388 // These both come in synchronized and non-synchronized versions but the
1389 // frame layout they create is very similar. The other method entry
1390 // types are special-purpose entries that combine entry and
1391 // interpretation all in one. These are for trivial methods like
1392 // accessor, empty, or special math methods.
1393 //
1394 // When control flow reaches any of the entry types for the interpreter
1395 // the following holds ->
1396 //
1397 // C2 Calling Conventions:
1398 //
1399 // The entry code below assumes that the following registers are set
1400 // when coming in:
1401 //    G5_method: holds the methodOop of the method to call
1402 //    Lesp:    points to the TOS of the caller's expression stack
1403 //             after having pushed all the parameters
1404 //
1405 // The entry code does the following to setup an interpreter frame
1406 //   pop parameters from the caller's stack by adjusting Lesp
1407 //   set O0 to Lesp
1408 //   compute X = (max_locals - num_parameters)
1409 //   bump SP up by X to accommodate the extra locals
1410 //   compute X = max_expression_stack
1411 //               + vm_local_words
1412 //               + 16 words of register save area
1413 //   save frame doing a save sp, -X, sp growing towards lower addresses
1414 //   set Lbcp, Lmethod, LcpoolCache
1415 //   set Llocals to i0
1416 //   set Lmonitors to FP - rounded_vm_local_words
1417 //   set Lesp to Lmonitors - 4
1418 //
1419 //  The frame has now been set up to do the rest of the entry code
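     //
     // A worked example of the steps above (all sizes illustrative): for a
     // method with 2 parameters, max_locals == 5 and a 4-slot maximum
     // expression stack, the entry code first bumps the caller's SP by
     // (5 - 2) = 3 stack slots (suitably rounded) to make room for the
     // non-argument locals, and then allocates the rest of the frame with a
     // single register-window save, roughly
     //
     //   save %sp, -((4 + vm_local_words + 16) * wordSize, rounded), %sp
     //
     // so that the register save area, the VM locals and the expression
     // stack all live in the newly saved window's frame.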
1420 
1421 // Try this optimization:  Most method entries could live in a
1422 // "one size fits all" stack frame without all the dynamic size
1423 // calculations.  It might be profitable to do all this calculation
1424 // statically and approximately for "small enough" methods.
1425 
1426 //-----------------------------------------------------------------------------------------------
1427 
1428 // C1 Calling conventions
1429 //
1430 // Upon method entry, the following registers are setup:
1431 //
1432 // g2 G2_thread: current thread
1433 // g5 G5_method: method to activate
1434 // g4 Gargs  : pointer to last argument
1435 //
1436 //
1437 // Stack:
1438 //
1439 // +---------------+ <--- sp
1440 // |               |
1441 // : reg save area :
1442 // |               |
1443 // +---------------+ <--- sp + 0x40
1444 // |               |
1445 // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
1446 // |               |
1447 // +---------------+ <--- sp + 0x5c
1448 // |               |
1449 // :     free      :
1450 // |               |
1451 // +---------------+ <--- Gargs
1452 // |               |
1453 // :   arguments   :
1454 // |               |
1455 // +---------------+
1456 // |               |
1457 //
1458 //
1459 //
1460 // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
1461 //
1462 // +---------------+ <--- sp
1463 // |               |
1464 // : reg save area :
1465 // |               |
1466 // +---------------+ <--- sp + 0x40
1467 // |               |
1468 // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
1469 // |               |
1470 // +---------------+ <--- sp + 0x5c
1471 // |               |
1472 // :               :
1473 // |               | <--- Lesp
1474 // +---------------+ <--- Lmonitors (fp - 0x18)
1475 // |   VM locals   |
1476 // +---------------+ <--- fp
1477 // |               |
1478 // : reg save area :
1479 // |               |
1480 // +---------------+ <--- fp + 0x40
1481 // |               |
1482 // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
1483 // |               |
1484 // +---------------+ <--- fp + 0x5c
1485 // |               |
1486 // :     free      :
1487 // |               |
1488 // +---------------+
1489 // |               |
1490 // : nonarg locals :
1491 // |               |
1492 // +---------------+
1493 // |               |
1494 // :   arguments   :
1495 // |               | <--- Llocals
1496 // +---------------+ <--- Gargs
1497 // |               |
1498 
1499 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
1500 
1501   // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
1502   // expression stack, the callee will have callee_extra_locals (so we can account for
1503   // frame extension) and monitor_size for monitors. Basically we need to calculate
1504   // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
1505   //
1506   //
1507   // The big complicating thing here is that we must ensure that the stack stays properly
1508   // aligned. (This would be even uglier if monitor_size weren't already a multiple of
1509   // what the stack needs to be aligned for.) We are given that the sp (fp) is already
1510   // aligned by the caller, so we must ensure that it is properly aligned for our callee.
1511   //
1512   const int rounded_vm_local_words =
1513        round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1514   // callee_extra_locals and max_stack are element counts, not sizes in words.
1515   const int locals_size =
1516        round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
1517   const int max_stack_words = max_stack * Interpreter::stackElementWords;
1518   return (round_to((max_stack_words
1519                    //6815692//+ methodOopDesc::extra_stack_words()
1520                    + rounded_vm_local_words
1521                    + frame::memory_parameter_word_sp_offset), WordsPerLong)
1522                    // already rounded
1523                    + locals_size + monitor_size);
1524 }
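
     // A worked example of the computation above (all constants illustrative,
     // not the real platform values): assume WordsPerLong == 2, one word per
     // stack element, rounded_vm_local_words == 8 and
     // frame::memory_parameter_word_sp_offset == 23. A method with
     // max_stack == 5, callee_extra_locals == 3 and monitor_size == 4 then needs
     //
     //   round_to(5 + 8 + 23, 2) + round_to(3, 2) + 4  =  36 + 4 + 4  =  44 words.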
1525 
1526 // How much stack a method's top interpreter activation needs, in words.
1527 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1528 
1529   // See call_stub code
1530   int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
1531                                  WordsPerLong);    // 7 + register save area
1532 
1533   // Save space for one monitor to get into the interpreted method in case
1534   // the method is synchronized
1535   int monitor_size    = method->is_synchronized() ?
1536                                 1*frame::interpreter_frame_monitor_size() : 0;
1537   return size_activation_helper(method->max_locals(), method->max_stack(),
1538                                  monitor_size) + call_stub_size;
1539 }
1540 
1541 int AbstractInterpreter::layout_activation(methodOop method,
1542                                            int tempcount,
1543                                            int popframe_extra_args,
1544                                            int moncount,
1545                                            int callee_param_count,
1546                                            int callee_local_count,
1547                                            frame* caller,
1548                                            frame* interpreter_frame,
1549                                            bool is_top_frame) {
1550   // Note: This calculation must exactly parallel the frame setup
1551   // in InterpreterGenerator::generate_fixed_frame.
1552   // If interpreter_frame != NULL, set up the following variables:
1553   //   - Lmethod
1554   //   - Llocals
1555   //   - Lmonitors (to the indicated number of monitors)
1556   //   - Lesp (to the indicated number of temps)
1557   // The frame caller on entry describes the caller of the frame we are
1558   // about to lay out. We are guaranteed that we will be able to fill in a
1559   // new interpreter frame as its callee (i.e. the stack space is allocated and the
1560   // amount was determined by an earlier call to this method with interpreter_frame == NULL).
1561   // On return, interpreter_frame (if not NULL) will describe the interpreter frame we just laid out.
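       //
       // Typical two-pass use during deoptimization (a sketch only; `m`,
       // `temps`, `mons`, `caller_frame` etc. are illustrative names for the
       // parameters above): first size the frame, then fill in the skeleton
       // once the stack space exists:
       //
       //   int size = AbstractInterpreter::layout_activation(m, temps, 0, mons,
       //                  callee_params, callee_locals, &caller_frame, NULL, is_top);
       //   ... carve out `size` words of stack for the skeleton frame ...
       //   AbstractInterpreter::layout_activation(m, temps, 0, mons,
       //                  callee_params, callee_locals, &caller_frame, &skeleton, is_top);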
1562 
1563   int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
1564   int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1565 
1566   assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
1567   //
1568   // Note: if you look closely this appears to be doing something quite different
1569   // from generate_fixed_frame. What is happening is this. On sparc we have to do
1570   // this dance with interpreter_sp_adjustment because the window save area would
1571   // appear just below the bottom (tos) of the caller's java expression stack. Because
1572   // the interpreter wants the locals completely contiguous, generate_fixed_frame
1573   // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
1574   // In generate_fixed_frame the extension of the caller's sp happens in the callee.
1575   // In this code the opposite occurs: the caller adjusts its own stack based on the callee.
1576   // This is mostly ok but it does cause a problem when we get to the initial frame (the oldest),
1577   // because the oldest frame would have to adjust its caller's frame, and yet that frame
1578   // already exists and isn't part of this array of frames we are unpacking. So at first
1579   // glance this would seem to mess up that frame. However, Deoptimization::fetch_unroll_info_helper(),
1580   // after it calculates all of the frames' on_stack_size()'s, will figure out the
1581   // amount to adjust the caller of the initial (oldest) frame, and the calculation will all
1582   // add up. It might seem simpler to account for the adjustment here (and remove the
1583   // callee... parameters here). However, this would mean that this routine would have to take
1584   // the caller frame as input so we could adjust its sp (and set its interpreter_sp_adjustment)
1585   // and run the calling loop in the reverse order. It would also appear to mean making
1586   // this code aware of the interactions when that initial caller frame was an osr or
1587   // other adapter frame. Deoptimization is complicated enough and hard enough to debug that
1588   // there is no sense in messing with working code.
1589   //
1590 
1591   int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
1592   assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
1593 
1594   int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
1595                                               monitor_size);
1596 
1597   if (interpreter_frame != NULL) {
1598     // The skeleton frame must already look like an interpreter frame
1599     // even if not fully filled out.
1600     assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
1601 
1602     intptr_t* fp = interpreter_frame->fp();
1603 
1604     JavaThread* thread = JavaThread::current();
1605     RegisterMap map(thread, false);
1606     // More verification that skeleton frame is properly walkable
1607     assert(fp == caller->sp(), "fp must match");
1608 
1609     intptr_t* montop     = fp - rounded_vm_local_words;
1610 
1611     // preallocate monitors (cf. __ add_monitor_to_stack)
1612     intptr_t* monitors = montop - monitor_size;
1613 
1614     // preallocate stack space
1615     intptr_t*  esp = monitors - 1 -
1616                      (tempcount * Interpreter::stackElementWords) -
1617                      popframe_extra_args;
1618 
1619     int local_words = method->max_locals() * Interpreter::stackElementWords;
1620     int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords;
1621     NEEDS_CLEANUP;
1622     intptr_t* locals;
1623     if (caller->is_interpreted_frame()) {
1624       // Can force the locals area to end up properly overlapping the top of the expression stack.
1625       intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
1626       // Note that this computation means we replace size_of_parameters() values from the caller
1627       // interpreter frame's expression stack with our argument locals
1628       locals = Lesp_ptr + parm_words;
1629       int delta = local_words - parm_words;
1630       int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
1631       *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
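           // Illustrative sketch: with parm_words == 2, local[0] (at Llocals)
           // and local[1] (one word below) are the very same stack slots as
           // the two argument words on top of the caller's expression stack,
           // so the arguments are shared rather than copied.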
1632     } else {
1633       assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
1634       // Don't have Lesp available; lay out locals block in the caller
1635       // adjacent to the register window save area.
1636       //
1637       // Compiled frames do not allocate a varargs area which is why this if
1638       // statement is needed.
1639       //
1640       if (caller->is_compiled_frame()) {
1641         locals = fp + frame::register_save_words + local_words - 1;
1642       } else {
1643         locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
1644       }
1645       if (!caller->is_entry_frame()) {
1646         // The caller wants its own SP back
1647         int caller_frame_size = caller->cb()->frame_size();
1648         *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
1649       }
1650     }
1651     if (TraceDeoptimization) {
1652       if (caller->is_entry_frame()) {
1653         // Make sure I5_savedSP and the entry frame's notion of saved SP
1654         // agree. This assertion duplicates a check in the entry frame code
1655         // but catches the failure earlier.
1656         assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
1657                "would change callers SP");
1658       }
1659       if (caller->is_entry_frame()) {
1660         tty->print("entry ");
1661       }
1662       if (caller->is_compiled_frame()) {
1663         tty->print("compiled ");
1664         if (caller->is_deoptimized_frame()) {
1665           tty->print("(deopt) ");
1666         }
1667       }
1668       if (caller->is_interpreted_frame()) {
1669         tty->print("interpreted ");
1670       }
1671       tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)caller->sp());
1672       tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->sp(), (intptr_t)(caller->sp() + 16));
1673       tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)(caller->fp() + 16));
1674       tty->print_cr("interpreter fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)interpreter_frame->sp());
1675       tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->sp(), (intptr_t)(interpreter_frame->sp() + 16));
1676       tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)(interpreter_frame->fp() + 16));
1677       tty->print_cr("Llocals = " INTPTR_FORMAT, (intptr_t)locals);
1678       tty->print_cr("Lesp = " INTPTR_FORMAT, (intptr_t)esp);
1679       tty->print_cr("Lmonitors = " INTPTR_FORMAT, (intptr_t)monitors);
1680     }
1681 
1682     if (method->max_locals() > 0) {
1683       assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
1684       assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
1685       assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
1686       assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
1687     }
1688 #ifdef _LP64
1689     assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
1690 #endif
1691 
1692     *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
1693     *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
1694     *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
1695     *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
1696     // Llast_SP will be the same as SP since there is no adapter space
1697     *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
1698     *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
1699 #ifdef FAST_DISPATCH
1700     *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
1701 #endif
1702 
1703 
1704 #ifdef ASSERT
1705     BasicObjectLock* mp = (BasicObjectLock*)monitors;
1706 
1707     assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
1708     assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
1709     assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
1710     assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
1711     assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
1712 
1713     // check bounds
1714     intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
1715     intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
1716     assert(lo < monitors && montop <= hi, "monitors in bounds");
1717     assert(lo <= esp && esp < monitors, "esp in bounds");
1718 #endif // ASSERT
1719   }
1720 
1721   return raw_frame_size;
1722 }
1723 
1724 //----------------------------------------------------------------------------------------------------
1725 // Exceptions
1726 void TemplateInterpreterGenerator::generate_throw_exception() {
1727 
1728   // Entry point in previous activation (i.e., if the caller was interpreted)
1729   Interpreter::_rethrow_exception_entry = __ pc();
1730   // O0: exception
1731 
1732   // entry point for exceptions thrown within interpreter code
1733   Interpreter::_throw_exception_entry = __ pc();
1734   __ verify_thread();
1735   // expression stack is undefined here
1736   // O0: exception, i.e. Oexception
1737   // Lbcp: exception bcp
1738   __ verify_oop(Oexception);
1739 
1740 
1741   // expression stack must be empty before entering the VM in case of an exception
1742   __ empty_expression_stack();
1743   // find exception handler address and preserve exception oop
1744   // call C routine to find handler and jump to it
1745   __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1746   __ push_ptr(O1); // push exception for exception handler bytecodes
1747 
1748   __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1749   __ delayed()->nop();
1750 
1751 
1752   // if the exception is not handled in the current frame
1753   // the frame is removed and the exception is rethrown
1754   // (i.e. exception continuation is _rethrow_exception)
1755   //
1756   // Note: At this point the bcp still refers to the instruction which caused
1757   //       the exception and the expression stack is empty. Thus, for any VM calls
1758   //       at this point, GC will find a legal oop map (with empty expression stack).
1759 
1760   // in current activation
1761   // tos: exception
1762   // Lbcp: exception bcp
1763 
1764   //
1765   // JVMTI PopFrame support
1766   //
1767 
1768   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1769   Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1770   // Set the popframe_processing bit in popframe_condition indicating that we are
1771   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1772   // popframe handling cycles.
1773 
1774   __ ld(popframe_condition_addr, G3_scratch);
1775   __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1776   __ stw(G3_scratch, popframe_condition_addr);
1777 
1778   // Empty the expression stack, as in normal exception handling
1779   __ empty_expression_stack();
1780   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1781 
1782   {
1783     // Check to see whether we are returning to a deoptimized frame.
1784     // (The PopFrame call ensures that the caller of the popped frame is
1785     // either interpreted or compiled and deoptimizes it if compiled.)
1786     // In this case, we can't call dispatch_next() after the frame is
1787     // popped, but instead must save the incoming arguments and restore
1788     // them after deoptimization has occurred.
1789     //
1790     // Note that we don't compare the return PC against the
1791     // deoptimization blob's unpack entry because of the presence of
1792     // adapter frames in C2.
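         //
         // Sketch of the test below (illustrative): since PopFrame guarantees
         // the caller is either interpreted or has been deoptimized,
         //
         //   if (InterpreterRuntime::interpreter_contains(I7 /* return pc */))
         //     => the caller is still interpreted; dispatch after the pop
         //   else
         //     => the caller was deoptimized; save the incoming arguments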
1793     Label caller_not_deoptimized;
1794     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1795     __ tst(O0);
1796     __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
1797     __ delayed()->nop();
1798 
1799     const Register Gtmp1 = G3_scratch;
1800     const Register Gtmp2 = G1_scratch;
1801 
1802     // Compute size of arguments for saving when returning to deoptimized caller
1803     __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
1804     __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1805     __ sub(Llocals, Gtmp1, Gtmp2);
1806     __ add(Gtmp2, wordSize, Gtmp2);
1807     // Save these arguments
1808     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1809     // Inform deoptimization that it is responsible for restoring these arguments
1810     __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1811     Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1812     __ st(Gtmp1, popframe_condition_addr);
1813 
1814     // Return from the current method
1815     // The caller's SP was adjusted upon method entry to accommodate
1816     // the callee's non-argument locals. Undo that adjustment.
1817     __ ret();
1818     __ delayed()->restore(I5_savedSP, G0, SP);
1819 
1820     __ bind(caller_not_deoptimized);
1821   }
1822 
1823   // Clear the popframe condition flag
1824   __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1825 
1826   // Get out of the current method (how this is done depends on the particular compiler calling
1827   // convention that the interpreter currently follows)
1828   // The caller's SP was adjusted upon method entry to accommodate
1829   // the callee's non-argument locals. Undo that adjustment.
1830   __ restore(I5_savedSP, G0, SP);
1831   // The method data pointer was incremented already during
1832   // call profiling. We have to restore the mdp for the current bcp.
1833   if (ProfileInterpreter) {
1834     __ set_method_data_pointer_for_bcp();
1835   }
1836   // Resume bytecode interpretation at the current bcp
1837   __ dispatch_next(vtos);
1838   // end of JVMTI PopFrame support
1839 
1840   Interpreter::_remove_activation_entry = __ pc();
1841 
1842   // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1843   __ pop_ptr(Oexception);                                  // get exception
1844 
1845   // Intel has the following comment:
1846   //// remove the activation (without doing throws on illegalMonitorExceptions)
1847   // They remove the activation without checking for bad monitor state.
1848   // %%% We should make sure this is the right semantics before implementing.
1849 
1850   // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
1851   __ set_vm_result(Oexception);
1852   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1853 
1854   __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1855 
1856   __ get_vm_result(Oexception);
1857   __ verify_oop(Oexception);
1858 
1859   const int return_reg_adjustment = frame::pc_return_offset;
1860   Address issuing_pc_addr(I7, return_reg_adjustment);
1861 
1862   // We are done with this activation frame; find out where to go next.
1863   // The continuation point will be an exception handler, which expects
1864   // the following registers set up:
1865   //
1866   // Oexception: exception
1867   // Oissuing_pc: the local call that threw exception
1868   // Other On: garbage
1869   // In/Ln:  the contents of the caller's register window
1870   //
1871   // We do the required restore at the last possible moment, because we
1872   // need to preserve some state across a runtime call.
1873   // (Remember that the caller activation is unknown--it might not be
1874   // interpreted, so things like Lscratch are useless in the caller.)
1875 
1876   // Although the Intel version uses call_C, we can use the more
1877   // compact call_VM.  (The only real difference on SPARC is a
1878   // harmlessly ignored [re]set_last_Java_frame, compared with
1879   // the Intel code which lacks this.)
1880   __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
1881   __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
1882   __ super_call_VM_leaf(L7_thread_cache,
1883                         CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1884                         G2_thread, Oissuing_pc->after_save());
1885 
1886   // The caller's SP was adjusted upon method entry to accommodate
1887   // the callee's non-argument locals. Undo that adjustment.
1888   __ JMP(O0, 0);                         // return exception handler in caller
1889   __ delayed()->restore(I5_savedSP, G0, SP);
1890 
1891   // (same old exception object is already in Oexception; see above)
1892   // Note that an "issuing PC" is actually the next PC after the call
1893 }
1894 
1895 
1896 //
1897 // JVMTI ForceEarlyReturn support
1898 //
1899 
1900 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1901   address entry = __ pc();
1902 
1903   __ empty_expression_stack();
1904   __ load_earlyret_value(state);
1905 
1906   __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1907   Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1908 
1909   // Clear the earlyret state
1910   __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1911 
1912   __ remove_activation(state,
1913                        /* throw_monitor_exception */ false,
1914                        /* install_monitor_exception */ false);
1915 
1916   // The caller's SP was adjusted upon method entry to accommodate
1917   // the callee's non-argument locals. Undo that adjustment.
1918   __ ret();                             // return to caller
1919   __ delayed()->restore(I5_savedSP, G0, SP);
1920 
1921   return entry;
1922 } // end of JVMTI ForceEarlyReturn support
1923 
1924 
1925 //------------------------------------------------------------------------------------------------------------------------
1926 // Helper for vtos entry point generation
1927 
1928 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1929   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1930   Label L;
1931   aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
1932   fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
1933   dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
1934   lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
1935   iep = __ pc(); __ push_i();
1936   bep = cep = sep = iep;                        // there aren't any
1937   vep = __ pc(); __ bind(L);                    // fall through
1938   generate_and_dispatch(t);
1939 }
1940 
1941 // --------------------------------------------------------------------------------
1942 
1943 
1944 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1945  : TemplateInterpreterGenerator(code) {
1946    generate_all(); // down here so it can be "virtual"
1947 }
1948 
1949 // --------------------------------------------------------------------------------
1950 
1951 // Non-product code
1952 #ifndef PRODUCT
1953 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1954   address entry = __ pc();
1955 
1956   __ push(state);
1957   __ mov(O7, Lscratch); // protect return address within interpreter
1958 
1959   // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
1960   __ mov( Otos_l2, G3_scratch );
1961   __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
1962   __ mov(Lscratch, O7); // restore return address
1963   __ pop(state);
1964   __ retl();
1965   __ delayed()->nop();
1966 
1967   return entry;
1968 }
1969 
1970 
1971 // helpers for generate_and_dispatch
1972 
1973 void TemplateInterpreterGenerator::count_bytecode() {
1974   __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1975 }
1976 
1977 
1978 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1979   __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1980 }
1981 
1982 
1983 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1984   AddressLiteral index   (&BytecodePairHistogram::_index);
1985   AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1986 
1987   // get index, shift out old bytecode, bring in new bytecode, and store it
1988   // _index = (_index >> log2_number_of_codes) |
1989   //          (bytecode << log2_number_of_codes);
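       //
       // For example (illustrative; assumes log2_number_of_codes == 8), the
       // pair (previous_bytecode, current_bytecode) packs into a 16-bit index:
       //
       //   _index = (_index >> 8) | (bytecode << 8);
       //   _counters[_index]++;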
1990 
1991   __ load_contents(index, G4_scratch);
1992   __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1993   __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
1994   __ or3( G3_scratch,  G4_scratch, G4_scratch );
1995   __ store_contents(G4_scratch, index, G3_scratch);
1996 
1997   // bump bucket contents
1998   // _counters[_index] ++;
1999 
2000   __ set(counters, G3_scratch);                       // loads into G3_scratch
2001   __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
2002   __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
2003   __ ld (G3_scratch, 0, G4_scratch);
2004   __ inc (G4_scratch);
2005   __ st (G4_scratch, 0, G3_scratch);
2006 }
2007 
2008 
2009 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2010   // Call a little run-time stub to avoid blow-up for each bytecode.
2011   // The run-time stub saves the right registers, depending on
2012   // the tosca in-state for the given template.
2013   address entry = Interpreter::trace_code(t->tos_in());
2014   guarantee(entry != NULL, "entry must have been generated");
2015   __ call(entry, relocInfo::none);
2016   __ delayed()->nop();
2017 }
2018 
2019 
2020 void TemplateInterpreterGenerator::stop_interpreter_at() {
2021   AddressLiteral counter(&BytecodeCounter::_counter_value);
2022   __ load_contents(counter, G3_scratch);
2023   AddressLiteral stop_at(&StopInterpreterAt);
2024   __ load_ptr_contents(stop_at, G4_scratch);
2025   __ cmp(G3_scratch, G4_scratch);
2026   __ breakpoint_trap(Assembler::equal);
2027 }
2028 #endif // not PRODUCT
2029 #endif // !CC_INTERP