/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateInterpreter_sparc.cpp.incl"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH
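// Note: FAST_DISPATCH is #undef'd immediately after being defined above,
// so the fast-dispatch paths guarded by it are currently disabled in this file.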


// Generation of Interpreter
//
// The InterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------


void InterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
  __ stx(O0, l_tmp);
#else
  __ std(O0, l_tmp);
#endif
}

void InterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
  __ ldx(l_tmp, O0);
#else
  __ ldd(l_tmp, O0);
#endif
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


// Arguments are: required type in G5_method_type, and
// failing object (or NULL) in G3_method_handle.
address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_WrongMethodTypeException),
             G5_method_type,    // required
             G3_method_handle); // actual
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
  TosState incoming_state = state;

  Label cont;
  address compiled_entry = __ pc();

  address entry = __ pc();
#if !defined(_LP64) && defined(COMPILER2)
  // All return values are where we want them, except for longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
  // build even if we are returning from interpreted code, we just do a little
  // shuffling here.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately, if we did a rethrow we'd see a MachEpilog node
  // first, which would move G1 -> O0/O1 and destroy the exception we were throwing.

  if (incoming_state == ltos) {
    __ srl (G1,  0, O1);
    __ srlx(G1, 32, O0);
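    // The srl above zero-extends the low 32 bits of G1 into O1; the srlx
    // puts the high 32 bits in O0, matching the interpreter's O0/O1 layout.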
  }
#endif // !_LP64 && COMPILER2

  __ bind(cont);

  // The callee returns with the stack possibly adjusted by adapter transition
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  Label L_got_cache, L_giant_index;
  const Register cache = G3_scratch;
  const Register size  = G1_scratch;
  if (EnableInvokeDynamic) {
    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode.
    __ cmp(G1_scratch, Bytecodes::_invokedynamic);
    __ br(Assembler::equal, false, Assembler::pn, L_giant_index);
    __ delayed()->nop();
  }
  __ get_cache_and_index_at_bcp(cache, G1_scratch, 1);
  __ bind(L_got_cache);
  __ ld_ptr(cache, constantPoolCacheOopDesc::base_offset() +
                   ConstantPoolCacheEntry::flags_offset(), size);
  __ and3(size, 0xFF, size);                   // argument size in words
  __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes
  __ add(Lesp, size, Lesp);                    // pop arguments
  __ dispatch_next(state, step);

  // out of the main line of code...
  if (EnableInvokeDynamic) {
    __ bind(L_giant_index);
    __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4));
    __ ba(false, L_got_cache);
    __ delayed()->nop();
  }

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
#ifndef _LP64
                    __ mov(O1, Itos_l2);  // move other half of long
#endif              // ifdef or no ifdef, fall through to the T_INT case
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_long(0);)       // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  __ dispatch_next(state);
  return entry;
}

//
// Helpers for commoning out cases in the various types of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered compilation we increment either the counters in the methodOop or in the MDO, depending on whether we're profiling or not.
  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
    Label no_mdo, done;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
      __ delayed()->nop();
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba(false, done);
      __ delayed()->nop();
    }

    // Increment counter in methodOop
    __ bind(no_mdo);
    Address invocation_counter(Lmethod,
                               in_bytes(methodOopDesc::invocation_counter_offset()) +
                               in_bytes(InvocationCounter::counter_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G3_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else {
    // Update standard invocation counters
    __ increment_invocation_counter(O0, G3_scratch);
    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
      Address interpreter_invocation_counter(Lmethod, in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G3_scratch);
      __ inc(G3_scratch);
      __ st(G3_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
      __ load_contents(profile_limit, G3_scratch);
      __ cmp(O0, G3_scratch);
      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
      __ delayed()->nop();

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
    __ load_contents(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
    __ delayed()->nop();
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod: methodOop
//
void InterpreterGenerator::lock_method(void) {
  __ ld(Lmethod, in_bytes(methodOopDesc::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    __ ld_ptr( Lmethod, in_bytes(methodOopDesc::constants_offset()), O0);
    __ ld_ptr( O0, constantPoolOopDesc::pool_holder_offset_in_bytes(), O0);

    // lock the mirror, not the klassOop
    __ ld_ptr( O0, mirror_offset, O0);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}


void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                         Register Rscratch,
                                                         Register Rscratch2) {
  const int page_size = os::vm_page_size();
  Address saved_exception_pc(G2_thread, JavaThread::saved_exception_pc_offset());
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch, Rscratch2);

  __ set( page_size,   Rscratch );
  __ cmp( Rframe_size, Rscratch );

  __ br( Assembler::lessEqual, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // get the stack base, and in debug, verify it is non-zero
  __ ld_ptr( G2_thread, Thread::stack_base_offset(), Rscratch );
#ifdef ASSERT
  Label base_not_zero;
  __ cmp( Rscratch, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, base_not_zero );
  __ delayed()->nop();
  __ stop("stack base is zero in generate_stack_overflow_check");
  __ bind(base_not_zero);
#endif

  // get the stack size, and in debug, verify it is non-zero
  assert( sizeof(size_t) == sizeof(intptr_t), "wrong load size" );
  __ ld_ptr( G2_thread, Thread::stack_size_offset(), Rscratch2 );
#ifdef ASSERT
  Label size_not_zero;
  __ cmp( Rscratch2, G0 );
  __ brx( Assembler::notEqual, false, Assembler::pn, size_not_zero );
  __ delayed()->nop();
  __ stop("stack size is zero in generate_stack_overflow_check");
  __ bind(size_not_zero);
#endif

  // compute the beginning of the protected zone minus the requested frame size
  __ sub( Rscratch, Rscratch2,   Rscratch );
  __ set( (StackRedPages+StackYellowPages) * page_size, Rscratch2 );
  __ add( Rscratch, Rscratch2,   Rscratch );
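  // Rscratch now holds the lowest address a frame may legally occupy:
  // the stack bottom plus the red/yellow guard zone.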

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register)
  __ add( Rscratch, Rframe_size, Rscratch );

  // the frame is greater than one page in size, so check against
  // the bottom of the stack
  __ cmp( SP, Rscratch );
  __ brx( Assembler::greater, false, Assembler::pt, after_frame_check );
  __ delayed()->nop();

  // Save the return address as the exception pc
  __ st_ptr(O7, saved_exception_pc);

  // the stack will overflow, throw an exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));

  // if you get to here, then there is enough stack space
  __ bind( after_frame_check );
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  const Address max_stack         (G5_method, methodOopDesc::max_stack_offset());
  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    //6815692//methodOopDesc::extra_stack_words() +       // extra push slots for MH adapters
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.

  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    __ lduh( size_of_locals, Otmp1 );
    __ sub( Otmp1, Glocals_size, Glocals_size );
    __ round_to( Glocals_size, WordsPerLong );
    __ sll( Glocals_size, Interpreter::logStackElementSize, Glocals_size );

    // see if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ lduh( max_stack, Gframe_size );
    __ add( Gframe_size, extra_space, Gframe_size );
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add( Gframe_size, Glocals_size, Gframe_size );

    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1, Otmp2);

    __ sub( Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub( SP, Glocals_size, SP );
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );
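  // 'save' opens a new register window and adjusts SP in one instruction;
  // Gframe_size was negated above because the stack grows toward lower addresses.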

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
#ifdef _LP64
  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
#endif
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Empty method, generate a very fast return.

address InterpreterGenerator::generate_empty_entry(void) {

  // A method that does nothing but return...

  address entry = __ pc();
  Label slow_path;

  __ verify_oop(G5_method);

  // do nothing for empty methods (do not even increment invocation counter)
  if (UseFastEmptyMethods) {
    // If we need a safepoint check, generate full interpreter entry.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ set(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Code: _return
    __ retl();
    __ delayed()->mov(O5_savedSP, SP);
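    // retl returns to the caller without ever constructing an interpreter
    // frame; the delay slot restores the caller's SP from O5_savedSP,
    // removing any stack space an adapter may have added.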

    __ bind(slow_path);
    (void) generate_normal_entry(false);

    return entry;
  }
  return NULL;
}

// Call an accessor method (assuming it is resolved; otherwise drop into
// the vanilla (slow path) entry).

// Generates code to elide accessor methods
// Uses G3_scratch and G1_scratch as scratch
address InterpreterGenerator::generate_accessor_entry(void) {

  // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
  // parameter size = 1
  // Note: We can only use this code if the getfield has been resolved
  //       and if we don't have a null-pointer exception => check for
  //       these conditions first and use slow path if necessary.
  address entry = __ pc();
  Label slow_path;


  // XXX: for compressed oops pointer loading and decoding doesn't fit in
  // delay slot and damages G1
  if (UseFastAccessorMethods && !UseCompressedOops) {
    // Check if we need to reach a safepoint and generate full interpreter
    // frame if so.
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Check if local 0 != NULL
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    __ tst(Otos_i);  // check if local 0 == NULL and go the slow path
    __ brx(Assembler::zero, false, Assembler::pn, slow_path);
    __ delayed()->nop();


    // read first instruction word and extract bytecode @ 1 and index @ 2
    // get first 4 bytes of the bytecodes (big endian!)
    __ ld_ptr(G5_method, methodOopDesc::const_offset(), G1_scratch);
    __ ld(G1_scratch, constMethodOopDesc::codes_offset(), G1_scratch);

    // move index @ 2 far left then to the right most two bytes.
    __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
                      ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
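    // Net effect: G1_scratch = constant-pool index * cache entry size in
    // bytes. The 32-bit sll discards the two leading bytecode bytes and the
    // srl leaves the index pre-scaled for the add below.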

    // get constant pool cache
    __ ld_ptr(G5_method, methodOopDesc::constants_offset(), G3_scratch);
    __ ld_ptr(G3_scratch, constantPoolOopDesc::cache_offset_in_bytes(), G3_scratch);

    // get specific constant pool cache entry
    __ add(G3_scratch, G1_scratch, G3_scratch);

    // Check the constant pool cache entry to see if it has been resolved.
    // If not, need the slow path.
    ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
    __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
    __ and3(G1_scratch, 0xFF, G1_scratch);
    __ cmp(G1_scratch, Bytecodes::_getfield);
    __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
    __ delayed()->nop();

    // Get the type and return field offset from the constant pool cache
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
    __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);

    Label xreturn_path;
    // Need to differentiate between igetfield, agetfield, bgetfield etc.
    // because they are different sizes.
    // Get the type from the constant pool cache
    __ srl(G1_scratch, ConstantPoolCacheEntry::tosBits, G1_scratch);
    // Make sure we don't need to mask G1_scratch for tosBits after the above shift
    ConstantPoolCacheEntry::verify_tosBits();
    __ cmp(G1_scratch, atos );
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, itos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, stos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
    __ cmp(G1_scratch, ctos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
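    // The branches above annul their delay slots on the not-taken path, so
    // each field load executes only when its type comparison matched.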
#ifdef ASSERT
    __ cmp(G1_scratch, btos);
    __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
    __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
    __ should_not_reach_here();
#endif
    __ ldsb(Otos_i, G3_scratch, Otos_i);
    __ bind(xreturn_path);

    // _ireturn/_areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    (void) generate_normal_entry(false);
    return entry;
  }
  return NULL;
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // At this point in the method invocation the exception handler would try
  // to exit the monitor of a synchronized method which has not been entered
  // yet, so we set the thread-local variable _do_not_unlock_if_synchronized
  // to true. If any exception is thrown by the runtime, the exception
  // handling code (i.e. unlock_if_synchronized_method) will check this flag.
  // The effect is to force an unwind of the topmost interpreter frame
  // without performing an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, methodOopDesc::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ tst(G3_scratch);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call the signature handler; it will move the args properly, since Llocals
  // in the current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(methodOopDesc::native_function_offset()), O0);
    // for static methods insert the mirror argument
    const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

    __ ld_ptr(Lmethod, methodOopDesc::constants_offset(), O1);
    __ ld_ptr(O1, constantPoolOopDesc::pool_holder_offset_in_bytes(), O1);
    __ ld_ptr(O1, mirror_offset, O1);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ tst(O1);
      __ brx(Assembler::notZero, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ tst(O0);
    __ brx(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code; it doesn't need to know the exact
  // PC, and hence we can use O7, which points to the return address from the
  // previous call in the code stream (the signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flush_windows();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);
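  // With the flushed flag set, the VM can walk this thread's stack while it
  // is in native code without needing another register window flush.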

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp(G3_scratch, _thread_in_Java);
    __ br(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method; Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
    //     didn't see any synchronization in progress, and escapes.
    __ set(_thread_in_native_trans, G3_scratch);
    __ st(G3_scratch, thread_state);
    if (os::is_MP()) {
      if (UseMembar) {
        // Force this write out before the read below
        __ membar(Assembler::StoreLoad);
      } else {
        // Write serialization page so VM thread can do a pseudo remote membar.
        // We use the current thread pointer to calculate a thread specific
        // offset to write to within the page. This minimizes bus traffic
        // due to cache line collision.
        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
      }
    }
    __ load_contents(sync_state, G3_scratch);
    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);

    Label L;
    __ br(Assembler::notEqual, false, Assembler::pn, L);
    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
    __ cmp(G3_scratch, 0);
    __ br(Assembler::equal, false, Assembler::pt, no_block);
    __ delayed()->nop();
    __ bind(L);

    // Block.  Save any potential method result value before the operation and
    // use a leaf call to leave the last_Java_frame setup undisturbed.
    save_native_result();
    __ call_VM_leaf(L7_thread_cache,
                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
                    G2_thread);

    // Restore any method result value
    restore_native_result();
    __ bind(no_block);
  }

  // Clear the frame anchor now

  __ reset_last_Java_frame();

  // Move the result handler address
  __ mov(Lscratch, G3_scratch);
  // return possible result to the outer frame
#ifndef _LP64
  __ mov(O0, I0);
  __ restore(O1, G0, O1);
#else
  __ restore(O0, G0, O0);
#endif /* _LP64 */

  // Move result handler to expected register
  __ mov(G3_scratch, Lscratch);

  // Back in normal (native) interpreter frame. State is thread_in_native_trans
  // switch to thread_in_Java.

  __ set(_thread_in_Java, G3_scratch);
  __ st(G3_scratch, thread_state);

  // reset handle block
  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
  __ st_ptr(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());

  // If we have an oop result, store it where it will be safe for any further GC
  // until we return, now that we've released the handle it might be protected by.

  {
    Label no_oop, store_result;

    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
    __ cmp(G3_scratch, Lscratch);
    __ brx(Assembler::notEqual, false, Assembler::pt, no_oop);
    __ delayed()->nop();
    __ addcc(G0, O0, O0);
    __ brx(Assembler::notZero, true, Assembler::pt, store_result);     // if result is not NULL:
    __ delayed()->ld_ptr(O0, 0, O0);                                   // unbox it
    __ mov(G0, O0);
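    // If the returned handle was NULL, the branch above fell through with its
    // delay slot annulled and this mov leaves a NULL oop in O0; otherwise the
    // delay-slot load has already dereferenced (unboxed) the handle.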

    __ bind(store_result);
    // Store it where gc will look for it and result handler expects it.
    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);

    __ bind(no_oop);
  }


  // handle exceptions (exception handling will handle unlocking!)
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);
    __ tst(Gtemp);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    // Note: This could be handled more efficiently since we know that the native
    //       method doesn't have an exception handler. We could directly return
    //       to the exception handler for the caller.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // JVMTI support (preserves thread register)
  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);

  if (synchronized) {
    // save and restore any potential method result value around the unlocking operation
    save_native_result();

    __ add( __ top_most_monitor(), O1);
    __ unlock_object(O1);

    restore_native_result();
  }

#if defined(COMPILER2) && !defined(_LP64)

  // C2 expects long results in G1; we can't tell whether we're returning to
  // interpreted or compiled code, so just be safe and set G1 as well.

  __ sllx(O0, 32, G1);          // Shift bits into high G1
  __ srl (O1, 0, O1);           // Zero extend O1
  __ or3 (O1, G1, G1);          // OR 64 bits into G1

#endif /* COMPILER2 && !_LP64 */

  // dispose of return address and remove activation
#ifdef ASSERT
  {
    Label ok;
    __ cmp(I5_savedSP, FP);
    __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("bad I5_savedSP value");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif
  if (TraceJumps) {
    // Move target to register that is recordable
    __ mov(Lscratch, G3_scratch);
    __ JMP(G3_scratch, 0);
  } else {
    __ jmp(Lscratch, 0);
  }
  __ delayed()->nop();


  if (inc_counter) {
    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }



  return entry;
}


// Generic method entry to (asm) interpreter
//------------------------------------------------------------------------------------------------------------------------
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry = __ pc();

  bool inc_counter  = UseCompiler || CountCompiledCalls;

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch;
  const Register Gtmp2 = G1_scratch;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address size_of_parameters(G5_method, methodOopDesc::size_of_parameters_offset());
  const Address size_of_locals    (G5_method, methodOopDesc::size_of_locals_offset());
  // G5_method seems to be live at the point this is used, so we could make
  // this look consistent and use it in the asserts.
  const Address access_flags      (Lmethod,   methodOopDesc::access_flags_offset());

  __ verify_oop(G5_method);

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is not native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, methodOopDesc::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame

  generate_fixed_frame(false);

#ifdef FAST_DISPATCH
  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
                                          // set bytecode dispatch table base
#endif

  //
  // Code to initialize the extra (i.e. non-parm) locals
  //
  Register init_value = noreg;    // will be G0 if we must clear locals
  // The way the code was set up before, zerolocals was always true for
  // vanilla java entries. It could only be false for the specialized entries
  // like accessor or empty, which have no extra locals, so the testing was a
  // waste of time and the extra locals were always initialized. We removed
  // this extra complication from already over-complicated code.

  init_value = G0;
  Label clear_loop;

  // NOTE: If you change the frame layout, this code will need to
  // be updated!
  __ lduh( size_of_locals, O2 );
  __ lduh( size_of_parameters, O1 );
  __ sll( O2, Interpreter::logStackElementSize, O2);
  __ sll( O1, Interpreter::logStackElementSize, O1 );
  __ sub( Llocals, O2, O2 );
  __ sub( Llocals, O1, O1 );

  __ bind( clear_loop );
  __ inc( O2, wordSize );

  __ cmp( O2, O1 );
  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
  __ delayed()->st_ptr( init_value, O2, 0 );
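  // O2 starts one word below the lowest-addressed local, so the pre-increment
  // makes the first store hit that local; the store sits in an annulled delay
  // slot and executes only while O2 <= O1, the first non-parameter local.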

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // At this point in the method invocation the exception handler would try
  // to exit the monitor of a synchronized method which has not been entered
  // yet, so we set the thread-local variable _do_not_unlock_if_synchronized
  // to true. If any exception is thrown by the runtime, the exception
  // handling code (i.e. unlock_if_synchronized_method) will check this flag.
  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(access_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }

  // start execution

  __ verify_thread();

  // jvmti support
  __ notify_method_entry();

  // start executing instructions
  __ dispatch_next(vtos);


  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), Lbcp, true);

#ifdef ASSERT
      __ tst(O0);
      __ breakpoint_trap(Assembler::notEqual);
#endif

      __ set_method_data_pointer();

      __ ba(false, profile_method_continue);
      __ delayed()->nop();
    }

    // handle invocation counter overflow
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }


  return entry;
}


//----------------------------------------------------------------------------------------------------
// Entry points & stack frame layout
//
// Here we generate the various kinds of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call
// methods. These both come in synchronized and non-synchronized versions but
// the frame layout they create is very similar. The other method entry
// types are special purpose entries that combine entry and interpretation
// all in one. These are for trivial methods like accessor, empty, or special
// math methods.
1380 //
// When control flow reaches any of the entry types for the interpreter
// the following holds:
1383 //
1384 // C2 Calling Conventions:
1385 //
1386 // The entry code below assumes that the following registers are set
1387 // when coming in:
1388 //    G5_method: holds the methodOop of the method to call
//    Lesp:    points to the TOS of the caller's expression stack
1390 //             after having pushed all the parameters
1391 //
1392 // The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
1397 //   compute X = max_expression_stack
1398 //               + vm_local_words
1399 //               + 16 words of register save area
1400 //   save frame doing a save sp, -X, sp growing towards lower addresses
1401 //   set Lbcp, Lmethod, LcpoolCache
1402 //   set Llocals to i0
1403 //   set Lmonitors to FP - rounded_vm_local_words
1404 //   set Lesp to Lmonitors - 4
1405 //
1406 //  The frame has now been setup to do the rest of the entry code
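//
// Hedged numeric sketch of the steps above (hypothetical method; rounding and
// vm_local_words are left symbolic): for num_parameters = 2 and max_locals = 5,
//   X = max_locals - num_parameters = 3 words are bumped onto SP for the
//       extra (non-argument) locals, keeping them contiguous with the two
//       parameter words already on the caller's expression stack;
// the subsequent save then allocates
//   X = max_expression_stack + vm_local_words + 16 (register save area)
// more words, with the frame growing towards lower addresses.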
1407 
1408 // Try this optimization:  Most method entries could live in a
1409 // "one size fits all" stack frame without all the dynamic size
1410 // calculations.  It might be profitable to do all this calculation
1411 // statically and approximately for "small enough" methods.
1412 
1413 //-----------------------------------------------------------------------------------------------
1414 
1415 // C1 Calling conventions
1416 //
1417 // Upon method entry, the following registers are setup:
1418 //
1419 // g2 G2_thread: current thread
1420 // g5 G5_method: method to activate
1421 // g4 Gargs  : pointer to last argument
1422 //
1423 //
1424 // Stack:
1425 //
1426 // +---------------+ <--- sp
1427 // |               |
1428 // : reg save area :
1429 // |               |
1430 // +---------------+ <--- sp + 0x40
1431 // |               |
1432 // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
1433 // |               |
1434 // +---------------+ <--- sp + 0x5c
1435 // |               |
1436 // :     free      :
1437 // |               |
1438 // +---------------+ <--- Gargs
1439 // |               |
1440 // :   arguments   :
1441 // |               |
1442 // +---------------+
1443 // |               |
1444 //
1445 //
1446 //
1447 // AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
1448 //
1449 // +---------------+ <--- sp
1450 // |               |
1451 // : reg save area :
1452 // |               |
1453 // +---------------+ <--- sp + 0x40
1454 // |               |
1455 // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
1456 // |               |
1457 // +---------------+ <--- sp + 0x5c
1458 // |               |
1459 // :               :
1460 // |               | <--- Lesp
1461 // +---------------+ <--- Lmonitors (fp - 0x18)
1462 // |   VM locals   |
1463 // +---------------+ <--- fp
1464 // |               |
1465 // : reg save area :
1466 // |               |
1467 // +---------------+ <--- fp + 0x40
1468 // |               |
1469 // : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
1470 // |               |
1471 // +---------------+ <--- fp + 0x5c
1472 // |               |
1473 // :     free      :
1474 // |               |
1475 // +---------------+
1476 // |               |
1477 // : nonarg locals :
1478 // |               |
1479 // +---------------+
1480 // |               |
1481 // :   arguments   :
1482 // |               | <--- Llocals
1483 // +---------------+ <--- Gargs
1484 // |               |
1485 
1486 static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
1487 
1488   // Figure out the size of an interpreter frame (in words) given that we have a fully allocated
1489   // expression stack, the callee will have callee_extra_locals (so we can account for
1490   // frame extension) and monitor_size for monitors. Basically we need to calculate
1491   // this exactly like generate_fixed_frame/generate_compute_interpreter_state.
1492   //
1493   //
  // The big complicating thing here is that we must ensure that the stack stays properly
  // aligned. This would be even uglier if monitor size weren't a multiple of what the
  // stack needs to be aligned to. We are given that the sp (fp) is already aligned by
  // the caller so we must ensure that it is properly aligned for our callee.
1498   //
1499   const int rounded_vm_local_words =
1500        round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
  // callee_extra_locals and max_stack are slot counts, not word sizes in the frame.
1502   const int locals_size =
1503        round_to(callee_extra_locals * Interpreter::stackElementWords, WordsPerLong);
1504   const int max_stack_words = max_stack * Interpreter::stackElementWords;
1505   return (round_to((max_stack_words
1506                    //6815692//+ methodOopDesc::extra_stack_words()
1507                    + rounded_vm_local_words
1508                    + frame::memory_parameter_word_sp_offset), WordsPerLong)
1509                    // already rounded
1510                    + locals_size + monitor_size);
1511 }
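
// Hedged worked example for size_activation_helper (the values are
// illustrative only; the real constants come from frame:: and Interpreter::):
// with max_stack = 4, callee_extra_locals = 3, monitor_size = 0,
// stackElementWords = 1, rounded_vm_local_words = 8, WordsPerLong = 2 and
// memory_parameter_word_sp_offset = 23, the result is
//   round_to(4 + 8 + 23, 2) + round_to(3 * 1, 2) + 0 = 36 + 4 = 40 words.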
1512 
// How much stack a method's top interpreter activation needs, in words.
1514 int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
1515 
1516   // See call_stub code
1517   int call_stub_size  = round_to(7 + frame::memory_parameter_word_sp_offset,
1518                                  WordsPerLong);    // 7 + register save area
1519 
1520   // Save space for one monitor to get into the interpreted method in case
1521   // the method is synchronized
1522   int monitor_size    = method->is_synchronized() ?
1523                                 1*frame::interpreter_frame_monitor_size() : 0;
1524   return size_activation_helper(method->max_locals(), method->max_stack(),
1525                                  monitor_size) + call_stub_size;
1526 }
1527 
1528 int AbstractInterpreter::layout_activation(methodOop method,
1529                                            int tempcount,
1530                                            int popframe_extra_args,
1531                                            int moncount,
1532                                            int callee_param_count,
1533                                            int callee_local_count,
1534                                            frame* caller,
1535                                            frame* interpreter_frame,
1536                                            bool is_top_frame) {
1537   // Note: This calculation must exactly parallel the frame setup
1538   // in InterpreterGenerator::generate_fixed_frame.
  // If interpreter_frame != NULL, set up the following variables:
  //   - Lmethod
  //   - Llocals
  //   - Lmonitors (to the indicated number of monitors)
  //   - Lesp (to the indicated number of temps)
  // The skeleton frame interpreter_frame (if not NULL) is guaranteed to be the
  // right size: the stack space was allocated and its amount determined by an
  // earlier call to this method with interpreter_frame == NULL, so we will be
  // able to fill in a new interpreter frame as the callee of *caller.
  // On return, interpreter_frame (if not NULL) will describe the interpreter
  // frame we just laid out.
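  //
  // Usage sketch (hedged; variable names are illustrative): deoptimization
  // first sizes the frame, then lays it out on a second pass:
  //   int words = AbstractInterpreter::layout_activation(m, tempcount,
  //                   popframe_extra_args, moncount, callee_params,
  //                   callee_locals, &caller, NULL /* size only */, is_top);
  //   ... reserve 'words' of stack and build a walkable skeleton frame ...
  //   AbstractInterpreter::layout_activation(m, tempcount, popframe_extra_args,
  //                   moncount, callee_params, callee_locals, &caller,
  //                   &skeleton, is_top);  // fills Lmethod/Llocals/Lmonitors/Lesp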
1549 
1550   int monitor_size           = moncount * frame::interpreter_frame_monitor_size();
1551   int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words,WordsPerLong);
1552 
1553   assert(monitor_size == round_to(monitor_size, WordsPerLong), "must align");
  //
  // Note: if you look closely this appears to be doing something much different
  // from generate_fixed_frame. What is happening is this. On sparc we have to do
  // this dance with interpreter_sp_adjustment because the window save area would
  // appear just below the bottom (tos) of the caller's java expression stack. Because
  // the interpreter wants the locals completely contiguous, generate_fixed_frame
  // will adjust the caller's sp for the "extra locals" (max_locals - parameter_size).
  // Now in generate_fixed_frame the extension of the caller's sp happens in the callee.
  // In this code the opposite occurs: the caller adjusts its own stack based on the
  // callee. This is mostly ok but it does cause a problem when we get to the initial
  // frame (the oldest), because that frame would have adjusted its caller's frame and
  // yet that frame already exists and isn't part of this array of frames we are
  // unpacking. So at first glance this would seem to mess up that frame. However,
  // Deoptimization::fetch_unroll_info_helper(), after it calculates all of the
  // frames' on_stack_size()'s, will figure out the amount to adjust the caller of the
  // initial (oldest) frame, and the calculation will all add up. It does seem like it
  // would be simpler to account for the adjustment here (and remove the callee...
  // parameters). However, that would mean this routine would have to take the caller
  // frame as input so we could adjust its sp (and set its interpreter_sp_adjustment)
  // and run the calling loop in reverse order. It would also appear to mean making
  // this code aware of the interactions when that initial caller frame was an osr or
  // other adapter frame. Deoptimization is complicated enough and hard enough to debug
  // that there is no sense in messing with working code.
  //
1577 
1578   int rounded_cls = round_to((callee_local_count - callee_param_count), WordsPerLong);
1579   assert(rounded_cls == round_to(rounded_cls, WordsPerLong), "must align");
1580 
1581   int raw_frame_size = size_activation_helper(rounded_cls, method->max_stack(),
1582                                               monitor_size);
1583 
1584   if (interpreter_frame != NULL) {
1585     // The skeleton frame must already look like an interpreter frame
1586     // even if not fully filled out.
1587     assert(interpreter_frame->is_interpreted_frame(), "Must be interpreted frame");
1588 
1589     intptr_t* fp = interpreter_frame->fp();
1590 
1591     JavaThread* thread = JavaThread::current();
1592     RegisterMap map(thread, false);
1593     // More verification that skeleton frame is properly walkable
1594     assert(fp == caller->sp(), "fp must match");
1595 
1596     intptr_t* montop     = fp - rounded_vm_local_words;
1597 
1598     // preallocate monitors (cf. __ add_monitor_to_stack)
1599     intptr_t* monitors = montop - monitor_size;
1600 
1601     // preallocate stack space
1602     intptr_t*  esp = monitors - 1 -
1603                      (tempcount * Interpreter::stackElementWords) -
1604                      popframe_extra_args;
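
    // Hedged sketch with illustrative counts: for moncount = 2, tempcount = 3,
    // popframe_extra_args = 0 and one-word stack elements, monitors sits
    // 2 * frame::interpreter_frame_monitor_size() words below montop, and
    // esp = monitors - 1 - 3, i.e. one word below the three preallocated temps.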
1605 
1606     int local_words = method->max_locals() * Interpreter::stackElementWords;
1607     int parm_words  = method->size_of_parameters() * Interpreter::stackElementWords;
1608     NEEDS_CLEANUP;
1609     intptr_t* locals;
1610     if (caller->is_interpreted_frame()) {
1611       // Can force the locals area to end up properly overlapping the top of the expression stack.
1612       intptr_t* Lesp_ptr = caller->interpreter_frame_tos_address() - 1;
1613       // Note that this computation means we replace size_of_parameters() values from the caller
1614       // interpreter frame's expression stack with our argument locals
1615       locals = Lesp_ptr + parm_words;
1616       int delta = local_words - parm_words;
1617       int computed_sp_adjustment = (delta > 0) ? round_to(delta, WordsPerLong) : 0;
1618       *interpreter_frame->register_addr(I5_savedSP)    = (intptr_t) (fp + computed_sp_adjustment) - STACK_BIAS;
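      // Hedged example of the adjustment above (hypothetical counts): with
      // max_locals = 6 and 2 one-word parameters, delta = 6 - 2 = 4, so
      // computed_sp_adjustment = round_to(4, WordsPerLong) = 4 words; the
      // caller's saved SP is recorded as if its frame had been extended by
      // those 4 words (before re-applying STACK_BIAS).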
1619     } else {
1620       assert(caller->is_compiled_frame() || caller->is_entry_frame(), "only possible cases");
1621       // Don't have Lesp available; lay out locals block in the caller
1622       // adjacent to the register window save area.
1623       //
1624       // Compiled frames do not allocate a varargs area which is why this if
1625       // statement is needed.
1626       //
1627       if (caller->is_compiled_frame()) {
1628         locals = fp + frame::register_save_words + local_words - 1;
1629       } else {
1630         locals = fp + frame::memory_parameter_word_sp_offset + local_words - 1;
1631       }
1632       if (!caller->is_entry_frame()) {
1633         // Caller wants his own SP back
1634         int caller_frame_size = caller->cb()->frame_size();
1635         *interpreter_frame->register_addr(I5_savedSP) = (intptr_t)(caller->fp() - caller_frame_size) - STACK_BIAS;
1636       }
1637     }
1638     if (TraceDeoptimization) {
1639       if (caller->is_entry_frame()) {
        // make sure I5_savedSP and the entry frame's notion of saved SP
        // agree.  This assertion duplicates a check in entry frame code
        // but catches the failure earlier.
        assert(*caller->register_addr(Lscratch) == *interpreter_frame->register_addr(I5_savedSP),
               "would change caller's SP");
1645       }
1646       if (caller->is_entry_frame()) {
1647         tty->print("entry ");
1648       }
1649       if (caller->is_compiled_frame()) {
1650         tty->print("compiled ");
1651         if (caller->is_deoptimized_frame()) {
1652           tty->print("(deopt) ");
1653         }
1654       }
1655       if (caller->is_interpreted_frame()) {
1656         tty->print("interpreted ");
1657       }
      tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)caller->sp());
      tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->sp(), (intptr_t)(caller->sp() + 16));
      tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)caller->fp(), (intptr_t)(caller->fp() + 16));
      tty->print_cr("interpreter fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)interpreter_frame->sp());
      tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->sp(), (intptr_t)(interpreter_frame->sp() + 16));
      tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, (intptr_t)interpreter_frame->fp(), (intptr_t)(interpreter_frame->fp() + 16));
      tty->print_cr("Llocals = " INTPTR_FORMAT, (intptr_t)locals);
      tty->print_cr("Lesp = " INTPTR_FORMAT, (intptr_t)esp);
      tty->print_cr("Lmonitors = " INTPTR_FORMAT, (intptr_t)monitors);
1667     }
1668 
1669     if (method->max_locals() > 0) {
1670       assert(locals < caller->sp() || locals >= (caller->sp() + 16), "locals in save area");
1671       assert(locals < caller->fp() || locals > (caller->fp() + 16), "locals in save area");
1672       assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
1673       assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
1674     }
1675 #ifdef _LP64
1676     assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
1677 #endif
1678 
1679     *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
1680     *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
1681     *interpreter_frame->register_addr(Lmonitors)   = (intptr_t) monitors;
1682     *interpreter_frame->register_addr(Lesp)        = (intptr_t) esp;
    // Llast_SP will be the same as SP as there is no adapter space
1684     *interpreter_frame->register_addr(Llast_SP)    = (intptr_t) interpreter_frame->sp() - STACK_BIAS;
1685     *interpreter_frame->register_addr(LcpoolCache) = (intptr_t) method->constants()->cache();
1686 #ifdef FAST_DISPATCH
1687     *interpreter_frame->register_addr(IdispatchTables) = (intptr_t) Interpreter::dispatch_table();
1688 #endif
1689 
1690 
1691 #ifdef ASSERT
1692     BasicObjectLock* mp = (BasicObjectLock*)monitors;
1693 
1694     assert(interpreter_frame->interpreter_frame_method() == method, "method matches");
1695     assert(interpreter_frame->interpreter_frame_local_at(9) == (intptr_t *)((intptr_t)locals - (9 * Interpreter::stackElementSize)), "locals match");
1696     assert(interpreter_frame->interpreter_frame_monitor_end()   == mp, "monitor_end matches");
1697     assert(((intptr_t *)interpreter_frame->interpreter_frame_monitor_begin()) == ((intptr_t *)mp)+monitor_size, "monitor_begin matches");
1698     assert(interpreter_frame->interpreter_frame_tos_address()-1 == esp, "esp matches");
1699 
1700     // check bounds
1701     intptr_t* lo = interpreter_frame->sp() + (frame::memory_parameter_word_sp_offset - 1);
1702     intptr_t* hi = interpreter_frame->fp() - rounded_vm_local_words;
1703     assert(lo < monitors && montop <= hi, "monitors in bounds");
1704     assert(lo <= esp && esp < monitors, "esp in bounds");
1705 #endif // ASSERT
1706   }
1707 
1708   return raw_frame_size;
1709 }
1710 
1711 //----------------------------------------------------------------------------------------------------
1712 // Exceptions
1713 void TemplateInterpreterGenerator::generate_throw_exception() {
1714 
1715   // Entry point in previous activation (i.e., if the caller was interpreted)
1716   Interpreter::_rethrow_exception_entry = __ pc();
1717   // O0: exception
1718 
1719   // entry point for exceptions thrown within interpreter code
1720   Interpreter::_throw_exception_entry = __ pc();
1721   __ verify_thread();
1722   // expression stack is undefined here
1723   // O0: exception, i.e. Oexception
1724   // Lbcp: exception bcx
1725   __ verify_oop(Oexception);
1726 
1727 
1728   // expression stack must be empty before entering the VM in case of an exception
1729   __ empty_expression_stack();
1730   // find exception handler address and preserve exception oop
1731   // call C routine to find handler and jump to it
1732   __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1733   __ push_ptr(O1); // push exception for exception handler bytecodes
1734 
  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1736   __ delayed()->nop();
1737 
1738 
1739   // if the exception is not handled in the current frame
1740   // the frame is removed and the exception is rethrown
1741   // (i.e. exception continuation is _rethrow_exception)
1742   //
  // Note: At this point the bci still refers to the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  //       at this point, GC will find a legal oop map (with empty expression stack).
1746 
1747   // in current activation
1748   // tos: exception
1749   // Lbcp: exception bcp
1750 
1751   //
1752   // JVMTI PopFrame support
1753   //
1754 
1755   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1756   Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1757   // Set the popframe_processing bit in popframe_condition indicating that we are
1758   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1759   // popframe handling cycles.
1760 
1761   __ ld(popframe_condition_addr, G3_scratch);
1762   __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1763   __ stw(G3_scratch, popframe_condition_addr);
1764 
1765   // Empty the expression stack, as in normal exception handling
1766   __ empty_expression_stack();
1767   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1768 
1769   {
1770     // Check to see whether we are returning to a deoptimized frame.
1771     // (The PopFrame call ensures that the caller of the popped frame is
1772     // either interpreted or compiled and deoptimizes it if compiled.)
1773     // In this case, we can't call dispatch_next() after the frame is
1774     // popped, but instead must save the incoming arguments and restore
1775     // them after deoptimization has occurred.
1776     //
1777     // Note that we don't compare the return PC against the
1778     // deoptimization blob's unpack entry because of the presence of
1779     // adapter frames in C2.
1780     Label caller_not_deoptimized;
1781     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1782     __ tst(O0);
1783     __ brx(Assembler::notEqual, false, Assembler::pt, caller_not_deoptimized);
1784     __ delayed()->nop();
1785 
1786     const Register Gtmp1 = G3_scratch;
1787     const Register Gtmp2 = G1_scratch;
1788 
1789     // Compute size of arguments for saving when returning to deoptimized caller
1790     __ lduh(Lmethod, in_bytes(methodOopDesc::size_of_parameters_offset()), Gtmp1);
1791     __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1792     __ sub(Llocals, Gtmp1, Gtmp2);
1793     __ add(Gtmp2, wordSize, Gtmp2);
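    // Sketch of the address arithmetic above (hedged; assumes one-word stack
    // elements and 3 parameters as an example): Gtmp1 = 3 * wordSize and
    // Gtmp2 = Llocals - 3 * wordSize + wordSize, i.e. the lowest-addressed
    // argument slot -- locals are laid out towards lower addresses on SPARC.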
1794     // Save these arguments
1795     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1796     // Inform deoptimization that it is responsible for restoring these arguments
1797     __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1799     __ st(Gtmp1, popframe_condition_addr);
1800 
1801     // Return from the current method
    // The caller's SP was adjusted upon method entry to accommodate
    // the callee's non-argument locals. Undo that adjustment.
1804     __ ret();
1805     __ delayed()->restore(I5_savedSP, G0, SP);
1806 
1807     __ bind(caller_not_deoptimized);
1808   }
1809 
1810   // Clear the popframe condition flag
1811   __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1812 
1813   // Get out of the current method (how this is done depends on the particular compiler calling
1814   // convention that the interpreter currently follows)
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
1817   __ restore(I5_savedSP, G0, SP);
1818   // The method data pointer was incremented already during
1819   // call profiling. We have to restore the mdp for the current bcp.
1820   if (ProfileInterpreter) {
1821     __ set_method_data_pointer_for_bcp();
1822   }
1823   // Resume bytecode interpretation at the current bcp
1824   __ dispatch_next(vtos);
1825   // end of JVMTI PopFrame support
1826 
1827   Interpreter::_remove_activation_entry = __ pc();
1828 
1829   // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1830   __ pop_ptr(Oexception);                                  // get exception
1831 
1832   // Intel has the following comment:
1833   //// remove the activation (without doing throws on illegalMonitorExceptions)
1834   // They remove the activation without checking for bad monitor state.
1835   // %%% We should make sure this is the right semantics before implementing.
1836 
1837   // %%% changed set_vm_result_2 to set_vm_result and get_vm_result_2 to get_vm_result. Is there a bug here?
1838   __ set_vm_result(Oexception);
1839   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1840 
1841   __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1842 
1843   __ get_vm_result(Oexception);
1844   __ verify_oop(Oexception);
1845 
  const int return_reg_adjustment = frame::pc_return_offset;
1847   Address issuing_pc_addr(I7, return_reg_adjustment);
1848 
1849   // We are done with this activation frame; find out where to go next.
1850   // The continuation point will be an exception handler, which expects
1851   // the following registers set up:
1852   //
1853   // Oexception: exception
  // Oissuing_pc: the local call that threw the exception
1855   // Other On: garbage
1856   // In/Ln:  the contents of the caller's register window
1857   //
1858   // We do the required restore at the last possible moment, because we
1859   // need to preserve some state across a runtime call.
1860   // (Remember that the caller activation is unknown--it might not be
1861   // interpreted, so things like Lscratch are useless in the caller.)
1862 
1863   // Although the Intel version uses call_C, we can use the more
1864   // compact call_VM.  (The only real difference on SPARC is a
1865   // harmlessly ignored [re]set_last_Java_frame, compared with
1866   // the Intel code which lacks this.)
1867   __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
1868   __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
1869   __ super_call_VM_leaf(L7_thread_cache,
1870                         CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1871                         G2_thread, Oissuing_pc->after_save());
1872 
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
1875   __ JMP(O0, 0);                         // return exception handler in caller
1876   __ delayed()->restore(I5_savedSP, G0, SP);
1877 
1878   // (same old exception object is already in Oexception; see above)
1879   // Note that an "issuing PC" is actually the next PC after the call
1880 }
1881 
1882 
1883 //
1884 // JVMTI ForceEarlyReturn support
1885 //
1886 
1887 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1888   address entry = __ pc();
1889 
1890   __ empty_expression_stack();
1891   __ load_earlyret_value(state);
1892 
1893   __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1894   Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1895 
1896   // Clear the earlyret state
1897   __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1898 
1899   __ remove_activation(state,
1900                        /* throw_monitor_exception */ false,
1901                        /* install_monitor_exception */ false);
1902 
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
1905   __ ret();                             // return to caller
1906   __ delayed()->restore(I5_savedSP, G0, SP);
1907 
1908   return entry;
1909 } // end of JVMTI ForceEarlyReturn support
1910 
1911 
1912 //------------------------------------------------------------------------------------------------------------------------
1913 // Helper for vtos entry point generation
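//
// Each vtos bytecode template gets one entry point per incoming tos state:
// the aep/fep/dep/lep entries push the value cached in the corresponding tos
// register onto the expression stack and branch to the common label, while
// iep pushes and simply falls through to vep, which needs no push at all.
// Dispatch sketch (hedged): a predecessor template that leaves an int in
// Otos_i jumps to iep, which pushes the int and falls into vep, where the
// template body itself is generated; byte/char/short share the int entry
// since those values are cached as ints.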
1914 
1915 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1916   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1917   Label L;
1918   aep = __ pc(); __ push_ptr(); __ ba(false, L); __ delayed()->nop();
1919   fep = __ pc(); __ push_f();   __ ba(false, L); __ delayed()->nop();
1920   dep = __ pc(); __ push_d();   __ ba(false, L); __ delayed()->nop();
1921   lep = __ pc(); __ push_l();   __ ba(false, L); __ delayed()->nop();
1922   iep = __ pc(); __ push_i();
1923   bep = cep = sep = iep;                        // there aren't any
1924   vep = __ pc(); __ bind(L);                    // fall through
1925   generate_and_dispatch(t);
1926 }
1927 
1928 // --------------------------------------------------------------------------------
1929 
1930 
1931 InterpreterGenerator::InterpreterGenerator(StubQueue* code)
1932  : TemplateInterpreterGenerator(code) {
1933    generate_all(); // down here so it can be "virtual"
1934 }
1935 
1936 // --------------------------------------------------------------------------------
1937 
1938 // Non-product code
1939 #ifndef PRODUCT
1940 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1941   address entry = __ pc();
1942 
1943   __ push(state);
1944   __ mov(O7, Lscratch); // protect return address within interpreter
1945 
1946   // Pass a 0 (not used in sparc) and the top of stack to the bytecode tracer
1947   __ mov( Otos_l2, G3_scratch );
1948   __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
1949   __ mov(Lscratch, O7); // restore return address
1950   __ pop(state);
1951   __ retl();
1952   __ delayed()->nop();
1953 
1954   return entry;
1955 }
1956 
1957 
1958 // helpers for generate_and_dispatch
1959 
1960 void TemplateInterpreterGenerator::count_bytecode() {
1961   __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1962 }
1963 
1964 
1965 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1966   __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1967 }
1968 
1969 
1970 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1971   AddressLiteral index   (&BytecodePairHistogram::_index);
1972   AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1973 
1974   // get index, shift out old bytecode, bring in new bytecode, and store it
1975   // _index = (_index >> log2_number_of_codes) |
1976   //          (bytecode << log2_number_of_codes);
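  //
  // Worked example (hedged; assumes log2_number_of_codes == 8): after
  // executing iload (0x15) and then iadd (0x60), _index becomes
  // (0x1500 >> 8) | (0x60 << 8) == 0x15 | 0x6000 == 0x6015, so the
  // (iload, iadd) pair bumps bucket 0x6015.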
1977 
1978   __ load_contents(index, G4_scratch);
1979   __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1980   __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
1981   __ or3( G3_scratch,  G4_scratch, G4_scratch );
1982   __ store_contents(G4_scratch, index, G3_scratch);
1983 
1984   // bump bucket contents
1985   // _counters[_index] ++;
1986 
1987   __ set(counters, G3_scratch);                       // loads into G3_scratch
1988   __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
1989   __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
1990   __ ld (G3_scratch, 0, G4_scratch);
1991   __ inc (G4_scratch);
1992   __ st (G4_scratch, 0, G3_scratch);
1993 }
1994 
1995 
1996 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
2000   address entry = Interpreter::trace_code(t->tos_in());
2001   guarantee(entry != NULL, "entry must have been generated");
2002   __ call(entry, relocInfo::none);
2003   __ delayed()->nop();
2004 }
2005 
2006 
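// Usage sketch (hedged): in a debug build, running with
// -XX:StopInterpreterAt=<n> makes the code generated below compare the
// global bytecode counter against the flag's value and trap into the
// debugger when they match.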
2007 void TemplateInterpreterGenerator::stop_interpreter_at() {
2008   AddressLiteral counter(&BytecodeCounter::_counter_value);
2009   __ load_contents(counter, G3_scratch);
2010   AddressLiteral stop_at(&StopInterpreterAt);
2011   __ load_ptr_contents(stop_at, G4_scratch);
2012   __ cmp(G3_scratch, G4_scratch);
2013   __ breakpoint_trap(Assembler::equal);
2014 }
2015 #endif // not PRODUCT
2016 #endif // !CC_INTERP