/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetCodeGen.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH
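// FAST_DISPATCH is defined and then immediately undefined above, so the
// FAST_DISPATCH-guarded code paths below are compiled out on this platform.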

// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;

// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2, etc.
// Doubles are passed in D0, D2, D4
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address TemplateInterpreterGenerator::generate_slow_signature_handler() {

  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame corresponding to the
  // outer interpreter frame
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();


  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();        // Argument 0 holds the signature
  __ ld_ptr( Sig, G3_scratch );                   // Get register argument signature word into G3_scratch
  __ mov( G3_scratch, G4_scratch);
  __ srl( G4_scratch, 2, G4_scratch);             // Skip Arg 0
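  // The signature word holds a 2-bit tag per argument (non_float, float_sig
  // or double_sig); shifting right by 2 advances to the next argument's tag.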
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
    if ( ldarg.is_register() )
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    else {
    // Optimization: see if there are any more args and get out prior to checking
    // all 16 float registers.  My guess is that this is rare.
    // If is_register is false, then we are done with the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadFloatArg);
    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadDoubleArg);
    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(NextArg);

  }

  __ bind(done);
  __ ret();
  __ delayed()->
     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
  return entry;
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
  // and the second is only used when the first is true.  We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);

}


// End of helpers

// Various method entries

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;

}

void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
  __ stx(O0, l_tmp);
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
  __ ldx(l_tmp, O0);
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

  // The callee returns with the stack possibly adjusted by adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index  = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments

  __ check_and_handle_popframe(Gtemp);
  __ check_and_handle_earlyret(Gtemp);

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (UseJVMCICompiler && state == vtos && step == 0) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
      __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
      __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// Lmethod: method
// ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending on whether we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
            in_bytes(MethodCounters::invocation_counter_offset()) +
            in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }

}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
 { Label ok;
   __ btst(JVM_ACC_SYNCHRONIZED, O0);
   __ br( Assembler::notZero, false, Assembler::pt, ok);
   __ delayed()->nop();
   __ stop("method doesn't need synchronization");
   __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    // lock the mirror, not the Klass*
    __ load_mirror(O0, Lmethod);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // Get the stack overflow limit, and in debug, verify it is non-zero.
  __ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
  Label limit_ok;
  __ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
  __ stop("stack overflow limit is zero in generate_stack_overflow_check");
  __ bind(limit_ok);
#endif

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add(Rscratch, Rframe_size, Rscratch);

  // The frame is greater than one page in size, so check against
  // the bottom of the stack.
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // The stack will overflow, throw an exception.

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // If you get to here, then there is enough stack space.
  __ bind(after_frame_check);
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:    points to the TOS of the caller's expression stack
//             after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the caller's stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
//  The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs  : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP by the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );

    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr(constMethod, Otmp1);
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);

    // See if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining.
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr(constMethod, Gframe_size);
    __ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  // Get mirror and store it in the frame as GC root for this Method*
  Register mirror = LcpoolCache;
  __ load_mirror(mirror, Lmethod);
  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
  __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
  __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
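  // Lesp now sits one word below Lmonitors, i.e. the expression stack is
  // empty; it grows toward lower addresses as values are pushed.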

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
#ifdef FAST_DISPATCH
    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
    // they both use I2.
    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
#endif // FAST_DISPATCH
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  Label slow_path;

  // In the G1 code we don't check if we need to reach a safepoint. We
  // continue and the thread will safepoint at the next bytecode dispatch.

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
  // check if local 0 == NULL and go to the slow path
  __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);

  BarrierSetCodeGen *code_gen = Universe::heap()->barrier_set()->code_gen();
  code_gen->load_at(_masm, ACCESS_ON_HEAP | GC_ACCESS_ON_WEAK, T_OBJECT,
                    Otos_i, noreg, referent_offset, Otos_i, G3_scratch);

  // _areturn
  __ retl();                      // return from leaf routine
  __ delayed()->mov(O5_savedSP, SP);

  // Generate regular method entry
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters
    const Register crc   = O0; // initial crc
    const Register val   = O1; // byte to update with
    const Register table = O2; // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc); // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc); // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters from the stack
    const Register crc    = O0; // initial crc
    const Register buf    = O1; // source java byte array address
    const Register len    = O2; // len
    const Register offset = O3; // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
    }

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters from the stack
    const Register crc    = O0; // initial crc
    const Register buf    = O1; // source java byte array address
    const Register offset = O2; // offset
    const Register end    = O3; // index of last element to process
    const Register len    = O2; // len argument to the kernel
    const Register table  = O3; // crc32c lookup table address

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    } else {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    }

    // Call the crc32c kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32c(crc, buf, len, table);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    return entry;
  }
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  return NULL;
}

// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
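    // Native calls bang only the outermost shadow page; regular methods bang
    // every page in the shadow zone.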
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  {
    Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

 // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
  // check this thread-local flag.
  // This flag has two effects: it forces an unwind in the topmost
  // interpreter frame and it suppresses the unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);

  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular)

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);
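  // O1 and O2 become I1 and I2 of the frame pushed below; they are re-read
  // from the new frame's in-registers after the save.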

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler. It will move the args properly since Llocals in the
  // current frame matches that in the outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    __ load_mirror(O1, Lmethod);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);
1331 
1332   // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD
1333 
1334   __ restore_thread(L7_thread_cache); // restore G2_thread
1335   __ reinit_heapbase();
1336 
1337   // must we block?
1338 
1339   // Block, if necessary, before resuming in _thread_in_Java state.
1340   // In order for GC to work, don't clear the last_Java_sp until after blocking.
1341   { Label no_block;
1342     AddressLiteral sync_state(SafepointSynchronize::address_of_state());
1343 
1344     // Switch thread to "native transition" state before reading the synchronization state.
1345     // This additional state is necessary because reading and testing the synchronization
1346     // state is not atomic w.r.t. GC, as this scenario demonstrates:
1347     //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1348     //     VM thread changes sync state to synchronizing and suspends threads for GC.
1349     //     Thread A is resumed to finish this native method, but doesn't block here since it
1350     //     didn't see any synchronization in progress, and escapes.
1351     __ set(_thread_in_native_trans, G3_scratch);
1352     __ st(G3_scratch, thread_state);
1353     if (os::is_MP()) {
1354       if (UseMembar) {
1355         // Force this write out before the read below
1356         __ membar(Assembler::StoreLoad);
1357       } else {
1358         // Write serialization page so VM thread can do a pseudo remote membar.
1359         // We use the current thread pointer to calculate a thread specific
1360         // offset to write to within the page. This minimizes bus traffic
1361         // due to cache line collision.
1362         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
1363       }
1364     }
1365     __ load_contents(sync_state, G3_scratch);
1366     __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
1367 
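         // Take the slow (block) path if either a safepoint/VM operation is in progress or this
         // thread has a pending suspend request; the suspend_flags load sits in the branch delay slot.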
1368     Label L;
1369     __ br(Assembler::notEqual, false, Assembler::pn, L);
1370     __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1371     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
1372     __ bind(L);
1373 
1374     // Block.  Save any potential method result value before the operation and
1375     // use a leaf call to leave the last_Java_frame setup undisturbed.
1376     save_native_result();
1377     __ call_VM_leaf(L7_thread_cache,
1378                     CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1379                     G2_thread);
1380 
1381     // Restore any method result value
1382     restore_native_result();
1383     __ bind(no_block);
1384   }
1385 
1386   // Clear the frame anchor now
1387 
1388   __ reset_last_Java_frame();
1389 
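       // Lscratch is a window-local register and its contents will be lost when the protection
       // frame is popped below, so shuttle the result handler address through a global register.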
1390   // Move the result handler address
1391   __ mov(Lscratch, G3_scratch);
1392   // return possible result to the outer frame
1393   __ restore(O0, G0, O0);
1394 
1395   // Move result handler to expected register
1396   __ mov(G3_scratch, Lscratch);
1397 
1398   // Back in the normal (native) interpreter frame. State is thread_in_native_trans;
1399   // switch to thread_in_Java.
1400 
1401   __ set(_thread_in_Java, G3_scratch);
1402   __ st(G3_scratch, thread_state);
1403 
1404   if (CheckJNICalls) {
1405     // clear_pending_jni_exception_check
1406     __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
1407   }
1408 
1409   // reset handle block
1410   __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
1411   __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
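       // (Zeroing the block's top effectively discards all JNI local references
       // created during the native call.)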
1412 
1413   // If we have an oop result, store it where it will be safe for any further GC
1414   // until we return, now that we've released the handle it might have been protected by.
1415 
1416   {
1417     Label no_oop;
1418 
1419     __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
1420     __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
1421     __ resolve_jobject(O0, G3_scratch);
1422     // Store it where gc will look for it and result handler expects it.
1423     __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
1424 
1425     __ bind(no_oop);
1426   }
1427 
1428 
1429   // handle exceptions (exception handling will handle unlocking!)
1430   { Label L;
1431     Address exception_addr(G2_thread, Thread::pending_exception_offset());
1432     __ ld_ptr(exception_addr, Gtemp);
1433     __ br_null_short(Gtemp, Assembler::pt, L);
1434     // Note: This could be handled more efficiently since we know that the native
1435     //       method doesn't have an exception handler. We could directly return
1436     //       to the exception handler for the caller.
1437     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
1438     __ should_not_reach_here();
1439     __ bind(L);
1440   }
1441 
1442   // JVMTI support (preserves thread register)
1443   __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
1444 
1445   if (synchronized) {
1446     // save and restore any potential method result value around the unlocking operation
1447     save_native_result();
1448 
1449     __ add( __ top_most_monitor(), O1);
1450     __ unlock_object(O1);
1451 
1452     restore_native_result();
1453   }
1454 
1455   // dispose of return address and remove activation
1456 #ifdef ASSERT
1457   {
1458     Label ok;
1459     __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
1460     __ stop("bad I5_savedSP value");
1461     __ should_not_reach_here();
1462     __ bind(ok);
1463   }
1464 #endif
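       // Tail-jump through the result handler (address in Lscratch); the handler converts the
       // raw native result into the expected Java return value as part of the return sequence.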
1465   __ jmp(Lscratch, 0);
1466   __ delayed()->nop();
1467 
1468 
1469   if (inc_counter) {
1470     // handle invocation counter overflow
1471     __ bind(invocation_counter_overflow);
1472     generate_counter_overflow(Lcontinue);
1473   }
1474 
1475 
1476 
1477   return entry;
1478 }
1479 
1480 
1481 // Generic method entry to (asm) interpreter
1482 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1483   address entry = __ pc();
1484 
1485   bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1486 
1487   // the following temporary registers are used during frame creation
1488   const Register Gtmp1 = G3_scratch ;
1489   const Register Gtmp2 = G1_scratch;
1490 
1491   // make sure registers are different!
1492   assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
1493 
1494   const Address constMethod       (G5_method, Method::const_offset());
1495   // G5_method seems to be live at the point this is used, so we could make this look consistent
1496   // and use it in the asserts.
1497   const Address access_flags      (Lmethod,   Method::access_flags_offset());
1498 
1499   const Register Glocals_size = G3;
1500   assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
1501 
1502   // make sure method is not native & not abstract
1503   // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
1504 #ifdef ASSERT
1505   __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
1506   {
1507     Label L;
1508     __ btst(JVM_ACC_NATIVE, Gtmp1);
1509     __ br(Assembler::zero, false, Assembler::pt, L);
1510     __ delayed()->nop();
1511     __ stop("tried to execute native method as non-native");
1512     __ bind(L);
1513   }
1514   { Label L;
1515     __ btst(JVM_ACC_ABSTRACT, Gtmp1);
1516     __ br(Assembler::zero, false, Assembler::pt, L);
1517     __ delayed()->nop();
1518     __ stop("tried to execute abstract method as non-abstract");
1519     __ bind(L);
1520   }
1521 #endif // ASSERT
1522 
1523   // generate the code to allocate the interpreter stack frame
1524 
1525   generate_fixed_frame(false);
1526 
1527 #ifdef FAST_DISPATCH
1528   __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
1529                                           // set bytecode dispatch table base
1530 #endif
1531 
1532   //
1533   // Code to initialize the extra (i.e. non-parm) locals
1534   //
1535   Register init_value = noreg;    // will be G0 if we must clear locals
1536   // In the previous setup of this code, zerolocals was always true for vanilla Java entries.
1537   // It could only be false for the specialized entries (accessor, empty), which have
1538   // no extra locals, so the test was a waste of time and the extra locals were always
1539   // initialized. We removed this extra complication from already over-complicated code.
1540 
1541   init_value = G0;
1542   Label clear_loop;
1543 
1544   const Register RconstMethod = O1;
1545   const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
1546   const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
1547 
1548   // NOTE: If you change the frame layout, this code will need to
1549   // be updated!
1550   __ ld_ptr( constMethod, RconstMethod );
1551   __ lduh( size_of_locals, O2 );
1552   __ lduh( size_of_parameters, O1 );
1553   __ sll( O2, Interpreter::logStackElementSize, O2);
1554   __ sll( O1, Interpreter::logStackElementSize, O1 );
1555   __ sub( Llocals, O2, O2 );
1556   __ sub( Llocals, O1, O1 );
1557 
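       // O2 now points one word below the lowest local and O1 at the first non-parameter local
       // (one word below the last parameter). The loop walks O2 upward, zeroing each
       // non-parameter local; the store sits in an annulled delay slot, so it executes only
       // when the branch back to clear_loop is taken.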
1558   __ bind( clear_loop );
1559   __ inc( O2, wordSize );
1560 
1561   __ cmp( O2, O1 );
1562   __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
1563   __ delayed()->st_ptr( init_value, O2, 0 );
1564 
1565   const Address do_not_unlock_if_synchronized(G2_thread,
1566     JavaThread::do_not_unlock_if_synchronized_offset());
1567   // At this point in the method invocation the exception handler
1568   // would try to exit the monitor of a synchronized method which has not
1569   // been entered yet, so we set the thread-local variable
1570   // _do_not_unlock_if_synchronized to true. If any exception is thrown by
1571   // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
1572   // check this thread-local flag.
1573   __ movbool(true, G3_scratch);
1574   __ stbool(G3_scratch, do_not_unlock_if_synchronized);
1575 
1576   __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
1577   // increment invocation counter and check for overflow
1578   //
1579   // Note: checking for negative value instead of overflow
1580   //       so we have a 'sticky' overflow test (may be of
1581   //       importance as soon as we have true MT/MP)
1582   Label invocation_counter_overflow;
1583   Label profile_method;
1584   Label profile_method_continue;
1585   Label Lcontinue;
1586   if (inc_counter) {
1587     generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1588     if (ProfileInterpreter) {
1589       __ bind(profile_method_continue);
1590     }
1591   }
1592   __ bind(Lcontinue);
1593 
1594   bang_stack_shadow_pages(false);
1595 
1596   // reset the _do_not_unlock_if_synchronized flag
1597   __ stbool(G0, do_not_unlock_if_synchronized);
1598 
1599   // check for synchronized methods
1600   // Must happen AFTER invocation_counter check and stack overflow check,
1601   // so the method is not locked if the counter overflows.
1602 
1603   if (synchronized) {
1604     lock_method();
1605   } else {
1606 #ifdef ASSERT
1607     { Label ok;
1608       __ ld(access_flags, O0);
1609       __ btst(JVM_ACC_SYNCHRONIZED, O0);
1610       __ br( Assembler::zero, false, Assembler::pt, ok);
1611       __ delayed()->nop();
1612       __ stop("method needs synchronization");
1613       __ bind(ok);
1614     }
1615 #endif // ASSERT
1616   }
1617 
1618   // start execution
1619 
1620   __ verify_thread();
1621 
1622   // jvmti support
1623   __ notify_method_entry();
1624 
1625   // start executing instructions
1626   __ dispatch_next(vtos);
1627 
1628 
1629   if (inc_counter) {
1630     if (ProfileInterpreter) {
1631       // We have decided to profile this method in the interpreter
1632       __ bind(profile_method);
1633 
1634       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1635       __ set_method_data_pointer_for_bcp();
1636       __ ba_short(profile_method_continue);
1637     }
1638 
1639     // handle invocation counter overflow
1640     __ bind(invocation_counter_overflow);
1641     generate_counter_overflow(Lcontinue);
1642   }
1643 
1644 
1645   return entry;
1646 }
1647 
1648 //----------------------------------------------------------------------------------------------------
1649 // Exceptions
1650 void TemplateInterpreterGenerator::generate_throw_exception() {
1651 
1652   // Entry point in previous activation (i.e., if the caller was interpreted)
1653   Interpreter::_rethrow_exception_entry = __ pc();
1654   // O0: exception
1655 
1656   // entry point for exceptions thrown within interpreter code
1657   Interpreter::_throw_exception_entry = __ pc();
1658   __ verify_thread();
1659   // expression stack is undefined here
1660   // O0: exception, i.e. Oexception
1661   // Lbcp: exception bcp
1662   __ verify_oop(Oexception);
1663 
1664 
1665   // expression stack must be empty before entering the VM in case of an exception
1666   __ empty_expression_stack();
1667   // find exception handler address and preserve exception oop
1668   // call C routine to find handler and jump to it
1669   __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
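       // On return, O0 holds the handler entry address and O1 the preserved exception oop
       // (the oop result retrieved from the thread's vm_result by call_VM).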
1670   __ push_ptr(O1); // push exception for exception handler bytecodes
1671 
1672   __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1673   __ delayed()->nop();
1674 
1675 
1676   // if the exception is not handled in the current frame
1677   // the frame is removed and the exception is rethrown
1678   // (i.e. exception continuation is _rethrow_exception)
1679   //
1680   // Note: At this point the bci is still the bci for the instruction which caused
1681   //       the exception and the expression stack is empty. Thus, for any VM calls
1682   //       at this point, GC will find a legal oop map (with empty expression stack).
1683 
1684   // in current activation
1685   // tos: exception
1686   // Lbcp: exception bcp
1687 
1688   //
1689   // JVMTI PopFrame support
1690   //
1691 
1692   Interpreter::_remove_activation_preserving_args_entry = __ pc();
1693   Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1694   // Set the popframe_processing bit in popframe_condition indicating that we are
1695   // currently handling popframe, so that call_VMs that may happen later do not trigger new
1696   // popframe handling cycles.
1697 
1698   __ ld(popframe_condition_addr, G3_scratch);
1699   __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1700   __ stw(G3_scratch, popframe_condition_addr);
1701 
1702   // Empty the expression stack, as in normal exception handling
1703   __ empty_expression_stack();
1704   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1705 
1706   {
1707     // Check to see whether we are returning to a deoptimized frame.
1708     // (The PopFrame call ensures that the caller of the popped frame is
1709     // either interpreted or compiled and deoptimizes it if compiled.)
1710     // In this case, we can't call dispatch_next() after the frame is
1711     // popped, but instead must save the incoming arguments and restore
1712     // them after deoptimization has occurred.
1713     //
1714     // Note that we don't compare the return PC against the
1715     // deoptimization blob's unpack entry because of the presence of
1716     // adapter frames in C2.
1717     Label caller_not_deoptimized;
1718     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
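         // O0 is non-zero iff the return address I7 lies within the interpreter code,
         // i.e. the caller is an interpreted frame.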
1719     __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
1720 
1721     const Register Gtmp1 = G3_scratch;
1722     const Register Gtmp2 = G1_scratch;
1723     const Register RconstMethod = Gtmp1;
1724     const Address constMethod(Lmethod, Method::const_offset());
1725     const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
1726 
1727     // Compute size of arguments for saving when returning to deoptimized caller
1728     __ ld_ptr(constMethod, RconstMethod);
1729     __ lduh(size_of_parameters, Gtmp1);
1730     __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1731     __ sub(Llocals, Gtmp1, Gtmp2);
1732     __ add(Gtmp2, wordSize, Gtmp2);
1733     // Save these arguments
1734     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1735     // Inform deoptimization that it is responsible for restoring these arguments
1736     __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1737     Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1738     __ st(Gtmp1, popframe_condition_addr);
1739 
1740     // Return from the current method
1741     // The caller's SP was adjusted upon method entry to accommodate
1742     // the callee's non-argument locals. Undo that adjustment.
1743     __ ret();
1744     __ delayed()->restore(I5_savedSP, G0, SP);
1745 
1746     __ bind(caller_not_deoptimized);
1747   }
1748 
1749   // Clear the popframe condition flag
1750   __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1751 
1752   // Get out of the current method (how this is done depends on the particular compiler calling
1753   // convention that the interpreter currently follows)
1754   // The caller's SP was adjusted upon method entry to accommodate
1755   // the callee's non-argument locals. Undo that adjustment.
1756   __ restore(I5_savedSP, G0, SP);
1757   // The method data pointer was incremented already during
1758   // call profiling. We have to restore the mdp for the current bcp.
1759   if (ProfileInterpreter) {
1760     __ set_method_data_pointer_for_bcp();
1761   }
1762 
1763 #if INCLUDE_JVMTI
1764   {
1765     Label L_done;
1766 
1767     __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
1768     __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
1769 
1770     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1771     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1772 
1773     __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
1774 
1775     __ br_null(G1_scratch, false, Assembler::pn, L_done);
1776     __ delayed()->nop();
1777 
1778     __ st_ptr(G1_scratch, Lesp, wordSize);
1779     __ bind(L_done);
1780   }
1781 #endif // INCLUDE_JVMTI
1782 
1783   // Resume bytecode interpretation at the current bcp
1784   __ dispatch_next(vtos);
1785   // end of JVMTI PopFrame support
1786 
1787   Interpreter::_remove_activation_entry = __ pc();
1788 
1789   // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1790   __ pop_ptr(Oexception);                                  // get exception
1791 
1792   // Intel has the following comment:
1793   //// remove the activation (without doing throws on illegalMonitorExceptions)
1794   // They remove the activation without checking for bad monitor state.
1795   // %%% We should make sure this is the right semantics before implementing.
1796 
1797   __ set_vm_result(Oexception);
1798   __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1799 
1800   __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1801 
1802   __ get_vm_result(Oexception);
1803   __ verify_oop(Oexception);
1804 
1805   const int return_reg_adjustment = frame::pc_return_offset;
1806   Address issuing_pc_addr(I7, return_reg_adjustment);
1807 
1808   // We are done with this activation frame; find out where to go next.
1809   // The continuation point will be an exception handler, which expects
1810   // the following registers set up:
1811   //
1812   // Oexception: exception
1813   // Oissuing_pc: the local call that threw exception
1814   // Other On: garbage
1815   // In/Ln:  the contents of the caller's register window
1816   //
1817   // We do the required restore at the last possible moment, because we
1818   // need to preserve some state across a runtime call.
1819   // (Remember that the caller activation is unknown--it might not be
1820   // interpreted, so things like Lscratch are useless in the caller.)
1821 
1822   // Although the Intel version uses call_C, we can use the more
1823   // compact call_VM.  (The only real difference on SPARC is a
1824   // harmlessly ignored [re]set_last_Java_frame, compared with
1825   // the Intel code which lacks this.)
1826   __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
1827   __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
1828   __ super_call_VM_leaf(L7_thread_cache,
1829                         CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1830                         G2_thread, Oissuing_pc->after_save());
1831 
1832   // The caller's SP was adjusted upon method entry to accommodate
1833   // the callee's non-argument locals. Undo that adjustment.
1834   __ JMP(O0, 0);                         // return exception handler in caller
1835   __ delayed()->restore(I5_savedSP, G0, SP);
1836 
1837   // (same old exception object is already in Oexception; see above)
1838   // Note that an "issuing PC" is actually the next PC after the call
1839 }
1840 
1841 
1842 //
1843 // JVMTI ForceEarlyReturn support
1844 //
1845 
1846 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1847   address entry = __ pc();
1848 
1849   __ empty_expression_stack();
1850   __ load_earlyret_value(state);
1851 
1852   __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1853   Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1854 
1855   // Clear the earlyret state
1856   __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1857 
1858   __ remove_activation(state,
1859                        /* throw_monitor_exception */ false,
1860                        /* install_monitor_exception */ false);
1861 
1862   // The caller's SP was adjusted upon method entry to accommodate
1863   // the callee's non-argument locals. Undo that adjustment.
1864   __ ret();                             // return to caller
1865   __ delayed()->restore(I5_savedSP, G0, SP);
1866 
1867   return entry;
1868 } // end of JVMTI ForceEarlyReturn support
1869 
1870 
1871 //------------------------------------------------------------------------------------------------------------------------
1872 // Helper for vtos entry point generation
1873 
1874 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1875   assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
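       // Each tos-specific entry pushes its top-of-stack value onto the expression stack and
       // then joins the common vtos entry at L; the itos entry simply falls through.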
1876   Label L;
1877   aep = __ pc(); __ push_ptr(); __ ba_short(L);
1878   fep = __ pc(); __ push_f();   __ ba_short(L);
1879   dep = __ pc(); __ push_d();   __ ba_short(L);
1880   lep = __ pc(); __ push_l();   __ ba_short(L);
1881   iep = __ pc(); __ push_i();
1882   bep = cep = sep = iep;                        // there aren't any
1883   vep = __ pc(); __ bind(L);                    // fall through
1884   generate_and_dispatch(t);
1885 }
1886 
1887 // --------------------------------------------------------------------------------
1888 
1889 // Non-product code
1890 #ifndef PRODUCT
1891 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1892   address entry = __ pc();
1893 
1894   __ push(state);
1895   __ mov(O7, Lscratch); // protect return address within interpreter
1896 
1897   // Pass a 0 (not used on SPARC) and the top of stack to the bytecode tracer
1898   __ mov( Otos_l2, G3_scratch );
1899   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
1900   __ mov(Lscratch, O7); // restore return address
1901   __ pop(state);
1902   __ retl();
1903   __ delayed()->nop();
1904 
1905   return entry;
1906 }
1907 
1908 
1909 // helpers for generate_and_dispatch
1910 
1911 void TemplateInterpreterGenerator::count_bytecode() {
1912   __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1913 }
1914 
1915 
1916 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1917   __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1918 }
1919 
1920 
1921 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1922   AddressLiteral index   (&BytecodePairHistogram::_index);
1923   AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1924 
1925   // get index, shift out old bytecode, bring in new bytecode, and store it
1926   // _index = (_index >> log2_number_of_codes) |
1927   //          (bytecode << log2_number_of_codes);
1928 
1929   __ load_contents(index, G4_scratch);
1930   __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1931   __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
1932   __ or3( G3_scratch,  G4_scratch, G4_scratch );
1933   __ store_contents(G4_scratch, index, G3_scratch);
1934 
1935   // bump bucket contents
1936   // _counters[_index] ++;
1937 
1938   __ set(counters, G3_scratch);                       // loads into G3_scratch
1939   __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
1940   __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
1941   __ ld (G3_scratch, 0, G4_scratch);
1942   __ inc (G4_scratch);
1943   __ st (G4_scratch, 0, G3_scratch);
1944 }
1945 
1946 
1947 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
1948   // Call a little run-time stub to avoid blow-up for each bytecode.
1949   // The run-time stub saves the right registers, depending on
1950   // the tosca in-state for the given template.
1951   address entry = Interpreter::trace_code(t->tos_in());
1952   guarantee(entry != NULL, "entry must have been generated");
1953   __ call(entry, relocInfo::none);
1954   __ delayed()->nop();
1955 }
1956 
1957 
1958 void TemplateInterpreterGenerator::stop_interpreter_at() {
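       // Compare the dynamic bytecode count against StopInterpreterAt and trap into
       // the debugger when they match.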
1959   AddressLiteral counter(&BytecodeCounter::_counter_value);
1960   __ load_contents(counter, G3_scratch);
1961   AddressLiteral stop_at(&StopInterpreterAt);
1962   __ load_ptr_contents(stop_at, G4_scratch);
1963   __ cmp(G3_scratch, G4_scratch);
1964   __ breakpoint_trap(Assembler::equal, Assembler::icc);
1965 }
1966 #endif // not PRODUCT