1 /*
   2  * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // no precompiled headers
  26 #include "classfile/vmSymbols.hpp"
  27 #include "gc_interface/collectedHeap.hpp"
  28 #include "interpreter/bytecodeHistogram.hpp"
  29 #include "interpreter/bytecodeInterpreter.hpp"
  30 #include "interpreter/bytecodeInterpreter.inline.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "memory/cardTableModRefBS.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/methodCounters.hpp"
  36 #include "oops/objArrayKlass.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "prims/jvmtiExport.hpp"
  39 #include "runtime/frame.inline.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/interfaceSupport.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/threadCritical.hpp"
  44 #include "utilities/exceptions.hpp"
  45 #ifdef TARGET_OS_ARCH_linux_x86
  46 # include "orderAccess_linux_x86.inline.hpp"
  47 #endif
  48 #ifdef TARGET_OS_ARCH_linux_sparc
  49 # include "orderAccess_linux_sparc.inline.hpp"
  50 #endif
  51 #ifdef TARGET_OS_ARCH_linux_zero
  52 # include "orderAccess_linux_zero.inline.hpp"
  53 #endif
  54 #ifdef TARGET_OS_ARCH_solaris_x86
  55 # include "orderAccess_solaris_x86.inline.hpp"
  56 #endif
  57 #ifdef TARGET_OS_ARCH_solaris_sparc
  58 # include "orderAccess_solaris_sparc.inline.hpp"
  59 #endif
  60 #ifdef TARGET_OS_ARCH_windows_x86
  61 # include "orderAccess_windows_x86.inline.hpp"
  62 #endif
  63 #ifdef TARGET_OS_ARCH_linux_arm
  64 # include "orderAccess_linux_arm.inline.hpp"
  65 #endif
  66 #ifdef TARGET_OS_ARCH_linux_ppc
  67 # include "orderAccess_linux_ppc.inline.hpp"
  68 #endif
  69 #ifdef TARGET_OS_ARCH_bsd_x86
  70 # include "orderAccess_bsd_x86.inline.hpp"
  71 #endif
  72 #ifdef TARGET_OS_ARCH_bsd_zero
  73 # include "orderAccess_bsd_zero.inline.hpp"
  74 #endif
  75 
  76 
  77 // no precompiled headers
  78 #ifdef CC_INTERP
  79 
  80 /*
  81  * USELABELS - If using GCC, then use labels for the opcode dispatching
  82  * rather -then a switch statement. This improves performance because it
  83  * gives us the oportunity to have the instructions that calculate the
  84  * next opcode to jump to be intermixed with the rest of the instructions
  85  * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
  86  */
  87 #undef USELABELS
  88 #ifdef __GNUC__
  89 /*
  90    ASSERT signifies debugging. It is much easier to step thru bytecodes if we
  91    don't use the computed goto approach.
  92 */
  93 #ifndef ASSERT
  94 #define USELABELS
  95 #endif
  96 #endif
  97 
  98 #undef CASE
  99 #ifdef USELABELS
 100 #define CASE(opcode) opc ## opcode
 101 #define DEFAULT opc_default
 102 #else
 103 #define CASE(opcode) case Bytecodes:: opcode
 104 #define DEFAULT default
 105 #endif
 106 
 107 /*
 108  * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 109  * opcode before going back to the top of the while loop, rather then having
 110  * the top of the while loop handle it. This provides a better opportunity
 111  * for instruction scheduling. Some compilers just do this prefetch
 112  * automatically. Some actually end up with worse performance if you
 113  * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 114  */
 115 #undef PREFETCH_OPCCODE
 116 #define PREFETCH_OPCCODE
 117 
 118 /*
 119   Interpreter safepoint: it is expected that the interpreter will have no live
 120   handles of its own creation live at an interpreter safepoint. Therefore we
 121   run a HandleMarkCleaner and trash all handles allocated in the call chain
 122   since the JavaCalls::call_helper invocation that initiated the chain.
 123   There really shouldn't be any handles remaining to trash but this is cheap
 124   in relation to a safepoint.
 125 */
 126 #define SAFEPOINT                                                                 \
 127     if ( SafepointSynchronize::is_synchronizing()) {                              \
 128         {                                                                         \
 129           /* zap freed handles rather than GC'ing them */                         \
 130           HandleMarkCleaner __hmc(THREAD);                                        \
 131         }                                                                         \
 132         CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
 133     }
 134 
 135 /*
 136  * VM_JAVA_ERROR - Macro for throwing a java exception from
 137  * the interpreter loop. Should really be a CALL_VM but there
 138  * is no entry point to do the transition to vm so we just
 139  * do it by hand here.
 140  */
 141 #define VM_JAVA_ERROR_NO_JUMP(name, msg)                                          \
 142     DECACHE_STATE();                                                              \
 143     SET_LAST_JAVA_FRAME();                                                        \
 144     {                                                                             \
 145        ThreadInVMfromJava trans(THREAD);                                          \
 146        Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);             \
 147     }                                                                             \
 148     RESET_LAST_JAVA_FRAME();                                                      \
 149     CACHE_STATE();
 150 
 151 // Normal throw of a java error
 152 #define VM_JAVA_ERROR(name, msg)                                                  \
 153     VM_JAVA_ERROR_NO_JUMP(name, msg)                                              \
 154     goto handle_exception;
 155 
 156 #ifdef PRODUCT
 157 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
 158 #else
 159 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
 160 {                                                                                                    \
 161     BytecodeCounter::_counter_value++;                                                               \
 162     BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
 163     if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
 164     if (TraceBytecodes) {                                                                            \
 165       CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,               \
 166                                    topOfStack[Interpreter::expr_index_at(1)],   \
 167                                    topOfStack[Interpreter::expr_index_at(2)]),  \
 168                                    handle_exception);                      \
 169     }                                                                      \
 170 }
 171 #endif
 172 
 173 #undef DEBUGGER_SINGLE_STEP_NOTIFY
 174 #ifdef VM_JVMTI
 175 /* NOTE: (kbr) This macro must be called AFTER the PC has been
 176    incremented. JvmtiExport::at_single_stepping_point() may cause a
 177    breakpoint opcode to get inserted at the current PC to allow the
 178    debugger to coalesce single-step events.
 179 
 180    As a result if we call at_single_stepping_point() we refetch opcode
 181    to get the current opcode. This will override any other prefetching
 182    that might have occurred.
 183 */
 184 #define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
 185 {                                                                                \
 186       if (_jvmti_interp_events) {                                                \
 187         if (JvmtiExport::should_post_single_step()) {                            \
 188           DECACHE_STATE();                                                       \
 189           SET_LAST_JAVA_FRAME();                                                 \
 190           ThreadInVMfromJava trans(THREAD);                                      \
 191           JvmtiExport::at_single_stepping_point(THREAD,                          \
 192                                           istate->method(),                      \
 193                                           pc);                                   \
 194           RESET_LAST_JAVA_FRAME();                                               \
 195           CACHE_STATE();                                                         \
 196           if (THREAD->pop_frame_pending() &&                                     \
 197               !THREAD->pop_frame_in_process()) {                                 \
 198             goto handle_Pop_Frame;                                               \
 199           }                                                                      \
 200           opcode = *pc;                                                          \
 201         }                                                                        \
 202       }                                                                          \
 203 }
 204 #else
 205 #define DEBUGGER_SINGLE_STEP_NOTIFY()
 206 #endif
 207 
 208 /*
 209  * CONTINUE - Macro for executing the next opcode.
 210  */
 211 #undef CONTINUE
 212 #ifdef USELABELS
 213 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
 214 // initialization (which is is the initialization of the table pointer...)
 215 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
 216 #define CONTINUE {                              \
 217         opcode = *pc;                           \
 218         DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
 219         DEBUGGER_SINGLE_STEP_NOTIFY();          \
 220         DISPATCH(opcode);                       \
 221     }
 222 #else
 223 #ifdef PREFETCH_OPCCODE
 224 #define CONTINUE {                              \
 225         opcode = *pc;                           \
 226         DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
 227         DEBUGGER_SINGLE_STEP_NOTIFY();          \
 228         continue;                               \
 229     }
 230 #else
 231 #define CONTINUE {                              \
 232         DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
 233         DEBUGGER_SINGLE_STEP_NOTIFY();          \
 234         continue;                               \
 235     }
 236 #endif
 237 #endif
 238 
 239 
 240 #define UPDATE_PC(opsize) {pc += opsize; }
 241 /*
 242  * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 243  */
 244 #undef UPDATE_PC_AND_TOS
 245 #define UPDATE_PC_AND_TOS(opsize, stack) \
 246     {pc += opsize; MORE_STACK(stack); }
 247 
 248 /*
 249  * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 250  * and executing the next opcode. It's somewhat similar to the combination
 251  * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 252  */
 253 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
 254 #ifdef USELABELS
 255 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
 256         pc += opsize; opcode = *pc; MORE_STACK(stack);          \
 257         DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
 258         DEBUGGER_SINGLE_STEP_NOTIFY();                          \
 259         DISPATCH(opcode);                                       \
 260     }
 261 
 262 #define UPDATE_PC_AND_CONTINUE(opsize) {                        \
 263         pc += opsize; opcode = *pc;                             \
 264         DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
 265         DEBUGGER_SINGLE_STEP_NOTIFY();                          \
 266         DISPATCH(opcode);                                       \
 267     }
 268 #else
 269 #ifdef PREFETCH_OPCCODE
 270 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
 271         pc += opsize; opcode = *pc; MORE_STACK(stack);          \
 272         DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
 273         DEBUGGER_SINGLE_STEP_NOTIFY();                          \
 274         goto do_continue;                                       \
 275     }
 276 
 277 #define UPDATE_PC_AND_CONTINUE(opsize) {                        \
 278         pc += opsize; opcode = *pc;                             \
 279         DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
 280         DEBUGGER_SINGLE_STEP_NOTIFY();                          \
 281         goto do_continue;                                       \
 282     }
 283 #else
 284 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
 285         pc += opsize; MORE_STACK(stack);                \
 286         DO_UPDATE_INSTRUCTION_COUNT(opcode);            \
 287         DEBUGGER_SINGLE_STEP_NOTIFY();                  \
 288         goto do_continue;                               \
 289     }
 290 
 291 #define UPDATE_PC_AND_CONTINUE(opsize) {                \
 292         pc += opsize;                                   \
 293         DO_UPDATE_INSTRUCTION_COUNT(opcode);            \
 294         DEBUGGER_SINGLE_STEP_NOTIFY();                  \
 295         goto do_continue;                               \
 296     }
 297 #endif /* PREFETCH_OPCCODE */
 298 #endif /* USELABELS */
 299 
 300 // About to call a new method, update the save the adjusted pc and return to frame manager
 301 #define UPDATE_PC_AND_RETURN(opsize)  \
 302    DECACHE_TOS();                     \
 303    istate->set_bcp(pc+opsize);        \
 304    return;
 305 
 306 
 307 #define METHOD istate->method()
 308 #define GET_METHOD_COUNTERS(res)    \
 309   res = METHOD->method_counters();  \
 310   if (res == NULL) {                \
 311     CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
 312   }
 313 
 314 #define OSR_REQUEST(res, branch_pc) \
 315             CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
 316 /*
 317  * For those opcodes that need to have a GC point on a backwards branch
 318  */
 319 
 320 // Backedge counting is kind of strange. The asm interpreter will increment
 321 // the backedge counter as a separate counter but it does it's comparisons
 322 // to the sum (scaled) of invocation counter and backedge count to make
 323 // a decision. Seems kind of odd to sum them together like that
 324 
 325 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
 326 
 327 
 328 #define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
 329     if ((skip) <= 0) {                                                                              \
 330       MethodCounters* mcs;                                                                          \
 331       GET_METHOD_COUNTERS(mcs);                                                                     \
 332       if (UseLoopCounter) {                                                                         \
 333         bool do_OSR = UseOnStackReplacement;                                                        \
 334         mcs->backedge_counter()->increment();                                                       \
 335         if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit();                    \
 336         if (do_OSR) {                                                                               \
 337           nmethod*  osr_nmethod;                                                                    \
 338           OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
 339           if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
 340             intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD);                             \
 341             istate->set_msg(do_osr);                                                                \
 342             istate->set_osr_buf((address)buf);                                                      \
 343             istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
 344             return;                                                                                 \
 345           }                                                                                         \
 346         }                                                                                           \
 347       }  /* UseCompiler ... */                                                                      \
 348       mcs->invocation_counter()->increment();                                                       \
 349       SAFEPOINT;                                                                                    \
 350     }
 351 
 352 /*
 353  * For those opcodes that need to have a GC point on a backwards branch
 354  */
 355 
 356 /*
 357  * Macros for caching and flushing the interpreter state. Some local
 358  * variables need to be flushed out to the frame before we do certain
 359  * things (like pushing frames or becomming gc safe) and some need to
 360  * be recached later (like after popping a frame). We could use one
 361  * macro to cache or decache everything, but this would be less then
 362  * optimal because we don't always need to cache or decache everything
 363  * because some things we know are already cached or decached.
 364  */
 365 #undef DECACHE_TOS
 366 #undef CACHE_TOS
 367 #undef CACHE_PREV_TOS
 368 #define DECACHE_TOS()    istate->set_stack(topOfStack);
 369 
 370 #define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();
 371 
 372 #undef DECACHE_PC
 373 #undef CACHE_PC
 374 #define DECACHE_PC()    istate->set_bcp(pc);
 375 #define CACHE_PC()      pc = istate->bcp();
 376 #define CACHE_CP()      cp = istate->constants();
 377 #define CACHE_LOCALS()  locals = istate->locals();
 378 #undef CACHE_FRAME
 379 #define CACHE_FRAME()
 380 
 381 /*
 382  * CHECK_NULL - Macro for throwing a NullPointerException if the object
 383  * passed is a null ref.
 384  * On some architectures/platforms it should be possible to do this implicitly
 385  */
 386 #undef CHECK_NULL
 387 #define CHECK_NULL(obj_)                                                 \
 388     if ((obj_) == NULL) {                                                \
 389         VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
 390     }                                                                    \
 391     VERIFY_OOP(obj_)
 392 
 393 #define VMdoubleConstZero() 0.0
 394 #define VMdoubleConstOne() 1.0
 395 #define VMlongConstZero() (max_jlong-max_jlong)
 396 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
 397 
 398 /*
 399  * Alignment
 400  */
 401 #define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)
 402 
 403 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
 404 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
 405 
 406 // Reload interpreter state after calling the VM or a possible GC
 407 #define CACHE_STATE()   \
 408         CACHE_TOS();    \
 409         CACHE_PC();     \
 410         CACHE_CP();     \
 411         CACHE_LOCALS();
 412 
 413 // Call the VM don't check for pending exceptions
 414 #define CALL_VM_NOCHECK(func)                                     \
 415           DECACHE_STATE();                                        \
 416           SET_LAST_JAVA_FRAME();                                  \
 417           func;                                                   \
 418           RESET_LAST_JAVA_FRAME();                                \
 419           CACHE_STATE();                                          \
 420           if (THREAD->pop_frame_pending() &&                      \
 421               !THREAD->pop_frame_in_process()) {                  \
 422             goto handle_Pop_Frame;                                \
 423           }
 424 
 425 // Call the VM and check for pending exceptions
 426 #define CALL_VM(func, label) {                                    \
 427           CALL_VM_NOCHECK(func);                                  \
 428           if (THREAD->has_pending_exception()) goto label;        \
 429         }
 430 
 431 /*
 432  * BytecodeInterpreter::run(interpreterState istate)
 433  * BytecodeInterpreter::runWithChecks(interpreterState istate)
 434  *
 435  * The real deal. This is where byte codes actually get interpreted.
 436  * Basically it's a big while loop that iterates until we return from
 437  * the method passed in.
 438  *
 439  * The runWithChecks is used if JVMTI is enabled.
 440  *
 441  */
 442 #if defined(VM_JVMTI)
 443 void
 444 BytecodeInterpreter::runWithChecks(interpreterState istate) {
 445 #else
 446 void
 447 BytecodeInterpreter::run(interpreterState istate) {
 448 #endif
 449 
 450   // In order to simplify some tests based on switches set at runtime
 451   // we invoke the interpreter a single time after switches are enabled
 452   // and set simpler to to test variables rather than method calls or complex
 453   // boolean expressions.
 454 
 455   static int initialized = 0;
 456   static int checkit = 0;
 457   static intptr_t* c_addr = NULL;
 458   static intptr_t  c_value;
 459 
 460   if (checkit && *c_addr != c_value) {
 461     os::breakpoint();
 462   }
 463 #ifdef VM_JVMTI
 464   static bool _jvmti_interp_events = 0;
 465 #endif
 466 
 467   static int _compiling;  // (UseCompiler || CountCompiledCalls)
 468 
 469 #ifdef ASSERT
 470   if (istate->_msg != initialize) {
 471     // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
 472     // because in that case, EnableInvokeDynamic is true by default but will be later switched off
 473     // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
 474     // for the old JSR292 implementation.
 475     // This leads to a situation where 'istate->_stack_limit' always accounts for
 476     // methodOopDesc::extra_stack_entries() because it is computed in
 477     // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
 478     // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
 479     // account for extra_stack_entries() anymore because at the time when it is called
 480     // EnableInvokeDynamic was already set to false.
 481     // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
 482     // switched off because of the wrong classes.
 483     if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
 484       assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
 485     } else {
 486       const int extra_stack_entries = Method::extra_stack_entries_for_indy;
 487       assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
 488                                                                                                + 1), "bad stack limit");
 489     }
 490 #ifndef SHARK
 491     IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
 492 #endif // !SHARK
 493   }
 494   // Verify linkages.
 495   interpreterState l = istate;
 496   do {
 497     assert(l == l->_self_link, "bad link");
 498     l = l->_prev_link;
 499   } while (l != NULL);
 500   // Screwups with stack management usually cause us to overwrite istate
 501   // save a copy so we can verify it.
 502   interpreterState orig = istate;
 503 #endif
 504 
 505   static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier
 506 
 507   register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
 508   register address          pc = istate->bcp();
 509   register jubyte opcode;
 510   register intptr_t*        locals = istate->locals();
 511   register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
 512 #ifdef LOTS_OF_REGS
 513   register JavaThread*      THREAD = istate->thread();
 514   register volatile jbyte*  BYTE_MAP_BASE = _byte_map_base;
 515 #else
 516 #undef THREAD
 517 #define THREAD istate->thread()
 518 #undef BYTE_MAP_BASE
 519 #define BYTE_MAP_BASE _byte_map_base
 520 #endif
 521 
 522 #ifdef USELABELS
 523   const static void* const opclabels_data[256] = {
 524 /* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
 525 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
 526 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
 527 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,
 528 
 529 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
 530 /* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
 531 /* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
 532 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
 533 
 534 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
 535 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
 536 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
 537 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
 538 
 539 /* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
 540 /* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
 541 /* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
 542 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
 543 
 544 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
 545 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
 546 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
 547 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
 548 
 549 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
 550 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
 551 /* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
 552 /* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
 553 
 554 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
 555 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
 556 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
 557 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
 558 
 559 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
 560 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
 561 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
 562 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
 563 
 564 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
 565 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
 566 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
 567 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
 568 
 569 /* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
 570 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
 571 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
 572 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
 573 
 574 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge,  &&opc_if_icmpgt,
 575 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne,  &&opc_goto,
 576 /* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
 577 /* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,
 578 
 579 /* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
 580 /* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
 581 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
 582 /* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,
 583 
 584 /* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
 585 /* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
 586 /* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
 587 /* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 588 
 589 /* 0xD0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 590 /* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 591 /* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 592 /* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 593 
 594 /* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 595 /* 0xE4 */ &&opc_default,     &&opc_fast_aldc,      &&opc_fast_aldc_w,  &&opc_return_register_finalizer,
 596 /* 0xE8 */ &&opc_invokehandle,&&opc_default,        &&opc_default,      &&opc_default,
 597 /* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 598 
 599 /* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 600 /* 0xF4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 601 /* 0xF8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
 602 /* 0xFC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default
 603   };
 604   register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
 605 #endif /* USELABELS */
 606 
 607 #ifdef ASSERT
 608   // this will trigger a VERIFY_OOP on entry
 609   if (istate->msg() != initialize && ! METHOD->is_static()) {
 610     oop rcvr = LOCALS_OBJECT(0);
 611     VERIFY_OOP(rcvr);
 612   }
 613 #endif
 614 // #define HACK
 615 #ifdef HACK
 616   bool interesting = false;
 617 #endif // HACK
 618 
 619   /* QQQ this should be a stack method so we don't know actual direction */
 620   guarantee(istate->msg() == initialize ||
 621          topOfStack >= istate->stack_limit() &&
 622          topOfStack < istate->stack_base(),
 623          "Stack top out of range");
 624 
 625   switch (istate->msg()) {
 626     case initialize: {
 627       if (initialized++) ShouldNotReachHere(); // Only one initialize call
 628       _compiling = (UseCompiler || CountCompiledCalls);
 629 #ifdef VM_JVMTI
 630       _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
 631 #endif
 632       BarrierSet* bs = Universe::heap()->barrier_set();
 633       assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
 634       _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
 635       return;
 636     }
 637     break;
 638     case method_entry: {
 639       THREAD->set_do_not_unlock();
 640       // count invocations
 641       assert(initialized, "Interpreter not initialized");
 642       if (_compiling) {
 643         MethodCounters* mcs;
 644         GET_METHOD_COUNTERS(mcs);
 645         if (ProfileInterpreter) {
 646           METHOD->increment_interpreter_invocation_count(THREAD);
 647         }
 648         mcs->invocation_counter()->increment();
 649         if (mcs->invocation_counter()->reached_InvocationLimit()) {
 650             CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
 651 
 652             // We no longer retry on a counter overflow
 653 
 654             // istate->set_msg(retry_method);
 655             // THREAD->clr_do_not_unlock();
 656             // return;
 657         }
 658         SAFEPOINT;
 659       }
 660 
 661       if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
 662         // initialize
 663         os::breakpoint();
 664       }
 665 
 666 #ifdef HACK
 667       {
 668         ResourceMark rm;
 669         char *method_name = istate->method()->name_and_sig_as_C_string();
 670         if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
 671           tty->print_cr("entering: depth %d bci: %d",
 672                          (istate->_stack_base - istate->_stack),
 673                          istate->_bcp - istate->_method->code_base());
 674           interesting = true;
 675         }
 676       }
 677 #endif // HACK
 678 
 679 
 680       // lock method if synchronized
 681       if (METHOD->is_synchronized()) {
 682           // oop rcvr = locals[0].j.r;
 683           oop rcvr;
 684           if (METHOD->is_static()) {
 685             rcvr = METHOD->constants()->pool_holder()->java_mirror();
 686           } else {
 687             rcvr = LOCALS_OBJECT(0);
 688             VERIFY_OOP(rcvr);
 689           }
 690           // The initial monitor is ours for the taking
 691           BasicObjectLock* mon = &istate->monitor_base()[-1];
 692           oop monobj = mon->obj();
 693           assert(mon->obj() == rcvr, "method monitor mis-initialized");
 694 
 695           bool success = UseBiasedLocking;
 696           if (UseBiasedLocking) {
 697             markOop mark = rcvr->mark();
 698             if (mark->has_bias_pattern()) {
 699               // The bias pattern is present in the object's header. Need to check
 700               // whether the bias owner and the epoch are both still current.
 701               intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark;
 702               xx = (intptr_t) rcvr->klass()->prototype_header() ^ xx;
 703               intptr_t yy = (xx & ~((int) markOopDesc::age_mask_in_place));
 704               if (yy != 0 ) {
 705                 // At this point we know that the header has the bias pattern and
 706                 // that we are not the bias owner in the current epoch. We need to
 707                 // figure out more details about the state of the header in order to
 708                 // know what operations can be legally performed on the object's
 709                 // header.
 710 
 711                 // If the low three bits in the xor result aren't clear, that means
 712                 // the prototype header is no longer biased and we have to revoke
 713                 // the bias on this object.
 714 
 715                 if (yy & markOopDesc::biased_lock_mask_in_place == 0 ) {
 716                   // Biasing is still enabled for this data type. See whether the
 717                   // epoch of the current bias is still valid, meaning that the epoch
 718                   // bits of the mark word are equal to the epoch bits of the
 719                   // prototype header. (Note that the prototype header's epoch bits
 720                   // only change at a safepoint.) If not, attempt to rebias the object
 721                   // toward the current thread. Note that we must be absolutely sure
 722                   // that the current epoch is invalid in order to do this because
 723                   // otherwise the manipulations it performs on the mark word are
 724                   // illegal.
 725                   if (yy & markOopDesc::epoch_mask_in_place == 0) {
 726                     // The epoch of the current bias is still valid but we know nothing
 727                     // about the owner; it might be set or it might be clear. Try to
 728                     // acquire the bias of the object using an atomic operation. If this
 729                     // fails we will go in to the runtime to revoke the object's bias.
 730                     // Note that we first construct the presumed unbiased header so we
 731                     // don't accidentally blow away another thread's valid bias.
 732                     intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place |
 733                                                            markOopDesc::age_mask_in_place |
 734                                                            markOopDesc::epoch_mask_in_place);
 735                     if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) {
 736                       CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
 737                     }
 738                   } else {
 739                     try_rebias:
 740                     // At this point we know the epoch has expired, meaning that the
 741                     // current "bias owner", if any, is actually invalid. Under these
 742                     // circumstances _only_, we are allowed to use the current header's
 743                     // value as the comparison value when doing the cas to acquire the
 744                     // bias in the current epoch. In other words, we allow transfer of
 745                     // the bias from one thread to another directly in this situation.
 746                     xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
 747                     if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->prototype_header(),
 748                                             (intptr_t*) rcvr->mark_addr(),
 749                                             (intptr_t) mark) != (intptr_t) mark) {
 750                       CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
 751                     }
 752                   }
 753                 } else {
 754                   try_revoke_bias:
 755                   // The prototype mark in the klass doesn't have the bias bit set any
 756                   // more, indicating that objects of this data type are not supposed
 757                   // to be biased any more. We are going to try to reset the mark of
 758                   // this object to the prototype value and fall through to the
 759                   // CAS-based locking scheme. Note that if our CAS fails, it means
 760                   // that another thread raced us for the privilege of revoking the
 761                   // bias of this particular object, so it's okay to continue in the
 762                   // normal locking code.
 763                   //
 764                   xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
 765                   if (Atomic::cmpxchg_ptr(rcvr->klass()->prototype_header(),
 766                                           (intptr_t*) rcvr->mark_addr(),
 767                                           mark) == mark) {
 768                     // (*counters->revoked_lock_entry_count_addr())++;
 769                   success = false;
 770                   }
 771                 }
 772               }
 773             } else {
 774               cas_label:
 775               success = false;
 776             }
 777           }
 778           if (!success) {
 779             markOop displaced = rcvr->mark()->set_unlocked();
 780             mon->lock()->set_displaced_header(displaced);
 781             if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
 782               // Is it simple recursive case?
 783               if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
 784                 mon->lock()->set_displaced_header(NULL);
 785               } else {
 786                 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
 787               }
 788             }
 789           }
 790       }
 791       THREAD->clr_do_not_unlock();
 792 
 793       // Notify jvmti
 794 #ifdef VM_JVMTI
 795       if (_jvmti_interp_events) {
 796         // Whenever JVMTI puts a thread in interp_only_mode, method
 797         // entry/exit events are sent for that thread to track stack depth.
 798         if (THREAD->is_interp_only_mode()) {
 799           CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
 800                   handle_exception);
 801         }
 802       }
 803 #endif /* VM_JVMTI */
 804 
 805       goto run;
 806     }
 807 
 808     case popping_frame: {
 809       // returned from a java call to pop the frame, restart the call
 810       // clear the message so we don't confuse ourselves later
 811       ShouldNotReachHere();  // we don't return this.
 812       assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
 813       istate->set_msg(no_request);
 814       THREAD->clr_pop_frame_in_process();
 815       goto run;
 816     }
 817 
 818     case method_resume: {
 819       if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
 820         // resume
 821         os::breakpoint();
 822       }
 823 #ifdef HACK
 824       {
 825         ResourceMark rm;
 826         char *method_name = istate->method()->name_and_sig_as_C_string();
 827         if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
 828           tty->print_cr("resume: depth %d bci: %d",
 829                          (istate->_stack_base - istate->_stack) ,
 830                          istate->_bcp - istate->_method->code_base());
 831           interesting = true;
 832         }
 833       }
 834 #endif // HACK
 835       // returned from a java call, continue executing.
 836       if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
 837         goto handle_Pop_Frame;
 838       }
 839 
 840       if (THREAD->has_pending_exception()) goto handle_exception;
 841       // Update the pc by the saved amount of the invoke bytecode size
 842       UPDATE_PC(istate->bcp_advance());
 843       goto run;
 844     }
 845 
 846     case deopt_resume2: {
 847       // Returned from an opcode that will reexecute. Deopt was
 848       // a result of a PopFrame request.
 849       //
 850       goto run;
 851     }
 852 
 853     case deopt_resume: {
 854       // Returned from an opcode that has completed. The stack has
 855       // the result all we need to do is skip across the bytecode
 856       // and continue (assuming there is no exception pending)
 857       //
 858       // compute continuation length
 859       //
 860       // Note: it is possible to deopt at a return_register_finalizer opcode
 861       // because this requires entering the vm to do the registering. While the
 862       // opcode is complete we can't advance because there are no more opcodes
 863       // much like trying to deopt at a poll return. In that has we simply
 864       // get out of here
 865       //
 866       if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
 867         // this will do the right thing even if an exception is pending.
 868         goto handle_return;
 869       }
 870       UPDATE_PC(Bytecodes::length_at(METHOD, pc));
 871       if (THREAD->has_pending_exception()) goto handle_exception;
 872       goto run;
 873     }
 874     case got_monitors: {
 875       // continue locking now that we have a monitor to use
 876       // we expect to find newly allocated monitor at the "top" of the monitor stack.
 877       oop lockee = STACK_OBJECT(-1);
 878       VERIFY_OOP(lockee);
 879       // derefing's lockee ought to provoke implicit null check
 880       // find a free monitor
 881       BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
 882       assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
 883       entry->set_obj(lockee);
 884 
 885       markOop displaced = lockee->mark()->set_unlocked();
 886       entry->lock()->set_displaced_header(displaced);
 887       if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
 888         // Is it simple recursive case?
 889         if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
 890           entry->lock()->set_displaced_header(NULL);
 891         } else {
 892           CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
 893         }
 894       }
 895       UPDATE_PC_AND_TOS(1, -1);
 896       goto run;
 897     }
 898     default: {
 899       fatal("Unexpected message from frame manager");
 900     }
 901   }
 902 
 903 run:
 904 
 905   DO_UPDATE_INSTRUCTION_COUNT(*pc)
 906   DEBUGGER_SINGLE_STEP_NOTIFY();
 907 #ifdef PREFETCH_OPCCODE
 908   opcode = *pc;  /* prefetch first opcode */
 909 #endif
 910 
 911 #ifndef USELABELS
 912   while (1)
 913 #endif
 914   {
 915 #ifndef PREFETCH_OPCCODE
 916       opcode = *pc;
 917 #endif
 918       // Seems like this happens twice per opcode. At worst this is only
 919       // need at entry to the loop.
 920       // DEBUGGER_SINGLE_STEP_NOTIFY();
 921       /* Using this labels avoids double breakpoints when quickening and
 922        * when returing from transition frames.
 923        */
 924   opcode_switch:
 925       assert(istate == orig, "Corrupted istate");
 926       /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
 927       assert(topOfStack >= istate->stack_limit(), "Stack overrun");
 928       assert(topOfStack < istate->stack_base(), "Stack underrun");
 929 
 930 #ifdef USELABELS
 931       DISPATCH(opcode);
 932 #else
 933       switch (opcode)
 934 #endif
 935       {
 936       CASE(_nop):
 937           UPDATE_PC_AND_CONTINUE(1);
 938 
 939           /* Push miscellaneous constants onto the stack. */
 940 
 941       CASE(_aconst_null):
 942           SET_STACK_OBJECT(NULL, 0);
 943           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
 944 
 945 #undef  OPC_CONST_n
 946 #define OPC_CONST_n(opcode, const_type, value)                          \
 947       CASE(opcode):                                                     \
 948           SET_STACK_ ## const_type(value, 0);                           \
 949           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
 950 
 951           OPC_CONST_n(_iconst_m1,   INT,       -1);
 952           OPC_CONST_n(_iconst_0,    INT,        0);
 953           OPC_CONST_n(_iconst_1,    INT,        1);
 954           OPC_CONST_n(_iconst_2,    INT,        2);
 955           OPC_CONST_n(_iconst_3,    INT,        3);
 956           OPC_CONST_n(_iconst_4,    INT,        4);
 957           OPC_CONST_n(_iconst_5,    INT,        5);
 958           OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
 959           OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
 960           OPC_CONST_n(_fconst_2,    FLOAT,      2.0);
 961 
 962 #undef  OPC_CONST2_n
 963 #define OPC_CONST2_n(opcname, value, key, kind)                         \
 964       CASE(_##opcname):                                                 \
 965       {                                                                 \
 966           SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
 967           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
 968       }
 969          OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
 970          OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
 971          OPC_CONST2_n(lconst_0, Zero, long, LONG);
 972          OPC_CONST2_n(lconst_1, One,  long, LONG);
 973 
 974          /* Load constant from constant pool: */
 975 
 976           /* Push a 1-byte signed integer value onto the stack. */
 977       CASE(_bipush):
 978           SET_STACK_INT((jbyte)(pc[1]), 0);
 979           UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
 980 
 981           /* Push a 2-byte signed integer constant onto the stack. */
 982       CASE(_sipush):
 983           SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
 984           UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
 985 
 986           /* load from local variable */
 987 
 988       CASE(_aload):
 989           VERIFY_OOP(LOCALS_OBJECT(pc[1]));
 990           SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
 991           UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
 992 
 993       CASE(_iload):
 994       CASE(_fload):
 995           SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
 996           UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
 997 
 998       CASE(_lload):
 999           SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
1000           UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
1001 
1002       CASE(_dload):
1003           SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
1004           UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
1005 
1006 #undef  OPC_LOAD_n
1007 #define OPC_LOAD_n(num)                                                 \
1008       CASE(_aload_##num):                                               \
1009           VERIFY_OOP(LOCALS_OBJECT(num));                               \
1010           SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
1011           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
1012                                                                         \
1013       CASE(_iload_##num):                                               \
1014       CASE(_fload_##num):                                               \
1015           SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
1016           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
1017                                                                         \
1018       CASE(_lload_##num):                                               \
1019           SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
1020           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
1021       CASE(_dload_##num):                                               \
1022           SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
1023           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1024 
1025           OPC_LOAD_n(0);
1026           OPC_LOAD_n(1);
1027           OPC_LOAD_n(2);
1028           OPC_LOAD_n(3);
1029 
1030           /* store to a local variable */
1031 
1032       CASE(_astore):
1033           astore(topOfStack, -1, locals, pc[1]);
1034           UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
1035 
1036       CASE(_istore):
1037       CASE(_fstore):
1038           SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
1039           UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
1040 
1041       CASE(_lstore):
1042           SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
1043           UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
1044 
1045       CASE(_dstore):
1046           SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
1047           UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
1048 
1049       CASE(_wide): {
1050           uint16_t reg = Bytes::get_Java_u2(pc + 2);
1051 
1052           opcode = pc[1];
1053           switch(opcode) {
1054               case Bytecodes::_aload:
1055                   VERIFY_OOP(LOCALS_OBJECT(reg));
1056                   SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
1057                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
1058 
1059               case Bytecodes::_iload:
1060               case Bytecodes::_fload:
1061                   SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
1062                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
1063 
1064               case Bytecodes::_lload:
1065                   SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
1066                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
1067 
1068               case Bytecodes::_dload:
1069                   SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
1070                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
1071 
1072               case Bytecodes::_astore:
1073                   astore(topOfStack, -1, locals, reg);
1074                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
1075 
1076               case Bytecodes::_istore:
1077               case Bytecodes::_fstore:
1078                   SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
1079                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
1080 
1081               case Bytecodes::_lstore:
1082                   SET_LOCALS_LONG(STACK_LONG(-1), reg);
1083                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
1084 
1085               case Bytecodes::_dstore:
1086                   SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
1087                   UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
1088 
1089               case Bytecodes::_iinc: {
1090                   int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
1091                   // Be nice to see what this generates.... QQQ
1092                   SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
1093                   UPDATE_PC_AND_CONTINUE(6);
1094               }
1095               case Bytecodes::_ret:
1096                   pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
1097                   UPDATE_PC_AND_CONTINUE(0);
1098               default:
1099                   VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
1100           }
1101       }
1102 
1103 
1104 #undef  OPC_STORE_n
1105 #define OPC_STORE_n(num)                                                \
1106       CASE(_astore_##num):                                              \
1107           astore(topOfStack, -1, locals, num);                          \
1108           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
1109       CASE(_istore_##num):                                              \
1110       CASE(_fstore_##num):                                              \
1111           SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
1112           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1113 
1114           OPC_STORE_n(0);
1115           OPC_STORE_n(1);
1116           OPC_STORE_n(2);
1117           OPC_STORE_n(3);
1118 
1119 #undef  OPC_DSTORE_n
1120 #define OPC_DSTORE_n(num)                                               \
1121       CASE(_dstore_##num):                                              \
1122           SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
1123           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
1124       CASE(_lstore_##num):                                              \
1125           SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
1126           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1127 
1128           OPC_DSTORE_n(0);
1129           OPC_DSTORE_n(1);
1130           OPC_DSTORE_n(2);
1131           OPC_DSTORE_n(3);
1132 
1133           /* stack pop, dup, and insert opcodes */
1134 
1135 
1136       CASE(_pop):                /* Discard the top item on the stack */
1137           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1138 
1139 
1140       CASE(_pop2):               /* Discard the top 2 items on the stack */
1141           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1142 
1143 
1144       CASE(_dup):               /* Duplicate the top item on the stack */
1145           dup(topOfStack);
1146           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1147 
1148       CASE(_dup2):              /* Duplicate the top 2 items on the stack */
1149           dup2(topOfStack);
1150           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1151 
1152       CASE(_dup_x1):    /* insert top word two down */
1153           dup_x1(topOfStack);
1154           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1155 
1156       CASE(_dup_x2):    /* insert top word three down  */
1157           dup_x2(topOfStack);
1158           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1159 
1160       CASE(_dup2_x1):   /* insert top 2 slots three down */
1161           dup2_x1(topOfStack);
1162           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1163 
1164       CASE(_dup2_x2):   /* insert top 2 slots four down */
1165           dup2_x2(topOfStack);
1166           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1167 
1168       CASE(_swap): {        /* swap top two elements on the stack */
1169           swap(topOfStack);
1170           UPDATE_PC_AND_CONTINUE(1);
1171       }
1172 
1173           /* Perform various binary integer operations */
1174 
1175 #undef  OPC_INT_BINARY
1176 #define OPC_INT_BINARY(opcname, opname, test)                           \
1177       CASE(_i##opcname):                                                \
1178           if (test && (STACK_INT(-1) == 0)) {                           \
1179               VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1180                             "/ by zero");                               \
1181           }                                                             \
1182           SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
1183                                       STACK_INT(-1)),                   \
1184                                       -2);                              \
1185           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
1186       CASE(_l##opcname):                                                \
1187       {                                                                 \
1188           if (test) {                                                   \
1189             jlong l1 = STACK_LONG(-1);                                  \
1190             if (VMlongEqz(l1)) {                                        \
1191               VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1192                             "/ by long zero");                          \
1193             }                                                           \
1194           }                                                             \
1195           /* First long at (-1,-2) next long at (-3,-4) */              \
1196           SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
1197                                         STACK_LONG(-1)),                \
1198                                         -3);                            \
1199           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
1200       }
1201 
1202       OPC_INT_BINARY(add, Add, 0);
1203       OPC_INT_BINARY(sub, Sub, 0);
1204       OPC_INT_BINARY(mul, Mul, 0);
1205       OPC_INT_BINARY(and, And, 0);
1206       OPC_INT_BINARY(or,  Or,  0);
1207       OPC_INT_BINARY(xor, Xor, 0);
1208       OPC_INT_BINARY(div, Div, 1);
1209       OPC_INT_BINARY(rem, Rem, 1);
1210 
1211 
1212       /* Perform various binary floating number operations */
1213       /* On some machine/platforms/compilers div zero check can be implicit */
1214 
1215 #undef  OPC_FLOAT_BINARY
1216 #define OPC_FLOAT_BINARY(opcname, opname)                                  \
1217       CASE(_d##opcname): {                                                 \
1218           SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
1219                                             STACK_DOUBLE(-1)),             \
1220                                             -3);                           \
1221           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
1222       }                                                                    \
1223       CASE(_f##opcname):                                                   \
1224           SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
1225                                           STACK_FLOAT(-1)),                \
1226                                           -2);                             \
1227           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1228 
1229 
1230      OPC_FLOAT_BINARY(add, Add);
1231      OPC_FLOAT_BINARY(sub, Sub);
1232      OPC_FLOAT_BINARY(mul, Mul);
1233      OPC_FLOAT_BINARY(div, Div);
1234      OPC_FLOAT_BINARY(rem, Rem);
1235 
1236       /* Shift operations
1237        * Shift left int and long: ishl, lshl
1238        * Logical shift right int and long w/zero extension: iushr, lushr
1239        * Arithmetic shift right int and long w/sign extension: ishr, lshr
1240        */
1241 
1242 #undef  OPC_SHIFT_BINARY
1243 #define OPC_SHIFT_BINARY(opcname, opname)                               \
1244       CASE(_i##opcname):                                                \
1245          SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
1246                                      STACK_INT(-1)),                    \
1247                                      -2);                               \
1248          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
1249       CASE(_l##opcname):                                                \
1250       {                                                                 \
1251          SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
1252                                        STACK_INT(-1)),                  \
1253                                        -2);                             \
1254          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
1255       }
1256 
1257       OPC_SHIFT_BINARY(shl, Shl);
1258       OPC_SHIFT_BINARY(shr, Shr);
1259       OPC_SHIFT_BINARY(ushr, Ushr);
1260 
1261      /* Increment local variable by constant */
1262       CASE(_iinc):
1263       {
1264           // locals[pc[1]].j.i += (jbyte)(pc[2]);
1265           SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
1266           UPDATE_PC_AND_CONTINUE(3);
1267       }
1268 
1269      /* negate the value on the top of the stack */
1270 
1271       CASE(_ineg):
1272          SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
1273          UPDATE_PC_AND_CONTINUE(1);
1274 
1275       CASE(_fneg):
1276          SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
1277          UPDATE_PC_AND_CONTINUE(1);
1278 
1279       CASE(_lneg):
1280       {
1281          SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
1282          UPDATE_PC_AND_CONTINUE(1);
1283       }
1284 
1285       CASE(_dneg):
1286       {
1287          SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
1288          UPDATE_PC_AND_CONTINUE(1);
1289       }
1290 
1291       /* Conversion operations */
1292 
1293       CASE(_i2f):       /* convert top of stack int to float */
1294          SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
1295          UPDATE_PC_AND_CONTINUE(1);
1296 
1297       CASE(_i2l):       /* convert top of stack int to long */
1298       {
1299           // this is ugly QQQ
1300           jlong r = VMint2Long(STACK_INT(-1));
1301           MORE_STACK(-1); // Pop
1302           SET_STACK_LONG(r, 1);
1303 
1304           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1305       }
1306 
1307       CASE(_i2d):       /* convert top of stack int to double */
1308       {
1309           // this is ugly QQQ (why cast to jlong?? )
1310           jdouble r = (jlong)STACK_INT(-1);
1311           MORE_STACK(-1); // Pop
1312           SET_STACK_DOUBLE(r, 1);
1313 
1314           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1315       }
1316 
1317       CASE(_l2i):       /* convert top of stack long to int */
1318       {
1319           jint r = VMlong2Int(STACK_LONG(-1));
1320           MORE_STACK(-2); // Pop
1321           SET_STACK_INT(r, 0);
1322           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1323       }
1324 
1325       CASE(_l2f):   /* convert top of stack long to float */
1326       {
1327           jlong r = STACK_LONG(-1);
1328           MORE_STACK(-2); // Pop
1329           SET_STACK_FLOAT(VMlong2Float(r), 0);
1330           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1331       }
1332 
1333       CASE(_l2d):       /* convert top of stack long to double */
1334       {
1335           jlong r = STACK_LONG(-1);
1336           MORE_STACK(-2); // Pop
1337           SET_STACK_DOUBLE(VMlong2Double(r), 1);
1338           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1339       }
1340 
1341       CASE(_f2i):  /* Convert top of stack float to int */
1342           SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
1343           UPDATE_PC_AND_CONTINUE(1);
1344 
1345       CASE(_f2l):  /* convert top of stack float to long */
1346       {
1347           jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
1348           MORE_STACK(-1); // POP
1349           SET_STACK_LONG(r, 1);
1350           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1351       }
1352 
1353       CASE(_f2d):  /* convert top of stack float to double */
1354       {
1355           jfloat f;
1356           jdouble r;
1357           f = STACK_FLOAT(-1);
1358           r = (jdouble) f;
1359           MORE_STACK(-1); // POP
1360           SET_STACK_DOUBLE(r, 1);
1361           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1362       }
1363 
1364       CASE(_d2i): /* convert top of stack double to int */
1365       {
1366           jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
1367           MORE_STACK(-2);
1368           SET_STACK_INT(r1, 0);
1369           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1370       }
1371 
1372       CASE(_d2f): /* convert top of stack double to float */
1373       {
1374           jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
1375           MORE_STACK(-2);
1376           SET_STACK_FLOAT(r1, 0);
1377           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1378       }
1379 
1380       CASE(_d2l): /* convert top of stack double to long */
1381       {
1382           jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
1383           MORE_STACK(-2);
1384           SET_STACK_LONG(r1, 1);
1385           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1386       }
1387 
1388       CASE(_i2b):
1389           SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
1390           UPDATE_PC_AND_CONTINUE(1);
1391 
1392       CASE(_i2c):
1393           SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
1394           UPDATE_PC_AND_CONTINUE(1);
1395 
1396       CASE(_i2s):
1397           SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
1398           UPDATE_PC_AND_CONTINUE(1);
1399 
1400       /* comparison operators */
1401 
1402 
1403 #define COMPARISON_OP(name, comparison)                                      \
1404       CASE(_if_icmp##name): {                                                \
1405           int skip = (STACK_INT(-2) comparison STACK_INT(-1))                \
1406                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
1407           address branch_pc = pc;                                            \
1408           UPDATE_PC_AND_TOS(skip, -2);                                       \
1409           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
1410           CONTINUE;                                                          \
1411       }                                                                      \
1412       CASE(_if##name): {                                                     \
1413           int skip = (STACK_INT(-1) comparison 0)                            \
1414                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
1415           address branch_pc = pc;                                            \
1416           UPDATE_PC_AND_TOS(skip, -1);                                       \
1417           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
1418           CONTINUE;                                                          \
1419       }
1420 
1421 #define COMPARISON_OP2(name, comparison)                                     \
1422       COMPARISON_OP(name, comparison)                                        \
1423       CASE(_if_acmp##name): {                                                \
1424           int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))          \
1425                        ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
1426           address branch_pc = pc;                                            \
1427           UPDATE_PC_AND_TOS(skip, -2);                                       \
1428           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
1429           CONTINUE;                                                          \
1430       }
1431 
1432 #define NULL_COMPARISON_NOT_OP(name)                                         \
1433       CASE(_if##name): {                                                     \
1434           int skip = (!(STACK_OBJECT(-1) == NULL))                           \
1435                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
1436           address branch_pc = pc;                                            \
1437           UPDATE_PC_AND_TOS(skip, -1);                                       \
1438           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
1439           CONTINUE;                                                          \
1440       }
1441 
1442 #define NULL_COMPARISON_OP(name)                                             \
1443       CASE(_if##name): {                                                     \
1444           int skip = ((STACK_OBJECT(-1) == NULL))                            \
1445                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
1446           address branch_pc = pc;                                            \
1447           UPDATE_PC_AND_TOS(skip, -1);                                       \
1448           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
1449           CONTINUE;                                                          \
1450       }
1451       COMPARISON_OP(lt, <);
1452       COMPARISON_OP(gt, >);
1453       COMPARISON_OP(le, <=);
1454       COMPARISON_OP(ge, >=);
1455       COMPARISON_OP2(eq, ==);  /* include ref comparison */
1456       COMPARISON_OP2(ne, !=);  /* include ref comparison */
1457       NULL_COMPARISON_OP(null);
1458       NULL_COMPARISON_NOT_OP(nonnull);
1459 
1460       /* Goto pc at specified offset in switch table. */
1461 
1462       CASE(_tableswitch): {
1463           jint* lpc  = (jint*)VMalignWordUp(pc+1);
1464           int32_t  key  = STACK_INT(-1);
1465           int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
1466           int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
1467           int32_t  skip;
1468           key -= low;
1469           skip = ((uint32_t) key > (uint32_t)(high - low))
1470                       ? Bytes::get_Java_u4((address)&lpc[0])
1471                       : Bytes::get_Java_u4((address)&lpc[key + 3]);
1472           // Does this really need a full backedge check (osr?)
1473           address branch_pc = pc;
1474           UPDATE_PC_AND_TOS(skip, -1);
1475           DO_BACKEDGE_CHECKS(skip, branch_pc);
1476           CONTINUE;
1477       }
1478 
1479       /* Goto pc whose table entry matches specified key */
1480 
1481       CASE(_lookupswitch): {
1482           jint* lpc  = (jint*)VMalignWordUp(pc+1);
1483           int32_t  key  = STACK_INT(-1);
1484           int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
1485           int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
1486           while (--npairs >= 0) {
1487               lpc += 2;
1488               if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
1489                   skip = Bytes::get_Java_u4((address)&lpc[1]);
1490                   break;
1491               }
1492           }
1493           address branch_pc = pc;
1494           UPDATE_PC_AND_TOS(skip, -1);
1495           DO_BACKEDGE_CHECKS(skip, branch_pc);
1496           CONTINUE;
1497       }
1498 
1499       CASE(_fcmpl):
1500       CASE(_fcmpg):
1501       {
1502           SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
1503                                         STACK_FLOAT(-1),
1504                                         (opcode == Bytecodes::_fcmpl ? -1 : 1)),
1505                         -2);
1506           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1507       }
1508 
1509       CASE(_dcmpl):
1510       CASE(_dcmpg):
1511       {
1512           int r = VMdoubleCompare(STACK_DOUBLE(-3),
1513                                   STACK_DOUBLE(-1),
1514                                   (opcode == Bytecodes::_dcmpl ? -1 : 1));
1515           MORE_STACK(-4); // Pop
1516           SET_STACK_INT(r, 0);
1517           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1518       }
1519 
1520       CASE(_lcmp):
1521       {
1522           int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
1523           MORE_STACK(-4);
1524           SET_STACK_INT(r, 0);
1525           UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1526       }
1527 
1528 
1529       /* Return from a method */
1530 
1531       CASE(_areturn):
1532       CASE(_ireturn):
1533       CASE(_freturn):
1534       {
1535           // Allow a safepoint before returning to frame manager.
1536           SAFEPOINT;
1537 
1538           goto handle_return;
1539       }
1540 
1541       CASE(_lreturn):
1542       CASE(_dreturn):
1543       {
1544           // Allow a safepoint before returning to frame manager.
1545           SAFEPOINT;
1546           goto handle_return;
1547       }
1548 
1549       CASE(_return_register_finalizer): {
1550 
1551           oop rcvr = LOCALS_OBJECT(0);
1552           VERIFY_OOP(rcvr);
1553           if (rcvr->klass()->has_finalizer()) {
1554             CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
1555           }
1556           goto handle_return;
1557       }
1558       CASE(_return): {
1559 
1560           // Allow a safepoint before returning to frame manager.
1561           SAFEPOINT;
1562           goto handle_return;
1563       }
1564 
1565       /* Array access byte-codes */
1566 
1567       /* Every array access byte-code starts out like this */
1568 //        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
1569 #define ARRAY_INTRO(arrayOff)                                                  \
1570       arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
1571       jint     index  = STACK_INT(arrayOff + 1);                               \
1572       char message[jintAsStringSize];                                          \
1573       CHECK_NULL(arrObj);                                                      \
1574       if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
1575           sprintf(message, "%d", index);                                       \
1576           VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
1577                         message);                                              \
1578       }
1579 
1580       /* 32-bit loads. These handle conversion from < 32-bit types */
1581 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                                \
1582       {                                                                               \
1583           ARRAY_INTRO(-2);                                                            \
1584           extra;                                                                      \
1585           SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
1586                            -2);                                                       \
1587           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                                      \
1588       }
1589 
1590       /* 64-bit loads */
1591 #define ARRAY_LOADTO64(T,T2, stackRes, extra)                                              \
1592       {                                                                                    \
1593           ARRAY_INTRO(-2);                                                                 \
1594           SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
1595           extra;                                                                           \
1596           UPDATE_PC_AND_CONTINUE(1);                                            \
1597       }
1598 
1599       CASE(_iaload):
1600           ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
1601       CASE(_faload):
1602           ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
1603       CASE(_aaload):
1604           ARRAY_LOADTO32(T_OBJECT, oop,   INTPTR_FORMAT, STACK_OBJECT, 0);
1605       CASE(_baload):
1606           ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
1607       CASE(_caload):
1608           ARRAY_LOADTO32(T_CHAR,  jchar, "%d",   STACK_INT, 0);
1609       CASE(_saload):
1610           ARRAY_LOADTO32(T_SHORT, jshort, "%d",   STACK_INT, 0);
1611       CASE(_laload):
1612           ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
1613       CASE(_daload):
1614           ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1615 
1616       /* 32-bit stores. These handle conversion to < 32-bit types */
1617 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)                            \
1618       {                                                                              \
1619           ARRAY_INTRO(-3);                                                           \
1620           extra;                                                                     \
1621           *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1622           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                                     \
1623       }
1624 
1625       /* 64-bit stores */
1626 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                                    \
1627       {                                                                              \
1628           ARRAY_INTRO(-4);                                                           \
1629           extra;                                                                     \
1630           *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1631           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                                     \
1632       }
1633 
1634       CASE(_iastore):
1635           ARRAY_STOREFROM32(T_INT, jint,   "%d",   STACK_INT, 0);
1636       CASE(_fastore):
1637           ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
1638       /*
1639        * This one looks different because of the assignability check
1640        */
1641       CASE(_aastore): {
1642           oop rhsObject = STACK_OBJECT(-1);
1643           VERIFY_OOP(rhsObject);
1644           ARRAY_INTRO( -3);
1645           // arrObj, index are set
1646           if (rhsObject != NULL) {
1647             /* Check assignability of rhsObject into arrObj */
1648             Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass)
1649             Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
1650             //
1651             // Check for compatibilty. This check must not GC!!
1652             // Seems way more expensive now that we must dispatch
1653             //
1654             if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is...
1655               VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
1656             }
1657           }
1658           oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
1659           // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
1660           *elem_loc = rhsObject;
1661           // Mark the card
1662           OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
1663           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
1664       }
1665       CASE(_bastore):
1666           ARRAY_STOREFROM32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
1667       CASE(_castore):
1668           ARRAY_STOREFROM32(T_CHAR, jchar,  "%d",   STACK_INT, 0);
1669       CASE(_sastore):
1670           ARRAY_STOREFROM32(T_SHORT, jshort, "%d",   STACK_INT, 0);
1671       CASE(_lastore):
1672           ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
1673       CASE(_dastore):
1674           ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1675 
1676       CASE(_arraylength):
1677       {
1678           arrayOop ary = (arrayOop) STACK_OBJECT(-1);
1679           CHECK_NULL(ary);
1680           SET_STACK_INT(ary->length(), -1);
1681           UPDATE_PC_AND_CONTINUE(1);
1682       }
1683 
1684       /* monitorenter and monitorexit for locking/unlocking an object */
1685 
1686       CASE(_monitorenter): {
1687         oop lockee = STACK_OBJECT(-1);
1688         // derefing's lockee ought to provoke implicit null check
1689         CHECK_NULL(lockee);
1690         // find a free monitor or one already allocated for this object
1691         // if we find a matching object then we need a new monitor
1692         // since this is recursive enter
1693         BasicObjectLock* limit = istate->monitor_base();
1694         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1695         BasicObjectLock* entry = NULL;
1696         while (most_recent != limit ) {
1697           if (most_recent->obj() == NULL) entry = most_recent;
1698           else if (most_recent->obj() == lockee) break;
1699           most_recent++;
1700         }
1701         if (entry != NULL) {
1702           entry->set_obj(lockee);
1703           markOop displaced = lockee->mark()->set_unlocked();
1704           entry->lock()->set_displaced_header(displaced);
1705           if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
1706             // Is it simple recursive case?
1707             if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
1708               entry->lock()->set_displaced_header(NULL);
1709             } else {
1710               CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1711             }
1712           }
1713           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1714         } else {
1715           istate->set_msg(more_monitors);
1716           UPDATE_PC_AND_RETURN(0); // Re-execute
1717         }
1718       }
1719 
1720       CASE(_monitorexit): {
1721         oop lockee = STACK_OBJECT(-1);
1722         CHECK_NULL(lockee);
1723         // derefing's lockee ought to provoke implicit null check
1724         // find our monitor slot
1725         BasicObjectLock* limit = istate->monitor_base();
1726         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1727         while (most_recent != limit ) {
1728           if ((most_recent)->obj() == lockee) {
1729             BasicLock* lock = most_recent->lock();
1730             markOop header = lock->displaced_header();
1731             most_recent->set_obj(NULL);
1732             // If it isn't recursive we either must swap old header or call the runtime
1733             if (header != NULL) {
1734               if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
1735                 // restore object for the slow case
1736                 most_recent->set_obj(lockee);
1737                 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
1738               }
1739             }
1740             UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1741           }
1742           most_recent++;
1743         }
1744         // Need to throw illegal monitor state exception
1745         CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1746         ShouldNotReachHere();
1747       }
1748 
1749       /* All of the non-quick opcodes. */
1750 
1751       /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1752        *  constant pool index in the instruction.
1753        */
1754       CASE(_getfield):
1755       CASE(_getstatic):
1756         {
1757           u2 index;
1758           ConstantPoolCacheEntry* cache;
1759           index = Bytes::get_native_u2(pc+1);
1760 
1761           // QQQ Need to make this as inlined as possible. Probably need to
1762           // split all the bytecode cases out so c++ compiler has a chance
1763           // for constant prop to fold everything possible away.
1764 
1765           cache = cp->entry_at(index);
1766           if (!cache->is_resolved((Bytecodes::Code)opcode)) {
1767             CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
1768                     handle_exception);
1769             cache = cp->entry_at(index);
1770           }
1771 
1772 #ifdef VM_JVMTI
1773           if (_jvmti_interp_events) {
1774             int *count_addr;
1775             oop obj;
1776             // Check to see if a field modification watch has been set
1777             // before we take the time to call into the VM.
1778             count_addr = (int *)JvmtiExport::get_field_access_count_addr();
1779             if ( *count_addr > 0 ) {
1780               if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1781                 obj = (oop)NULL;
1782               } else {
1783                 obj = (oop) STACK_OBJECT(-1);
1784                 VERIFY_OOP(obj);
1785               }
1786               CALL_VM(InterpreterRuntime::post_field_access(THREAD,
1787                                           obj,
1788                                           cache),
1789                                           handle_exception);
1790             }
1791           }
1792 #endif /* VM_JVMTI */
1793 
1794           oop obj;
1795           if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1796             Klass* k = cache->f1_as_klass();
1797             obj = k->java_mirror();
1798             MORE_STACK(1);  // Assume single slot push
1799           } else {
1800             obj = (oop) STACK_OBJECT(-1);
1801             CHECK_NULL(obj);
1802           }
1803 
1804           //
1805           // Now store the result on the stack
1806           //
1807           TosState tos_type = cache->flag_state();
1808           int field_offset = cache->f2_as_index();
1809           if (cache->is_volatile()) {
1810             if (tos_type == atos) {
1811               VERIFY_OOP(obj->obj_field_acquire(field_offset));
1812               SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
1813             } else if (tos_type == itos) {
1814               SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
1815             } else if (tos_type == ltos) {
1816               SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
1817               MORE_STACK(1);
1818             } else if (tos_type == btos) {
1819               SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
1820             } else if (tos_type == ctos) {
1821               SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
1822             } else if (tos_type == stos) {
1823               SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
1824             } else if (tos_type == ftos) {
1825               SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
1826             } else {
1827               SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
1828               MORE_STACK(1);
1829             }
1830           } else {
1831             if (tos_type == atos) {
1832               VERIFY_OOP(obj->obj_field(field_offset));
1833               SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
1834             } else if (tos_type == itos) {
1835               SET_STACK_INT(obj->int_field(field_offset), -1);
1836             } else if (tos_type == ltos) {
1837               SET_STACK_LONG(obj->long_field(field_offset), 0);
1838               MORE_STACK(1);
1839             } else if (tos_type == btos) {
1840               SET_STACK_INT(obj->byte_field(field_offset), -1);
1841             } else if (tos_type == ctos) {
1842               SET_STACK_INT(obj->char_field(field_offset), -1);
1843             } else if (tos_type == stos) {
1844               SET_STACK_INT(obj->short_field(field_offset), -1);
1845             } else if (tos_type == ftos) {
1846               SET_STACK_FLOAT(obj->float_field(field_offset), -1);
1847             } else {
1848               SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
1849               MORE_STACK(1);
1850             }
1851           }
1852 
1853           UPDATE_PC_AND_CONTINUE(3);
1854          }
1855 
1856       CASE(_putfield):
1857       CASE(_putstatic):
1858         {
1859           u2 index = Bytes::get_native_u2(pc+1);
1860           ConstantPoolCacheEntry* cache = cp->entry_at(index);
1861           if (!cache->is_resolved((Bytecodes::Code)opcode)) {
1862             CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
1863                     handle_exception);
1864             cache = cp->entry_at(index);
1865           }
1866 
1867 #ifdef VM_JVMTI
1868           if (_jvmti_interp_events) {
1869             int *count_addr;
1870             oop obj;
1871             // Check to see if a field modification watch has been set
1872             // before we take the time to call into the VM.
1873             count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
1874             if ( *count_addr > 0 ) {
1875               if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
1876                 obj = (oop)NULL;
1877               }
1878               else {
1879                 if (cache->is_long() || cache->is_double()) {
1880                   obj = (oop) STACK_OBJECT(-3);
1881                 } else {
1882                   obj = (oop) STACK_OBJECT(-2);
1883                 }
1884                 VERIFY_OOP(obj);
1885               }
1886 
1887               CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
1888                                           obj,
1889                                           cache,
1890                                           (jvalue *)STACK_SLOT(-1)),
1891                                           handle_exception);
1892             }
1893           }
1894 #endif /* VM_JVMTI */
1895 
1896           // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
1897           // out so c++ compiler has a chance for constant prop to fold everything possible away.
1898 
1899           oop obj;
1900           int count;
1901           TosState tos_type = cache->flag_state();
1902 
1903           count = -1;
1904           if (tos_type == ltos || tos_type == dtos) {
1905             --count;
1906           }
1907           if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
1908             Klass* k = cache->f1_as_klass();
1909             obj = k->java_mirror();
1910           } else {
1911             --count;
1912             obj = (oop) STACK_OBJECT(count);
1913             CHECK_NULL(obj);
1914           }
1915 
1916           //
1917           // Now store the result
1918           //
1919           int field_offset = cache->f2_as_index();
1920           if (cache->is_volatile()) {
1921             if (tos_type == itos) {
1922               obj->release_int_field_put(field_offset, STACK_INT(-1));
1923             } else if (tos_type == atos) {
1924               VERIFY_OOP(STACK_OBJECT(-1));
1925               obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
1926               OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
1927             } else if (tos_type == btos) {
1928               obj->release_byte_field_put(field_offset, STACK_INT(-1));
1929             } else if (tos_type == ltos) {
1930               obj->release_long_field_put(field_offset, STACK_LONG(-1));
1931             } else if (tos_type == ctos) {
1932               obj->release_char_field_put(field_offset, STACK_INT(-1));
1933             } else if (tos_type == stos) {
1934               obj->release_short_field_put(field_offset, STACK_INT(-1));
1935             } else if (tos_type == ftos) {
1936               obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
1937             } else {
1938               obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
1939             }
1940             OrderAccess::storeload();
1941           } else {
1942             if (tos_type == itos) {
1943               obj->int_field_put(field_offset, STACK_INT(-1));
1944             } else if (tos_type == atos) {
1945               VERIFY_OOP(STACK_OBJECT(-1));
1946               obj->obj_field_put(field_offset, STACK_OBJECT(-1));
1947               OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
1948             } else if (tos_type == btos) {
1949               obj->byte_field_put(field_offset, STACK_INT(-1));
1950             } else if (tos_type == ltos) {
1951               obj->long_field_put(field_offset, STACK_LONG(-1));
1952             } else if (tos_type == ctos) {
1953               obj->char_field_put(field_offset, STACK_INT(-1));
1954             } else if (tos_type == stos) {
1955               obj->short_field_put(field_offset, STACK_INT(-1));
1956             } else if (tos_type == ftos) {
1957               obj->float_field_put(field_offset, STACK_FLOAT(-1));
1958             } else {
1959               obj->double_field_put(field_offset, STACK_DOUBLE(-1));
1960             }
1961           }
1962 
1963           UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
1964         }
1965 
1966       CASE(_new): {
1967         u2 index = Bytes::get_Java_u2(pc+1);
1968         ConstantPool* constants = istate->method()->constants();
1969         if (!constants->tag_at(index).is_unresolved_klass()) {
1970           // Make sure klass is initialized and doesn't have a finalizer
1971           Klass* entry = constants->slot_at(index).get_klass();
1972           assert(entry->is_klass(), "Should be resolved klass");
1973           Klass* k_entry = (Klass*) entry;
1974           assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
1975           InstanceKlass* ik = (InstanceKlass*) k_entry;
1976           if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
1977             size_t obj_size = ik->size_helper();
1978             oop result = NULL;
1979             // If the TLAB isn't pre-zeroed then we'll have to do it
1980             bool need_zero = !ZeroTLAB;
1981             if (UseTLAB) {
1982               result = (oop) THREAD->tlab().allocate(obj_size);
1983             }
1984             if (result == NULL) {
1985               need_zero = true;
1986               // Try allocate in shared eden
1987         retry:
1988               HeapWord* compare_to = *Universe::heap()->top_addr();
1989               HeapWord* new_top = compare_to + obj_size;
1990               if (new_top <= *Universe::heap()->end_addr()) {
1991                 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
1992                   goto retry;
1993                 }
1994                 result = (oop) compare_to;
1995               }
1996             }
1997             if (result != NULL) {
1998               // Initialize object (if nonzero size and need) and then the header
1999               if (need_zero ) {
2000                 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
2001                 obj_size -= sizeof(oopDesc) / oopSize;
2002                 if (obj_size > 0 ) {
2003                   memset(to_zero, 0, obj_size * HeapWordSize);
2004                 }
2005               }
2006               if (UseBiasedLocking) {
2007                 result->set_mark(ik->prototype_header());
2008               } else {
2009                 result->set_mark(markOopDesc::prototype());
2010               }
2011               result->set_klass_gap(0);
2012               result->set_klass(k_entry);
2013               SET_STACK_OBJECT(result, 0);
2014               UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2015             }
2016           }
2017         }
2018         // Slow case allocation
2019         CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2020                 handle_exception);
2021         SET_STACK_OBJECT(THREAD->vm_result(), 0);
2022         THREAD->set_vm_result(NULL);
2023         UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2024       }
2025       CASE(_anewarray): {
2026         u2 index = Bytes::get_Java_u2(pc+1);
2027         jint size = STACK_INT(-1);
2028         CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
2029                 handle_exception);
2030         SET_STACK_OBJECT(THREAD->vm_result(), -1);
2031         THREAD->set_vm_result(NULL);
2032         UPDATE_PC_AND_CONTINUE(3);
2033       }
2034       CASE(_multianewarray): {
2035         jint dims = *(pc+3);
2036         jint size = STACK_INT(-1);
2037         // stack grows down, dimensions are up!
2038         jint *dimarray =
2039                    (jint*)&topOfStack[dims * Interpreter::stackElementWords+
2040                                       Interpreter::stackElementWords-1];
2041         //adjust pointer to start of stack element
2042         CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
2043                 handle_exception);
2044         SET_STACK_OBJECT(THREAD->vm_result(), -dims);
2045         THREAD->set_vm_result(NULL);
2046         UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
2047       }
2048       CASE(_checkcast):
2049           if (STACK_OBJECT(-1) != NULL) {
2050             VERIFY_OOP(STACK_OBJECT(-1));
2051             u2 index = Bytes::get_Java_u2(pc+1);
2052             if (ProfileInterpreter) {
2053               // needs Profile_checkcast QQQ
2054               ShouldNotReachHere();
2055             }
2056             // Constant pool may have actual klass or unresolved klass. If it is
2057             // unresolved we must resolve it
2058             if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2059               CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2060             }
2061             Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2062             Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
2063             //
2064             // Check for compatibilty. This check must not GC!!
2065             // Seems way more expensive now that we must dispatch
2066             //
2067             if (objKlassOop != klassOf &&
2068                 !objKlassOop->is_subtype_of(klassOf)) {
2069               ResourceMark rm(THREAD);
2070               const char* objName = objKlassOop->external_name();
2071               const char* klassName = klassOf->external_name();
2072               char* message = SharedRuntime::generate_class_cast_message(
2073                 objName, klassName);
2074               VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
2075             }
2076           } else {
2077             if (UncommonNullCast) {
2078 //              istate->method()->set_null_cast_seen();
2079 // [RGV] Not sure what to do here!
2080 
2081             }
2082           }
2083           UPDATE_PC_AND_CONTINUE(3);
2084 
2085       CASE(_instanceof):
2086           if (STACK_OBJECT(-1) == NULL) {
2087             SET_STACK_INT(0, -1);
2088           } else {
2089             VERIFY_OOP(STACK_OBJECT(-1));
2090             u2 index = Bytes::get_Java_u2(pc+1);
2091             // Constant pool may have actual klass or unresolved klass. If it is
2092             // unresolved we must resolve it
2093             if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2094               CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2095             }
2096             Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2097             Klass* objKlassOop = STACK_OBJECT(-1)->klass();
2098             //
2099             // Check for compatibilty. This check must not GC!!
2100             // Seems way more expensive now that we must dispatch
2101             //
2102             if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) {
2103               SET_STACK_INT(1, -1);
2104             } else {
2105               SET_STACK_INT(0, -1);
2106             }
2107           }
2108           UPDATE_PC_AND_CONTINUE(3);
2109 
2110       CASE(_ldc_w):
2111       CASE(_ldc):
2112         {
2113           u2 index;
2114           bool wide = false;
2115           int incr = 2; // frequent case
2116           if (opcode == Bytecodes::_ldc) {
2117             index = pc[1];
2118           } else {
2119             index = Bytes::get_Java_u2(pc+1);
2120             incr = 3;
2121             wide = true;
2122           }
2123 
2124           ConstantPool* constants = METHOD->constants();
2125           switch (constants->tag_at(index).value()) {
2126           case JVM_CONSTANT_Integer:
2127             SET_STACK_INT(constants->int_at(index), 0);
2128             break;
2129 
2130           case JVM_CONSTANT_Float:
2131             SET_STACK_FLOAT(constants->float_at(index), 0);
2132             break;
2133 
2134           case JVM_CONSTANT_String:
2135             {
2136               oop result = constants->resolved_references()->obj_at(index);
2137               if (result == NULL) {
2138                 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2139                 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2140                 THREAD->set_vm_result(NULL);
2141               } else {
2142                 VERIFY_OOP(result);
2143                 SET_STACK_OBJECT(result, 0);
2144               }
2145             break;
2146             }
2147 
2148           case JVM_CONSTANT_Class:
2149             VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
2150             SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
2151             break;
2152 
2153           case JVM_CONSTANT_UnresolvedClass:
2154           case JVM_CONSTANT_UnresolvedClassInError:
2155             CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
2156             SET_STACK_OBJECT(THREAD->vm_result(), 0);
2157             THREAD->set_vm_result(NULL);
2158             break;
2159 
2160           default:  ShouldNotReachHere();
2161           }
2162           UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2163         }
2164 
2165       CASE(_ldc2_w):
2166         {
2167           u2 index = Bytes::get_Java_u2(pc+1);
2168 
2169           ConstantPool* constants = METHOD->constants();
2170           switch (constants->tag_at(index).value()) {
2171 
2172           case JVM_CONSTANT_Long:
2173              SET_STACK_LONG(constants->long_at(index), 1);
2174             break;
2175 
2176           case JVM_CONSTANT_Double:
2177              SET_STACK_DOUBLE(constants->double_at(index), 1);
2178             break;
2179           default:  ShouldNotReachHere();
2180           }
2181           UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
2182         }
2183 
2184       CASE(_fast_aldc_w):
2185       CASE(_fast_aldc): {
2186         u2 index;
2187         int incr;
2188         if (opcode == Bytecodes::_fast_aldc) {
2189           index = pc[1];
2190           incr = 2;
2191         } else {
2192           index = Bytes::get_native_u2(pc+1);
2193           incr = 3;
2194         }
2195 
2196         // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2197         // This kind of CP cache entry does not need to match the flags byte, because
2198         // there is a 1-1 relation between bytecode type and CP entry type.
2199         ConstantPool* constants = METHOD->constants();
2200         oop result = constants->resolved_references()->obj_at(index);
2201         if (result == NULL) {
2202           CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
2203                   handle_exception);
2204           result = THREAD->vm_result();
2205         }
2206 
2207         VERIFY_OOP(result);
2208         SET_STACK_OBJECT(result, 0);
2209         UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2210       }
2211 
2212       CASE(_invokedynamic): {
2213 
2214         if (!EnableInvokeDynamic) {
2215           // We should not encounter this bytecode if !EnableInvokeDynamic.
2216           // The verifier will stop it.  However, if we get past the verifier,
2217           // this will stop the thread in a reasonable way, without crashing the JVM.
2218           CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
2219                   handle_exception);
2220           ShouldNotReachHere();
2221         }
2222 
2223         u4 index = Bytes::get_native_u4(pc+1);
2224         ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2225 
2226         // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
2227         // This kind of CP cache entry does not need to match the flags byte, because
2228         // there is a 1-1 relation between bytecode type and CP entry type.
2229         if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2230           CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
2231                   handle_exception);
2232           cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2233         }
2234 
2235         Method* method = cache->f1_as_method();
2236         VERIFY_OOP(method);
2237 
2238         if (cache->has_appendix()) {
2239           ConstantPool* constants = METHOD->constants();
2240           SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2241           MORE_STACK(1);
2242         }
2243 
2244         istate->set_msg(call_method);
2245         istate->set_callee(method);
2246         istate->set_callee_entry_point(method->from_interpreted_entry());
2247         istate->set_bcp_advance(5);
2248 
2249         UPDATE_PC_AND_RETURN(0); // I'll be back...
2250       }
2251 
2252       CASE(_invokehandle): {
2253 
2254         if (!EnableInvokeDynamic) {
2255           ShouldNotReachHere();
2256         }
2257 
2258         u2 index = Bytes::get_native_u2(pc+1);
2259         ConstantPoolCacheEntry* cache = cp->entry_at(index);
2260 
2261         if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2262           CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
2263                   handle_exception);
2264           cache = cp->entry_at(index);
2265         }
2266 
2267         Method* method = cache->f1_as_method();
2268 
2269         VERIFY_OOP(method);
2270 
2271         if (cache->has_appendix()) {
2272           ConstantPool* constants = METHOD->constants();
2273           SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2274           MORE_STACK(1);
2275         }
2276 
2277         istate->set_msg(call_method);
2278         istate->set_callee(method);
2279         istate->set_callee_entry_point(method->from_interpreted_entry());
2280         istate->set_bcp_advance(3);
2281 
2282         UPDATE_PC_AND_RETURN(0); // I'll be back...
2283       }
2284 
2285       CASE(_invokeinterface): {
2286         u2 index = Bytes::get_native_u2(pc+1);
2287 
2288         // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2289         // out so c++ compiler has a chance for constant prop to fold everything possible away.
2290 
2291         ConstantPoolCacheEntry* cache = cp->entry_at(index);
2292         if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2293           CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
2294                   handle_exception);
2295           cache = cp->entry_at(index);
2296         }
2297 
2298         istate->set_msg(call_method);
2299 
2300         // Special case of invokeinterface called for virtual method of
2301         // java.lang.Object.  See cpCacheOop.cpp for details.
2302         // This code isn't produced by javac, but could be produced by
2303         // another compliant java compiler.
2304         if (cache->is_forced_virtual()) {
2305           Method* callee;
2306           CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2307           if (cache->is_vfinal()) {
2308             callee = cache->f2_as_vfinal_method();
2309           } else {
2310             // get receiver
2311             int parms = cache->parameter_size();
2312             // Same comments as invokevirtual apply here
2313             VERIFY_OOP(STACK_OBJECT(-parms));
2314             InstanceKlass* rcvrKlass = (InstanceKlass*)
2315                                  STACK_OBJECT(-parms)->klass();
2316             callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2317           }
2318           istate->set_callee(callee);
2319           istate->set_callee_entry_point(callee->from_interpreted_entry());
2320 #ifdef VM_JVMTI
2321           if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2322             istate->set_callee_entry_point(callee->interpreter_entry());
2323           }
2324 #endif /* VM_JVMTI */
2325           istate->set_bcp_advance(5);
2326           UPDATE_PC_AND_RETURN(0); // I'll be back...
2327         }
2328 
2329         // this could definitely be cleaned up QQQ
2330         Method* callee;
2331         Klass* iclass = cache->f1_as_klass();
2332         // InstanceKlass* interface = (InstanceKlass*) iclass;
2333         // get receiver
2334         int parms = cache->parameter_size();
2335         oop rcvr = STACK_OBJECT(-parms);
2336         CHECK_NULL(rcvr);
2337         InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
2338         itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
2339         int i;
2340         for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
2341           if (ki->interface_klass() == iclass) break;
2342         }
2343         // If the interface isn't found, this class doesn't implement this
2344         // interface.  The link resolver checks this but only for the first
2345         // time this interface is called.
2346         if (i == int2->itable_length()) {
2347           VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
2348         }
2349         int mindex = cache->f2_as_index();
2350         itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
2351         callee = im[mindex].method();
2352         if (callee == NULL) {
2353           VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "");
2354         }
2355 
2356         istate->set_callee(callee);
2357         istate->set_callee_entry_point(callee->from_interpreted_entry());
2358 #ifdef VM_JVMTI
2359         if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2360           istate->set_callee_entry_point(callee->interpreter_entry());
2361         }
2362 #endif /* VM_JVMTI */
2363         istate->set_bcp_advance(5);
2364         UPDATE_PC_AND_RETURN(0); // I'll be back...
2365       }
2366 
2367       CASE(_invokevirtual):
2368       CASE(_invokespecial):
2369       CASE(_invokestatic): {
2370         u2 index = Bytes::get_native_u2(pc+1);
2371 
2372         ConstantPoolCacheEntry* cache = cp->entry_at(index);
2373         // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2374         // out so c++ compiler has a chance for constant prop to fold everything possible away.
2375 
2376         if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2377           CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
2378                   handle_exception);
2379           cache = cp->entry_at(index);
2380         }
2381 
2382         istate->set_msg(call_method);
2383         {
2384           Method* callee;
2385           if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
2386             CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2387             if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method();
2388             else {
2389               // get receiver
2390               int parms = cache->parameter_size();
2391               // this works but needs a resourcemark and seems to create a vtable on every call:
2392               // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
2393               //
2394               // this fails with an assert
2395               // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
2396               // but this works
2397               VERIFY_OOP(STACK_OBJECT(-parms));
2398               InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass();
2399               /*
2400                 Executing this code in java.lang.String:
2401                     public String(char value[]) {
2402                           this.count = value.length;
2403                           this.value = (char[])value.clone();
2404                      }
2405 
2406                  a find on rcvr->klass() reports:
2407                  {type array char}{type array class}
2408                   - klass: {other class}
2409 
2410                   but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
2411                   because rcvr->klass()->oop_is_instance() == 0
2412                   However it seems to have a vtable in the right location. Huh?
2413 
2414               */
2415               callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2416             }
2417           } else {
2418             if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
2419               CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2420             }
2421             callee = cache->f1_as_method();
2422           }
2423 
2424           istate->set_callee(callee);
2425           istate->set_callee_entry_point(callee->from_interpreted_entry());
2426 #ifdef VM_JVMTI
2427           if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2428             istate->set_callee_entry_point(callee->interpreter_entry());
2429           }
2430 #endif /* VM_JVMTI */
2431           istate->set_bcp_advance(3);
2432           UPDATE_PC_AND_RETURN(0); // I'll be back...
2433         }
2434       }
2435 
2436       /* Allocate memory for a new java object. */
2437 
2438       CASE(_newarray): {
2439         BasicType atype = (BasicType) *(pc+1);
2440         jint size = STACK_INT(-1);
2441         CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
2442                 handle_exception);
2443         SET_STACK_OBJECT(THREAD->vm_result(), -1);
2444         THREAD->set_vm_result(NULL);
2445 
2446         UPDATE_PC_AND_CONTINUE(2);
2447       }
2448 
2449       /* Throw an exception. */
2450 
2451       CASE(_athrow): {
2452           oop except_oop = STACK_OBJECT(-1);
2453           CHECK_NULL(except_oop);
2454           // set pending_exception so we use common code
2455           THREAD->set_pending_exception(except_oop, NULL, 0);
2456           goto handle_exception;
2457       }
2458 
2459       /* goto and jsr. They are exactly the same except jsr pushes
2460        * the address of the next instruction first.
2461        */
2462 
2463       CASE(_jsr): {
2464           /* push bytecode index on stack */
2465           SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
2466           MORE_STACK(1);
2467           /* FALL THROUGH */
2468       }
2469 
2470       CASE(_goto):
2471       {
2472           int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
2473           address branch_pc = pc;
2474           UPDATE_PC(offset);
2475           DO_BACKEDGE_CHECKS(offset, branch_pc);
2476           CONTINUE;
2477       }
2478 
2479       CASE(_jsr_w): {
2480           /* push return address on the stack */
2481           SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
2482           MORE_STACK(1);
2483           /* FALL THROUGH */
2484       }
2485 
2486       CASE(_goto_w):
2487       {
2488           int32_t offset = Bytes::get_Java_u4(pc + 1);
2489           address branch_pc = pc;
2490           UPDATE_PC(offset);
2491           DO_BACKEDGE_CHECKS(offset, branch_pc);
2492           CONTINUE;
2493       }
2494 
2495       /* return from a jsr or jsr_w */
2496 
2497       CASE(_ret): {
2498           pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
2499           UPDATE_PC_AND_CONTINUE(0);
2500       }
2501 
2502       /* debugger breakpoint */
2503 
2504       CASE(_breakpoint): {
2505           Bytecodes::Code original_bytecode;
2506           DECACHE_STATE();
2507           SET_LAST_JAVA_FRAME();
2508           original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
2509                               METHOD, pc);
2510           RESET_LAST_JAVA_FRAME();
2511           CACHE_STATE();
2512           if (THREAD->has_pending_exception()) goto handle_exception;
2513             CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
2514                                                     handle_exception);
2515 
2516           opcode = (jubyte)original_bytecode;
2517           goto opcode_switch;
2518       }
2519 
2520       DEFAULT:
2521           fatal(err_msg("Unimplemented opcode %d = %s", opcode,
2522                         Bytecodes::name((Bytecodes::Code)opcode)));
2523           goto finish;
2524 
2525       } /* switch(opc) */
2526 
2527 
2528 #ifdef USELABELS
2529     check_for_exception:
2530 #endif
2531     {
2532       if (!THREAD->has_pending_exception()) {
2533         CONTINUE;
2534       }
2535       /* We will be gcsafe soon, so flush our state. */
2536       DECACHE_PC();
2537       goto handle_exception;
2538     }
2539   do_continue: ;
2540 
2541   } /* while (1) interpreter loop */
2542 
2543 
2544   // An exception exists in the thread state see whether this activation can handle it
2545   handle_exception: {
2546 
2547     HandleMarkCleaner __hmc(THREAD);
2548     Handle except_oop(THREAD, THREAD->pending_exception());
2549     // Prevent any subsequent HandleMarkCleaner in the VM
2550     // from freeing the except_oop handle.
2551     HandleMark __hm(THREAD);
2552 
2553     THREAD->clear_pending_exception();
2554     assert(except_oop(), "No exception to process");
2555     intptr_t continuation_bci;
2556     // expression stack is emptied
2557     topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2558     CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
2559             handle_exception);
2560 
2561     except_oop = THREAD->vm_result();
2562     THREAD->set_vm_result(NULL);
2563     if (continuation_bci >= 0) {
2564       // Place exception on top of stack
2565       SET_STACK_OBJECT(except_oop(), 0);
2566       MORE_STACK(1);
2567       pc = METHOD->code_base() + continuation_bci;
2568       if (TraceExceptions) {
2569         ttyLocker ttyl;
2570         ResourceMark rm;
2571         tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
2572         tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2573         tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
2574                       pc - (intptr_t)METHOD->code_base(),
2575                       continuation_bci, THREAD);
2576       }
2577       // for AbortVMOnException flag
2578       NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
2579       goto run;
2580     }
2581     if (TraceExceptions) {
2582       ttyLocker ttyl;
2583       ResourceMark rm;
2584       tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
2585       tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2586       tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
2587                     pc  - (intptr_t) METHOD->code_base(),
2588                     THREAD);
2589     }
2590     // for AbortVMOnException flag
2591     NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
2592     // No handler in this activation, unwind and try again
2593     THREAD->set_pending_exception(except_oop(), NULL, 0);
2594     goto handle_return;
2595   }  /* handle_exception: */
2596 
2597 
2598 
2599   // Return from an interpreter invocation with the result of the interpretation
2600   // on the top of the Java Stack (or a pending exception)
2601 
2602 handle_Pop_Frame:
2603 
2604   // We don't really do anything special here except we must be aware
2605   // that we can get here without ever locking the method (if sync).
2606   // Also we skip the notification of the exit.
2607 
2608   istate->set_msg(popping_frame);
2609   // Clear pending so while the pop is in process
2610   // we don't start another one if a call_vm is done.
2611   THREAD->clr_pop_frame_pending();
2612   // Let interpreter (only) see the we're in the process of popping a frame
2613   THREAD->set_pop_frame_in_process();
2614 
2615 handle_return:
2616   {
2617     DECACHE_STATE();
2618 
2619     bool suppress_error = istate->msg() == popping_frame;
2620     bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
2621     Handle original_exception(THREAD, THREAD->pending_exception());
2622     Handle illegal_state_oop(THREAD, NULL);
2623 
2624     // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
2625     // in any following VM entries from freeing our live handles, but illegal_state_oop
2626     // isn't really allocated yet and so doesn't become live until later and
2627     // in unpredicatable places. Instead we must protect the places where we enter the
2628     // VM. It would be much simpler (and safer) if we could allocate a real handle with
2629     // a NULL oop in it and then overwrite the oop later as needed. This isn't
2630     // unfortunately isn't possible.
2631 
2632     THREAD->clear_pending_exception();
2633 
2634     //
2635     // As far as we are concerned we have returned. If we have a pending exception
2636     // that will be returned as this invocation's result. However if we get any
2637     // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
2638     // will be our final result (i.e. monitor exception trumps a pending exception).
2639     //
2640 
2641     // If we never locked the method (or really passed the point where we would have),
2642     // there is no need to unlock it (or look for other monitors), since that
2643     // could not have happened.
2644 
2645     if (THREAD->do_not_unlock()) {
2646 
2647       // Never locked, reset the flag now because obviously any caller must
2648       // have passed their point of locking for us to have gotten here.
2649 
2650       THREAD->clr_do_not_unlock();
2651     } else {
2652       // At this point we consider that we have returned. We now check that the
2653       // locks were properly block structured. If we find that they were not
2654       // used properly we will return with an illegal monitor exception.
2655       // The exception is checked by the caller not the callee since this
2656       // checking is considered to be part of the invocation and therefore
2657       // in the callers scope (JVM spec 8.13).
2658       //
2659       // Another weird thing to watch for is if the method was locked
2660       // recursively and then not exited properly. This means we must
2661       // examine all the entries in reverse time(and stack) order and
2662       // unlock as we find them. If we find the method monitor before
2663       // we are at the initial entry then we should throw an exception.
2664       // It is not clear the template based interpreter does this
2665       // correctly
2666 
2667       BasicObjectLock* base = istate->monitor_base();
2668       BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
2669       bool method_unlock_needed = METHOD->is_synchronized();
2670       // We know the initial monitor was used for the method don't check that
2671       // slot in the loop
2672       if (method_unlock_needed) base--;
2673 
2674       // Check all the monitors to see they are unlocked. Install exception if found to be locked.
2675       while (end < base) {
2676         oop lockee = end->obj();
2677         if (lockee != NULL) {
2678           BasicLock* lock = end->lock();
2679           markOop header = lock->displaced_header();
2680           end->set_obj(NULL);
2681           // If it isn't recursive we either must swap old header or call the runtime
2682           if (header != NULL) {
2683             if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
2684               // restore object for the slow case
2685               end->set_obj(lockee);
2686               {
2687                 // Prevent any HandleMarkCleaner from freeing our live handles
2688                 HandleMark __hm(THREAD);
2689                 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
2690               }
2691             }
2692           }
2693           // One error is plenty
2694           if (illegal_state_oop() == NULL && !suppress_error) {
2695             {
2696               // Prevent any HandleMarkCleaner from freeing our live handles
2697               HandleMark __hm(THREAD);
2698               CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
2699             }
2700             assert(THREAD->has_pending_exception(), "Lost our exception!");
2701             illegal_state_oop = THREAD->pending_exception();
2702             THREAD->clear_pending_exception();
2703           }
2704         }
2705         end++;
2706       }
2707       // Unlock the method if needed
2708       if (method_unlock_needed) {
2709         if (base->obj() == NULL) {
2710           // The method is already unlocked this is not good.
2711           if (illegal_state_oop() == NULL && !suppress_error) {
2712             {
2713               // Prevent any HandleMarkCleaner from freeing our live handles
2714               HandleMark __hm(THREAD);
2715               CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
2716             }
2717             assert(THREAD->has_pending_exception(), "Lost our exception!");
2718             illegal_state_oop = THREAD->pending_exception();
2719             THREAD->clear_pending_exception();
2720           }
2721         } else {
2722           //
2723           // The initial monitor is always used for the method
2724           // However if that slot is no longer the oop for the method it was unlocked
2725           // and reused by something that wasn't unlocked!
2726           //
2727           // deopt can come in with rcvr dead because c2 knows
2728           // its value is preserved in the monitor. So we can't use locals[0] at all
2729           // and must use first monitor slot.
2730           //
2731           oop rcvr = base->obj();
2732           if (rcvr == NULL) {
2733             if (!suppress_error) {
2734               VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
2735               illegal_state_oop = THREAD->pending_exception();
2736               THREAD->clear_pending_exception();
2737             }
2738           } else {
2739             BasicLock* lock = base->lock();
2740             markOop header = lock->displaced_header();
2741             base->set_obj(NULL);
2742             // If it isn't recursive we either must swap old header or call the runtime
2743             if (header != NULL) {
2744               if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
2745                 // restore object for the slow case
2746                 base->set_obj(rcvr);
2747                 {
2748                   // Prevent any HandleMarkCleaner from freeing our live handles
2749                   HandleMark __hm(THREAD);
2750                   CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
2751                 }
2752                 if (THREAD->has_pending_exception()) {
2753                   if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
2754                   THREAD->clear_pending_exception();
2755                 }
2756               }
2757             }
2758           }
2759         }
2760       }
2761     }
2762 
2763     //
2764     // Notify jvmti/jvmdi
2765     //
2766     // NOTE: we do not notify a method_exit if we have a pending exception,
2767     // including an exception we generate for unlocking checks.  In the former
2768     // case, JVMDI has already been notified by our call for the exception handler
2769     // and in both cases as far as JVMDI is concerned we have already returned.
2770     // If we notify it again JVMDI will be all confused about how many frames
2771     // are still on the stack (4340444).
2772     //
2773     // NOTE Further! It turns out the the JVMTI spec in fact expects to see
2774     // method_exit events whenever we leave an activation unless it was done
2775     // for popframe. This is nothing like jvmdi. However we are passing the
2776     // tests at the moment (apparently because they are jvmdi based) so rather
2777     // than change this code and possibly fail tests we will leave it alone
2778     // (with this note) in anticipation of changing the vm and the tests
2779     // simultaneously.
2780 
2781 
2782     //
2783     suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
2784 
2785 
2786 
2787 #ifdef VM_JVMTI
2788       if (_jvmti_interp_events) {
2789         // Whenever JVMTI puts a thread in interp_only_mode, method
2790         // entry/exit events are sent for that thread to track stack depth.
2791         if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
2792           {
2793             // Prevent any HandleMarkCleaner from freeing our live handles
2794             HandleMark __hm(THREAD);
2795             CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
2796           }
2797         }
2798       }
2799 #endif /* VM_JVMTI */
2800 
2801     //
2802     // See if we are returning any exception
2803     // A pending exception that was pending prior to a possible popping frame
2804     // overrides the popping frame.
2805     //
2806     assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed");
2807     if (illegal_state_oop() != NULL || original_exception() != NULL) {
2808       // inform the frame manager we have no result
2809       istate->set_msg(throwing_exception);
2810       if (illegal_state_oop() != NULL)
2811         THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
2812       else
2813         THREAD->set_pending_exception(original_exception(), NULL, 0);
2814       istate->set_return_kind((Bytecodes::Code)opcode);
2815       UPDATE_PC_AND_RETURN(0);
2816     }
2817 
2818     if (istate->msg() == popping_frame) {
2819       // Make it simpler on the assembly code and set the message for the frame pop.
2820       // returns
2821       if (istate->prev() == NULL) {
2822         // We must be returning to a deoptimized frame (because popframe only happens between
2823         // two interpreted frames). We need to save the current arguments in C heap so that
2824         // the deoptimized frame when it restarts can copy the arguments to its expression
2825         // stack and re-execute the call. We also have to notify deoptimization that this
2826         // has occurred and to pick the preserved args copy them to the deoptimized frame's
2827         // java expression stack. Yuck.
2828         //
2829         THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
2830                                 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
2831         THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
2832       }
2833       THREAD->clr_pop_frame_in_process();
2834     }
2835 
2836     // Normal return
2837     // Advance the pc and return to frame manager
2838     istate->set_msg(return_from_method);
2839     istate->set_return_kind((Bytecodes::Code)opcode);
2840     UPDATE_PC_AND_RETURN(1);
2841   } /* handle_return: */
2842 
2843 // This is really a fatal error return
2844 
2845 finish:
2846   DECACHE_TOS();
2847   DECACHE_PC();
2848 
2849   return;
2850 }
2851 
2852 /*
2853  * All the code following this point is only produced once and is not present
2854  * in the JVMTI version of the interpreter
2855 */
2856 
2857 #ifndef VM_JVMTI
2858 
2859 // This constructor should only be used to contruct the object to signal
2860 // interpreter initialization. All other instances should be created by
2861 // the frame manager.
2862 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
2863   if (msg != initialize) ShouldNotReachHere();
2864   _msg = msg;
2865   _self_link = this;
2866   _prev_link = NULL;
2867 }
2868 
2869 // Inline static functions for Java Stack and Local manipulation
2870 
2871 // The implementations are platform dependent. We have to worry about alignment
2872 // issues on some machines which can change on the same platform depending on
2873 // whether it is an LP64 machine also.
2874 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
2875   return (address) tos[Interpreter::expr_index_at(-offset)];
2876 }
2877 
2878 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
2879   return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
2880 }
2881 
2882 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
2883   return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
2884 }
2885 
2886 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
2887   return (oop)tos [Interpreter::expr_index_at(-offset)];
2888 }
2889 
2890 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
2891   return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
2892 }
2893 
2894 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
2895   return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
2896 }
2897 
2898 // only used for value types
2899 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
2900                                                         int offset) {
2901   *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
2902 }
2903 
2904 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
2905                                                        int offset) {
2906   *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
2907 }
2908 
2909 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
2910                                                          int offset) {
2911   *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
2912 }
2913 
2914 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
2915                                                           int offset) {
2916   *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
2917 }
2918 
2919 // needs to be platform dep for the 32 bit platforms.
2920 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
2921                                                           int offset) {
2922   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
2923 }
2924 
2925 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
2926                                               address addr, int offset) {
2927   (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
2928                         ((VMJavaVal64*)addr)->d);
2929 }
2930 
2931 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
2932                                                         int offset) {
2933   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
2934   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
2935 }
2936 
2937 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
2938                                             address addr, int offset) {
2939   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
2940   ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
2941                         ((VMJavaVal64*)addr)->l;
2942 }
2943 
2944 // Locals
2945 
2946 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
2947   return (address)locals[Interpreter::local_index_at(-offset)];
2948 }
2949 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
2950   return (jint)locals[Interpreter::local_index_at(-offset)];
2951 }
2952 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
2953   return (jfloat)locals[Interpreter::local_index_at(-offset)];
2954 }
2955 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
2956   return (oop)locals[Interpreter::local_index_at(-offset)];
2957 }
2958 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
2959   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
2960 }
2961 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
2962   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
2963 }
2964 
2965 // Returns the address of locals value.
2966 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
2967   return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
2968 }
2969 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
2970   return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
2971 }
2972 
2973 // Used for local value or returnAddress
2974 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
2975                                    address value, int offset) {
2976   *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
2977 }
2978 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
2979                                    jint value, int offset) {
2980   *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
2981 }
2982 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
2983                                    jfloat value, int offset) {
2984   *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
2985 }
2986 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
2987                                    oop value, int offset) {
2988   *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
2989 }
2990 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
2991                                    jdouble value, int offset) {
2992   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
2993 }
2994 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
2995                                    jlong value, int offset) {
2996   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
2997 }
2998 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
2999                                    address addr, int offset) {
3000   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
3001 }
3002 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
3003                                    address addr, int offset) {
3004   ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
3005 }
3006 
3007 void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
3008                           intptr_t* locals, int locals_offset) {
3009   intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
3010   locals[Interpreter::local_index_at(-locals_offset)] = value;
3011 }
3012 
3013 
3014 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
3015                                    int to_offset) {
3016   tos[Interpreter::expr_index_at(-to_offset)] =
3017                       (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
3018 }
3019 
3020 void BytecodeInterpreter::dup(intptr_t *tos) {
3021   copy_stack_slot(tos, -1, 0);
3022 }
3023 void BytecodeInterpreter::dup2(intptr_t *tos) {
3024   copy_stack_slot(tos, -2, 0);
3025   copy_stack_slot(tos, -1, 1);
3026 }
3027 
3028 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
3029   /* insert top word two down */
3030   copy_stack_slot(tos, -1, 0);
3031   copy_stack_slot(tos, -2, -1);
3032   copy_stack_slot(tos, 0, -2);
3033 }
3034 
3035 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
3036   /* insert top word three down  */
3037   copy_stack_slot(tos, -1, 0);
3038   copy_stack_slot(tos, -2, -1);
3039   copy_stack_slot(tos, -3, -2);
3040   copy_stack_slot(tos, 0, -3);
3041 }
3042 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
3043   /* insert top 2 slots three down */
3044   copy_stack_slot(tos, -1, 1);
3045   copy_stack_slot(tos, -2, 0);
3046   copy_stack_slot(tos, -3, -1);
3047   copy_stack_slot(tos, 1, -2);
3048   copy_stack_slot(tos, 0, -3);
3049 }
3050 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
3051   /* insert top 2 slots four down */
3052   copy_stack_slot(tos, -1, 1);
3053   copy_stack_slot(tos, -2, 0);
3054   copy_stack_slot(tos, -3, -1);
3055   copy_stack_slot(tos, -4, -2);
3056   copy_stack_slot(tos, 1, -3);
3057   copy_stack_slot(tos, 0, -4);
3058 }
3059 
3060 
3061 void BytecodeInterpreter::swap(intptr_t *tos) {
3062   // swap top two elements
3063   intptr_t val = tos[Interpreter::expr_index_at(1)];
3064   // Copy -2 entry to -1
3065   copy_stack_slot(tos, -2, -1);
3066   // Store saved -1 entry into -2
3067   tos[Interpreter::expr_index_at(2)] = val;
3068 }
3069 // --------------------------------------------------------------------------------
3070 // Non-product code
3071 #ifndef PRODUCT
3072 
3073 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
3074   switch (msg) {
3075      case BytecodeInterpreter::no_request:  return("no_request");
3076      case BytecodeInterpreter::initialize:  return("initialize");
3077      // status message to C++ interpreter
3078      case BytecodeInterpreter::method_entry:  return("method_entry");
3079      case BytecodeInterpreter::method_resume:  return("method_resume");
3080      case BytecodeInterpreter::got_monitors:  return("got_monitors");
3081      case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
3082      // requests to frame manager from C++ interpreter
3083      case BytecodeInterpreter::call_method:  return("call_method");
3084      case BytecodeInterpreter::return_from_method:  return("return_from_method");
3085      case BytecodeInterpreter::more_monitors:  return("more_monitors");
3086      case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
3087      case BytecodeInterpreter::popping_frame:  return("popping_frame");
3088      case BytecodeInterpreter::do_osr:  return("do_osr");
3089      // deopt
3090      case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
3091      case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
3092      default: return("BAD MSG");
3093   }
3094 }
3095 void
3096 BytecodeInterpreter::print() {
3097   tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
3098   tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
3099   tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
3100   tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
3101   {
3102     ResourceMark rm;
3103     char *method_name = _method->name_and_sig_as_C_string();
3104     tty->print_cr("method: " INTPTR_FORMAT "[ %s ]",  (uintptr_t) this->_method, method_name);
3105   }
3106   tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
3107   tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
3108   tty->print_cr("msg: %s", C_msg(this->_msg));
3109   tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
3110   tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
3111   tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
3112   tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
3113   tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
3114   tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind);
3115   tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
3116   tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
3117   tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
3118   tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
3119   tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
3120 #ifdef SPARC
3121   tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
3122   tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
3123   tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
3124   tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
3125 #endif
3126 #if !defined(ZERO)
3127   tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
3128 #endif // !ZERO
3129   tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
3130 }
3131 
3132 extern "C" {
3133     void PI(uintptr_t arg) {
3134         ((BytecodeInterpreter*)arg)->print();
3135     }
3136 }
3137 #endif // PRODUCT
3138 
3139 #endif // JVMTI
3140 #endif // CC_INTERP