/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif


// no precompiled headers
#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step through bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif
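// Illustration (not compiled; a sketch of what the two dispatch styles in
// this file expand to). With USELABELS the main loop becomes a computed goto
// through a table of GCC label addresses, roughly:
//
//   static const void* dispatch[256] = { &&opc_nop, &&opc_aconst_null, ... };
//   opc_nop:                        // body of the opcode
//     pc += 1; opcode = *pc;        // fetch the next bytecode
//     goto *dispatch[opcode];       // jump straight to its handler
//
// Without USELABELS, dispatch falls back to a portable
// while (1) { switch (opcode) { case ...: ... } } loop.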
#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT                                                      \
    if ( SafepointSynchronize::is_synchronizing()) {                   \
        {                                                              \
          /* zap freed handles rather than GC'ing them */              \
          HandleMarkCleaner __hmc(THREAD);                             \
        }                                                              \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg)                                \
    DECACHE_STATE();                                                    \
    SET_LAST_JAVA_FRAME();                                              \
    {                                                                   \
       ThreadInVMfromJava trans(THREAD);                                \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);   \
    }                                                                   \
    RESET_LAST_JAVA_FRAME();                                            \
    CACHE_STATE();

// Normal throw of a java error
#define VM_JAVA_ERROR(name, msg)                                        \
    VM_JAVA_ERROR_NO_JUMP(name, msg)                                    \
    goto handle_exception;
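// For reference (annotation, not compiled code): a typical use of the macro
// above is the null check later in this file,
//
//   VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");
//
// which flushes the cached pc/tos back to the frame (DECACHE_STATE),
// publishes a last-Java-frame so the stack is walkable, performs the
// Java->VM thread state transition, raises the exception, and then jumps
// to handle_exception.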
#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                   topOfStack[Interpreter::expr_index_at(1)],                        \
                                   topOfStack[Interpreter::expr_index_at(2)]),                       \
                                   handle_exception);                                                \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result, if we call at_single_stepping_point() we refetch the opcode
   afterwards to get the current opcode. This will override any other
   prefetching that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                            \
{                                                                \
      if (_jvmti_interp_events) {                                \
        if (JvmtiExport::should_post_single_step()) {            \
          DECACHE_STATE();                                       \
          SET_LAST_JAVA_FRAME();                                 \
          ThreadInVMfromJava trans(THREAD);                      \
          JvmtiExport::at_single_stepping_point(THREAD,          \
                                          istate->method(),      \
                                          pc);                   \
          RESET_LAST_JAVA_FRAME();                               \
          CACHE_STATE();                                         \
          if (THREAD->pop_frame_pending() &&                     \
              !THREAD->pop_frame_in_process()) {                 \
            goto handle_Pop_Frame;                               \
          }                                                      \
          opcode = *pc;                                          \
        }                                                        \
      }                                                          \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif

/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif
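// Illustration (annotation only): with USELABELS and PREFETCH_OPCCODE both in
// effect, each handler ends by fetching and dispatching the next opcode, so
// an opcode like iconst_0 executes roughly as
//
//   opc_iconst_0:
//     SET_STACK_INT(0, 0);             // opcode body
//     pc += 1; opcode = *pc;           // advance and prefetch
//     MORE_STACK(1);                   // grow the expression stack
//     goto *dispatch_table[opcode];    // computed-goto dispatch
//
// In the switch-based fallback, CONTINUE instead re-enters the while loop.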
#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method, save the adjusted pc and return to frame manager
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res)                                        \
  res = METHOD->method_counters();                                      \
  if (res == NULL) {                                                    \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                         \
    if ((skip) <= 0) {                                                              \
      MethodCounters* mcs;                                                          \
      GET_METHOD_COUNTERS(mcs);                                                     \
      if (UseLoopCounter) {                                                         \
        bool do_OSR = UseOnStackReplacement;                                        \
        mcs->backedge_counter()->increment();                                       \
        if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit();    \
        if (do_OSR) {                                                               \
          nmethod* osr_nmethod;                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                      \
          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
            intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD);             \
            istate->set_msg(do_osr);                                                \
            istate->set_osr_buf((address)buf);                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                        \
            return;                                                                 \
          }                                                                         \
        }                                                                           \
      }  /* UseCompiler ... */                                                      \
      mcs->invocation_counter()->increment();                                       \
      SAFEPOINT;                                                                    \
    }
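// In outline (a sketch of the macro above, not separate code): on every
// backward branch (skip <= 0) the interpreter
//
//   1. bumps the method's backedge counter;
//   2. if UseOnStackReplacement is set and the counter has reached its limit,
//      asks the VM for an OSR nmethod via frequency_counter_overflow(branch_pc);
//   3. if one is ready, packs the interpreter frame into a migration buffer
//      (OSR_migration_begin), posts the do_osr message, and returns to the
//      frame manager, which continues execution in compiled code;
//   4. otherwise bumps the invocation counter and takes a SAFEPOINT, making
//      every loop a GC point.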
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                 \
    if ((obj_) == NULL) {                                                \
        VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
    }                                                                    \
    VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM, don't check for pending exceptions
#define CALL_VM_NOCHECK(func)                    \
          DECACHE_STATE();                       \
          SET_LAST_JAVA_FRAME();                 \
          func;                                  \
          RESET_LAST_JAVA_FRAME();               \
          CACHE_STATE();                         \
          if (THREAD->pop_frame_pending() &&     \
              !THREAD->pop_frame_in_process()) { \
            goto handle_Pop_Frame;               \
          }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                             \
          CALL_VM_NOCHECK(func);                           \
          if (THREAD->has_pending_exception()) goto label; \
        }
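// Usage pattern (for reference; these calls appear throughout the loop below):
//
//   CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
//
// i.e. flush pc/tos, publish the last Java frame, run the VM entry, recache
// the interpreter state, honor a pending PopFrame request, and branch to
// handle_exception if the call left a pending exception.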
/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * runWithChecks is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
    // because in that case, EnableInvokeDynamic is true by default but will be later switched off
    // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
    // for the old JSR292 implementation.
    // This leads to a situation where 'istate->_stack_limit' always accounts for
    // methodOopDesc::extra_stack_entries() because it is computed in
    // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
    // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
    // account for extra_stack_entries() anymore because at the time when it is called
    // EnableInvokeDynamic was already set to false.
    // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
    // switched off because of the wrong classes.
    if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
      assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
    } else {
      const int extra_stack_entries = Method::extra_stack_entries_for_indy;
      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
                                                                  + 1), "bad stack limit");
    }
#ifndef SHARK
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate;
  // save a copy so we can verify it.
  interpreterState orig = istate;
#endif

  register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address          pc = istate->bcp();
  register jubyte opcode;
  register intptr_t*        locals = istate->locals();
  register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*      THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif

#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,

/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,

/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,

/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,

/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,

/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,

/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,

/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,

/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xD0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xE4 */ &&opc_default,     &&opc_fast_aldc,      &&opc_fast_aldc_w,  &&opc_return_register_finalizer,
/* 0xE8 */ &&opc_invokehandle,&&opc_default,        &&opc_default,      &&opc_default,
/* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xF4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xF8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xFC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */
#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif
// #define HACK
#ifdef HACK
  bool interesting = false;
#endif // HACK

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
            topOfStack >= istate->stack_limit() &&
            topOfStack < istate->stack_base(),
            "Stack top out of range");

  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit()) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);

          // We no longer retry on a counter overflow

          // istate->set_msg(retry_method);
          // THREAD->clr_do_not_unlock();
          // return;
        }
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("entering: depth %d bci: %d",
                         (istate->_stack_base - istate->_stack),
                         istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK


      // lock method if synchronized
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused a race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);
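          // How to read the test below (annotation, not original code): the
          // XOR compares (prototype header | our thread id) against the
          // object's current mark word, ignoring the age bits. Then:
          //   result == 0          -> already biased towards this thread;
          //   bias bits differ     -> the klass no longer permits biased
          //                           locking, so try to revoke the bias;
          //   epoch bits differ    -> the bias epoch expired, try to rebias;
          //   otherwise (thread    -> object is anonymously biased or biased
          //   bits differ)            to another thread; try to CAS in our
          //                           bias, falling back to the VM on failure.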
          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }
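        // The fallback below is the classic stack-locking protocol (sketched
        // here for reference): store the object's unlocked mark word into the
        // on-stack BasicLock as the "displaced header", then CAS a pointer to
        // that lock into the object's mark word. A NULL displaced header marks
        // a recursive enter by the same thread; any contention goes to the VM
        // via InterpreterRuntime::monitorenter.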
        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      ShouldNotReachHere();  // we don't return this.
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                         (istate->_stack_base - istate->_stack) ,
                         istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result; all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending)
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes,
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here.
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;
      goto run;
    }
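    // got_monitors (below) is the second half of a two-step protocol with the
    // frame manager: when monitorenter finds no free slot, the interpreter
    // posts istate->set_msg(more_monitors) and returns; the frame manager
    // grows the monitor area and re-enters here so locking can be retried
    // with the newly allocated slot at the "top" of the monitor stack.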
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // dereferencing lockee ought to provoke an implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using these labels avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value)                          \
      CASE(opcode):                                                     \
          SET_STACK_ ## const_type(value, 0);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,       -1);
          OPC_CONST_n(_iconst_0,    INT,        0);
          OPC_CONST_n(_iconst_1,    INT,        1);
          OPC_CONST_n(_iconst_2,    INT,        2);
          OPC_CONST_n(_iconst_3,    INT,        3);
          OPC_CONST_n(_iconst_4,    INT,        4);
          OPC_CONST_n(_iconst_5,    INT,        5);
          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);
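          // Worked example (annotation only): for iconst_2 the macro above
          // expands to SET_STACK_INT(2, 0), which writes 2 at offset 0 above
          // the current topOfStack, and UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1),
          // which advances pc past the 1-byte opcode, grows the expression
          // stack by one slot, and dispatches the next bytecode.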
#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind)                         \
      CASE(_##opcname):                                                 \
      {                                                                 \
          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      }
         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
         OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
         OPC_CONST2_n(lconst_0, Zero, long, LONG);
         OPC_CONST2_n(lconst_1, One,  long, LONG);

         /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num)                                                 \
      CASE(_aload_##num):                                               \
          VERIFY_OOP(LOCALS_OBJECT(num));                               \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_iload_##num):                                               \
      CASE(_fload_##num):                                               \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_lload_##num):                                               \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      CASE(_dload_##num):                                               \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
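      // Operand layout handled by the case below (per the JVM spec): wide
      // prefixes a load/store/ret with a 16-bit local index, and wide iinc
      // additionally carries a 16-bit signed increment, e.g.
      //
      //   0xC4 0x84 indexbyte1 indexbyte2 constbyte1 constbyte2   // wide iinc
      //
      // which is why the pc advances by 4 (or 6 for iinc) instead of 2 (or 3).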
      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];
          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                              \
          astore(topOfStack, -1, locals, num);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_istore_##num):                                              \
      CASE(_fstore_##num):                                              \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                              \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      CASE(_lstore_##num):                                              \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):    /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):    /* insert top word three down  */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):   /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):   /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {        /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                                \
          if (test && (STACK_INT(-1) == 0)) {                           \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero");                               \
          }                                                             \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          if (test) {                                                   \
            jlong l1 = STACK_LONG(-1);                                  \
            if (VMlongEqz(l1)) {                                        \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero");                          \
            }                                                           \
          }                                                             \
          /* First long at (-1,-2) next long at (-3,-4) */              \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
                                        STACK_LONG(-1)),                \
                                        -3);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);
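      // Note (annotation): the third macro argument selects the
      // divide-by-zero check, so only idiv/irem and ldiv/lrem test the
      // divisor and throw ArithmeticException("/ by zero"); for add/sub/mul
      // and the bitwise ops the compiler folds the test away since `test`
      // is the constant 0.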
          /* Perform various binary floating number operations */
          /* On some machines/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                                  \
      CASE(_d##opcname): {                                                 \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
                                            STACK_DOUBLE(-1)),             \
                                            -3);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
      }                                                                    \
      CASE(_f##opcname):                                                   \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
                                          STACK_FLOAT(-1)),                \
                                          -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
                                     STACK_INT(-1)),                    \
                                     -2);                               \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      CASE(_l##opcname):                                                \
      {                                                                 \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
                                       STACK_INT(-1)),                  \
                                       -2);                             \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

     /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

     /* negate the value on the top of the stack */

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }
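      // Slot bookkeeping (annotation): category-2 values (long/double) occupy
      // two expression-stack slots, which is why i2l above pops one int slot
      // (MORE_STACK(-1)) and then stores the long with SET_STACK_LONG(r, 1)
      // before growing the stack by two; the net stack effect is +1 slot.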
      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong??)
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):   /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):  /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):  /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):  /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
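      // How the branch macros below compute `skip` (annotation): if the
      // condition holds, skip is the signed 16-bit branch offset taken from
      // the two operand bytes at pc+1; otherwise it is 3, the length of the
      // if<cond> instruction, so UPDATE_PC_AND_TOS simply falls through.
      // A skip <= 0 is a backward branch and triggers DO_BACKEDGE_CHECKS
      // (GC point and possible OSR).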
      /* comparison operators */


#define COMPARISON_OP(name, comparison)                                      \
      CASE(_if_icmp##name): {                                                \
          int skip = (STACK_INT(-2) comparison STACK_INT(-1))                \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }                                                                      \
      CASE(_if##name): {                                                     \
          int skip = (STACK_INT(-1) comparison 0)                            \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define COMPARISON_OP2(name, comparison)                                     \
      COMPARISON_OP(name, comparison)                                        \
      CASE(_if_acmp##name): {                                                \
          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))          \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_NOT_OP(name)                                         \
      CASE(_if##name): {                                                     \
          int skip = (!(STACK_OBJECT(-1) == NULL))                           \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_OP(name)                                             \
      CASE(_if##name): {                                                     \
          int skip = ((STACK_OBJECT(-1) == NULL))                            \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);
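      /* Operand layout for the two switch cases below (per the JVM spec):
       * after the opcode the operands are padded to a 4-byte boundary
       * (VMalignWordUp); tableswitch then reads {default, low, high} followed
       * by (high - low + 1) jump offsets, while lookupswitch reads
       * {default, npairs} followed by npairs of {match, offset} pairs.
       */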
      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          skip = ((uint32_t) key > (uint32_t)(high - low))
                      ? Bytes::get_Java_u4((address)&lpc[0])
                      : Bytes::get_Java_u4((address)&lpc[key + 3]);
          // Does this really need a full backedge check (osr?)
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
              lpc += 2;
              if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
                  skip = Bytes::get_Java_u4((address)&lpc[1]);
                  break;
              }
          }
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff)                                                  \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);                      \
      jint     index  = STACK_INT(arrayOff + 1);                               \
      char message[jintAsStringSize];                                          \
      CHECK_NULL(arrObj);                                                      \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
          sprintf(message, "%d", index);                                       \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message);                                              \
      }
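      // Bounds-check trick (annotation): casting both sides to uint32_t folds
      // the two tests index < 0 and index >= length into a single unsigned
      // compare, since a negative index becomes a huge unsigned value, e.g.
      // (uint32_t)-1 == 0xFFFFFFFF >= any valid array length.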
1696 // Seems way more expensive now that we must dispatch 1697 // 1698 if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is... 1699 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), ""); 1700 } 1701 } 1702 ((objArrayOopDesc *) arrObj)->obj_at_put(index, rhsObject); 1703 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1704 } 1705 CASE(_bastore): 1706 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1707 CASE(_castore): 1708 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1709 CASE(_sastore): 1710 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1711 CASE(_lastore): 1712 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1713 CASE(_dastore): 1714 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1715 1716 CASE(_arraylength): 1717 { 1718 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1719 CHECK_NULL(ary); 1720 SET_STACK_INT(ary->length(), -1); 1721 UPDATE_PC_AND_CONTINUE(1); 1722 } 1723 1724 /* monitorenter and monitorexit for locking/unlocking an object */ 1725 1726 CASE(_monitorenter): { 1727 oop lockee = STACK_OBJECT(-1); 1728 // derefing's lockee ought to provoke implicit null check 1729 CHECK_NULL(lockee); 1730 // find a free monitor or one already allocated for this object 1731 // if we find a matching object then we need a new monitor 1732 // since this is recursive enter 1733 BasicObjectLock* limit = istate->monitor_base(); 1734 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1735 BasicObjectLock* entry = NULL; 1736 while (most_recent != limit ) { 1737 if (most_recent->obj() == NULL) entry = most_recent; 1738 else if (most_recent->obj() == lockee) break; 1739 most_recent++; 1740 } 1741 if (entry != NULL) { 1742 entry->set_obj(lockee); 1743 int success = false; 1744 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1745 1746 markOop mark = lockee->mark(); 1747 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1748 // implies UseBiasedLocking 1749 if (mark->has_bias_pattern()) { 1750 uintptr_t thread_ident; 1751 uintptr_t anticipated_bias_locking_value; 1752 thread_ident = (uintptr_t)istate->thread(); 1753 anticipated_bias_locking_value = 1754 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1755 ~((uintptr_t) markOopDesc::age_mask_in_place); 1756 1757 if (anticipated_bias_locking_value == 0) { 1758 // already biased towards this thread, nothing to do 1759 if (PrintBiasedLockingStatistics) { 1760 (* BiasedLocking::biased_lock_entry_count_addr())++; 1761 } 1762 success = true; 1763 } 1764 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1765 // try revoke bias 1766 markOop header = lockee->klass()->prototype_header(); 1767 if (hash != markOopDesc::no_hash) { 1768 header = header->copy_set_hash(hash); 1769 } 1770 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1771 if (PrintBiasedLockingStatistics) 1772 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1773 } 1774 } 1775 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1776 // try rebias 1777 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1778 if (hash != markOopDesc::no_hash) { 1779 new_header = new_header->copy_set_hash(hash); 1780 } 1781 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 1782 if (PrintBiasedLockingStatistics) 1783 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1784 } 1785 else { 1786 
CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1787 } 1788 success = true; 1789 } 1790 else { 1791 // try to bias towards thread in case object is anonymously biased 1792 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1793 (uintptr_t)markOopDesc::age_mask_in_place | 1794 epoch_mask_in_place)); 1795 if (hash != markOopDesc::no_hash) { 1796 header = header->copy_set_hash(hash); 1797 } 1798 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1799 // debugging hint 1800 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1801 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1802 if (PrintBiasedLockingStatistics) 1803 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1804 } 1805 else { 1806 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1807 } 1808 success = true; 1809 } 1810 } 1811 1812 // traditional lightweight locking 1813 if (!success) { 1814 markOop displaced = lockee->mark()->set_unlocked(); 1815 entry->lock()->set_displaced_header(displaced); 1816 bool call_vm = UseHeavyMonitors; 1817 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1818 // Is it simple recursive case? 1819 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1820 entry->lock()->set_displaced_header(NULL); 1821 } else { 1822 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1823 } 1824 } 1825 } 1826 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1827 } else { 1828 istate->set_msg(more_monitors); 1829 UPDATE_PC_AND_RETURN(0); // Re-execute 1830 } 1831 } 1832 1833 CASE(_monitorexit): { 1834 oop lockee = STACK_OBJECT(-1); 1835 CHECK_NULL(lockee); 1836 // derefing's lockee ought to provoke implicit null check 1837 // find our monitor slot 1838 BasicObjectLock* limit = istate->monitor_base(); 1839 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1840 while (most_recent != limit ) { 1841 if ((most_recent)->obj() == lockee) { 1842 BasicLock* lock = most_recent->lock(); 1843 markOop header = lock->displaced_header(); 1844 most_recent->set_obj(NULL); 1845 if (!lockee->mark()->has_bias_pattern()) { 1846 bool call_vm = UseHeavyMonitors; 1847 // If it isn't recursive we either must swap old header or call the runtime 1848 if (header != NULL || call_vm) { 1849 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1850 // restore object for the slow case 1851 most_recent->set_obj(lockee); 1852 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1853 } 1854 } 1855 } 1856 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1857 } 1858 most_recent++; 1859 } 1860 // Need to throw illegal monitor state exception 1861 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1862 ShouldNotReachHere(); 1863 } 1864 1865 /* All of the non-quick opcodes. */ 1866 1867 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1868 * constant pool index in the instruction. 1869 */ 1870 CASE(_getfield): 1871 CASE(_getstatic): 1872 { 1873 u2 index; 1874 ConstantPoolCacheEntry* cache; 1875 index = Bytes::get_native_u2(pc+1); 1876 1877 // QQQ Need to make this as inlined as possible. Probably need to 1878 // split all the bytecode cases out so c++ compiler has a chance 1879 // for constant prop to fold everything possible away. 
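      // A descriptive sketch of the fast path being set up here (no new
      // logic): the rewriter replaced the Java-order u2 constant pool index
      // with a native-order u2 index into the constant pool *cache*, which
      // is why the fetch above uses Bytes::get_native_u2(pc+1). The first
      // execution resolves the entry via resolve_get_put(); afterwards the
      // field's offset and TosState are read straight out of the cache
      // entry with no VM transition.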
1880 1881 cache = cp->entry_at(index); 1882 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1883 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 1884 handle_exception); 1885 cache = cp->entry_at(index); 1886 } 1887 1888 #ifdef VM_JVMTI 1889 if (_jvmti_interp_events) { 1890 int *count_addr; 1891 oop obj; 1892 // Check to see if a field modification watch has been set 1893 // before we take the time to call into the VM. 1894 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1895 if ( *count_addr > 0 ) { 1896 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1897 obj = (oop)NULL; 1898 } else { 1899 obj = (oop) STACK_OBJECT(-1); 1900 VERIFY_OOP(obj); 1901 } 1902 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1903 obj, 1904 cache), 1905 handle_exception); 1906 } 1907 } 1908 #endif /* VM_JVMTI */ 1909 1910 oop obj; 1911 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1912 Klass* k = cache->f1_as_klass(); 1913 obj = k->java_mirror(); 1914 MORE_STACK(1); // Assume single slot push 1915 } else { 1916 obj = (oop) STACK_OBJECT(-1); 1917 CHECK_NULL(obj); 1918 } 1919 1920 // 1921 // Now store the result on the stack 1922 // 1923 TosState tos_type = cache->flag_state(); 1924 int field_offset = cache->f2_as_index(); 1925 if (cache->is_volatile()) { 1926 if (tos_type == atos) { 1927 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 1928 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 1929 } else if (tos_type == itos) { 1930 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 1931 } else if (tos_type == ltos) { 1932 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 1933 MORE_STACK(1); 1934 } else if (tos_type == btos) { 1935 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 1936 } else if (tos_type == ctos) { 1937 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 1938 } else if (tos_type == stos) { 1939 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 1940 } else if (tos_type == ftos) { 1941 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 1942 } else { 1943 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 1944 MORE_STACK(1); 1945 } 1946 } else { 1947 if (tos_type == atos) { 1948 VERIFY_OOP(obj->obj_field(field_offset)); 1949 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 1950 } else if (tos_type == itos) { 1951 SET_STACK_INT(obj->int_field(field_offset), -1); 1952 } else if (tos_type == ltos) { 1953 SET_STACK_LONG(obj->long_field(field_offset), 0); 1954 MORE_STACK(1); 1955 } else if (tos_type == btos) { 1956 SET_STACK_INT(obj->byte_field(field_offset), -1); 1957 } else if (tos_type == ctos) { 1958 SET_STACK_INT(obj->char_field(field_offset), -1); 1959 } else if (tos_type == stos) { 1960 SET_STACK_INT(obj->short_field(field_offset), -1); 1961 } else if (tos_type == ftos) { 1962 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 1963 } else { 1964 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 1965 MORE_STACK(1); 1966 } 1967 } 1968 1969 UPDATE_PC_AND_CONTINUE(3); 1970 } 1971 1972 CASE(_putfield): 1973 CASE(_putstatic): 1974 { 1975 u2 index = Bytes::get_native_u2(pc+1); 1976 ConstantPoolCacheEntry* cache = cp->entry_at(index); 1977 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1978 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 1979 handle_exception); 1980 cache = cp->entry_at(index); 1981 } 1982 1983 #ifdef VM_JVMTI 1984 if (_jvmti_interp_events) { 1985 int *count_addr; 1986 oop obj; 1987 // Check to see if a field 
modification watch has been set 1988 // before we take the time to call into the VM. 1989 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 1990 if ( *count_addr > 0 ) { 1991 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 1992 obj = (oop)NULL; 1993 } 1994 else { 1995 if (cache->is_long() || cache->is_double()) { 1996 obj = (oop) STACK_OBJECT(-3); 1997 } else { 1998 obj = (oop) STACK_OBJECT(-2); 1999 } 2000 VERIFY_OOP(obj); 2001 } 2002 2003 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2004 obj, 2005 cache, 2006 (jvalue *)STACK_SLOT(-1)), 2007 handle_exception); 2008 } 2009 } 2010 #endif /* VM_JVMTI */ 2011 2012 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2013 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2014 2015 oop obj; 2016 int count; 2017 TosState tos_type = cache->flag_state(); 2018 2019 count = -1; 2020 if (tos_type == ltos || tos_type == dtos) { 2021 --count; 2022 } 2023 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2024 Klass* k = cache->f1_as_klass(); 2025 obj = k->java_mirror(); 2026 } else { 2027 --count; 2028 obj = (oop) STACK_OBJECT(count); 2029 CHECK_NULL(obj); 2030 } 2031 2032 // 2033 // Now store the result 2034 // 2035 int field_offset = cache->f2_as_index(); 2036 if (cache->is_volatile()) { 2037 if (tos_type == itos) { 2038 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2039 } else if (tos_type == atos) { 2040 VERIFY_OOP(STACK_OBJECT(-1)); 2041 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2042 } else if (tos_type == btos) { 2043 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2044 } else if (tos_type == ltos) { 2045 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2046 } else if (tos_type == ctos) { 2047 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2048 } else if (tos_type == stos) { 2049 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2050 } else if (tos_type == ftos) { 2051 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2052 } else { 2053 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2054 } 2055 OrderAccess::storeload(); 2056 } else { 2057 if (tos_type == itos) { 2058 obj->int_field_put(field_offset, STACK_INT(-1)); 2059 } else if (tos_type == atos) { 2060 VERIFY_OOP(STACK_OBJECT(-1)); 2061 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2062 } else if (tos_type == btos) { 2063 obj->byte_field_put(field_offset, STACK_INT(-1)); 2064 } else if (tos_type == ltos) { 2065 obj->long_field_put(field_offset, STACK_LONG(-1)); 2066 } else if (tos_type == ctos) { 2067 obj->char_field_put(field_offset, STACK_INT(-1)); 2068 } else if (tos_type == stos) { 2069 obj->short_field_put(field_offset, STACK_INT(-1)); 2070 } else if (tos_type == ftos) { 2071 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2072 } else { 2073 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2074 } 2075 } 2076 2077 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2078 } 2079 2080 CASE(_new): { 2081 u2 index = Bytes::get_Java_u2(pc+1); 2082 ConstantPool* constants = istate->method()->constants(); 2083 if (!constants->tag_at(index).is_unresolved_klass()) { 2084 // Make sure klass is initialized and doesn't have a finalizer 2085 Klass* entry = constants->slot_at(index).get_klass(); 2086 assert(entry->is_klass(), "Should be resolved klass"); 2087 Klass* k_entry = (Klass*) entry; 2088 assert(k_entry->oop_is_instance(), "Should be InstanceKlass"); 2089 InstanceKlass* 
ik = (InstanceKlass*) k_entry;
          if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
            size_t obj_size = ik->size_helper();
            oop result = NULL;
            // If the TLAB isn't pre-zeroed then we'll have to do it
            bool need_zero = !ZeroTLAB;
            if (UseTLAB) {
              result = (oop) THREAD->tlab().allocate(obj_size);
            }
            if (result == NULL) {
              need_zero = true;
              // Try allocate in shared eden
        retry:
              HeapWord* compare_to = *Universe::heap()->top_addr();
              HeapWord* new_top = compare_to + obj_size;
              if (new_top <= *Universe::heap()->end_addr()) {
                if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
                  goto retry;
                }
                result = (oop) compare_to;
              }
            }
            if (result != NULL) {
              // Initialize object (if nonzero size and need) and then the header
              if (need_zero ) {
                HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
                obj_size -= sizeof(oopDesc) / oopSize;
                if (obj_size > 0 ) {
                  memset(to_zero, 0, obj_size * HeapWordSize);
                }
              }
              if (UseBiasedLocking) {
                result->set_mark(ik->prototype_header());
              } else {
                result->set_mark(markOopDesc::prototype());
              }
              result->set_klass_gap(0);
              result->set_klass(k_entry);
              SET_STACK_OBJECT(result, 0);
              UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
            }
          }
        }
        // Slow case allocation
        CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
                handle_exception);
        SET_STACK_OBJECT(THREAD->vm_result(), 0);
        THREAD->set_vm_result(NULL);
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
      }
      CASE(_anewarray): {
        u2 index = Bytes::get_Java_u2(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
                handle_exception);
        SET_STACK_OBJECT(THREAD->vm_result(), -1);
        THREAD->set_vm_result(NULL);
        UPDATE_PC_AND_CONTINUE(3);
      }
      CASE(_multianewarray): {
        jint dims = *(pc+3);
        jint size = STACK_INT(-1);
        // stack grows down, dimensions are up!
        jint *dimarray =
                   (jint*)&topOfStack[dims * Interpreter::stackElementWords+
                                      Interpreter::stackElementWords-1];
        //adjust pointer to start of stack element
        CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                handle_exception);
        SET_STACK_OBJECT(THREAD->vm_result(), -dims);
        THREAD->set_vm_result(NULL);
        UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
      }
      CASE(_checkcast):
          if (STACK_OBJECT(-1) != NULL) {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            if (ProfileInterpreter) {
              // needs Profile_checkcast QQQ
              ShouldNotReachHere();
            }
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
            Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch
            //
            if (objKlassOop != klassOf &&
                !objKlassOop->is_subtype_of(klassOf)) {
              ResourceMark rm(THREAD);
              const char* objName = objKlassOop->external_name();
              const char* klassName = klassOf->external_name();
              char* message = SharedRuntime::generate_class_cast_message(
                objName, klassName);
              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
            }
          } else {
            if (UncommonNullCast) {
//              istate->method()->set_null_cast_seen();
// [RGV] Not sure what to do here!

            }
          }
          UPDATE_PC_AND_CONTINUE(3);

      CASE(_instanceof):
          if (STACK_OBJECT(-1) == NULL) {
            SET_STACK_INT(0, -1);
          } else {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
            Klass* objKlassOop = STACK_OBJECT(-1)->klass();
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch
            //
            if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) {
              SET_STACK_INT(1, -1);
            } else {
              SET_STACK_INT(0, -1);
            }
          }
          UPDATE_PC_AND_CONTINUE(3);

      CASE(_ldc_w):
      CASE(_ldc):
        {
          u2 index;
          bool wide = false;
          int incr = 2; // frequent case
          if (opcode == Bytecodes::_ldc) {
            index = pc[1];
          } else {
            index = Bytes::get_Java_u2(pc+1);
            incr = 3;
            wide = true;
          }

          ConstantPool* constants = METHOD->constants();
          switch (constants->tag_at(index).value()) {
          case JVM_CONSTANT_Integer:
            SET_STACK_INT(constants->int_at(index), 0);
            break;

          case JVM_CONSTANT_Float:
            SET_STACK_FLOAT(constants->float_at(index), 0);
            break;

          case JVM_CONSTANT_String:
            {
              oop result = constants->resolved_references()->obj_at(index);
              if (result == NULL) {
                CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
                SET_STACK_OBJECT(THREAD->vm_result(), 0);
                THREAD->set_vm_result(NULL);
              } else {
                VERIFY_OOP(result);
                SET_STACK_OBJECT(result, 0);
              }
              break;
            }

          case JVM_CONSTANT_Class:
            VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
            SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
            break;

          case JVM_CONSTANT_UnresolvedClass:
          case JVM_CONSTANT_UnresolvedClassInError:
            CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
            SET_STACK_OBJECT(THREAD->vm_result(), 0);
            THREAD->set_vm_result(NULL);
            break;

          default:  ShouldNotReachHere();
          }
          UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
        }

      CASE(_ldc2_w):
        {
          u2 index = Bytes::get_Java_u2(pc+1);

          ConstantPool* constants = METHOD->constants();
          switch (constants->tag_at(index).value()) {

          case JVM_CONSTANT_Long:
             SET_STACK_LONG(constants->long_at(index), 1);
            break;

          case JVM_CONSTANT_Double:
             SET_STACK_DOUBLE(constants->double_at(index), 1);
            break;
          default:  ShouldNotReachHere();
          }
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
        }
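      // Note on the two-slot pushes above (descriptive only): category-2
      // constants (long/double) are written at stack offset 1, one past the
      // current top, and UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2) then commits
      // both slots, matching the JVM spec rule that a long or double
      // occupies a pair of expression stack slots.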
2297 2298 CASE(_fast_aldc_w): 2299 CASE(_fast_aldc): { 2300 u2 index; 2301 int incr; 2302 if (opcode == Bytecodes::_fast_aldc) { 2303 index = pc[1]; 2304 incr = 2; 2305 } else { 2306 index = Bytes::get_native_u2(pc+1); 2307 incr = 3; 2308 } 2309 2310 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2311 // This kind of CP cache entry does not need to match the flags byte, because 2312 // there is a 1-1 relation between bytecode type and CP entry type. 2313 ConstantPool* constants = METHOD->constants(); 2314 oop result = constants->resolved_references()->obj_at(index); 2315 if (result == NULL) { 2316 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2317 handle_exception); 2318 result = THREAD->vm_result(); 2319 } 2320 2321 VERIFY_OOP(result); 2322 SET_STACK_OBJECT(result, 0); 2323 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2324 } 2325 2326 CASE(_invokedynamic): { 2327 2328 if (!EnableInvokeDynamic) { 2329 // We should not encounter this bytecode if !EnableInvokeDynamic. 2330 // The verifier will stop it. However, if we get past the verifier, 2331 // this will stop the thread in a reasonable way, without crashing the JVM. 2332 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), 2333 handle_exception); 2334 ShouldNotReachHere(); 2335 } 2336 2337 u4 index = Bytes::get_native_u4(pc+1); 2338 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2339 2340 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2341 // This kind of CP cache entry does not need to match the flags byte, because 2342 // there is a 1-1 relation between bytecode type and CP entry type. 2343 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2344 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), 2345 handle_exception); 2346 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2347 } 2348 2349 Method* method = cache->f1_as_method(); 2350 VERIFY_OOP(method); 2351 2352 if (cache->has_appendix()) { 2353 ConstantPool* constants = METHOD->constants(); 2354 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2355 MORE_STACK(1); 2356 } 2357 2358 istate->set_msg(call_method); 2359 istate->set_callee(method); 2360 istate->set_callee_entry_point(method->from_interpreted_entry()); 2361 istate->set_bcp_advance(5); 2362 2363 UPDATE_PC_AND_RETURN(0); // I'll be back... 2364 } 2365 2366 CASE(_invokehandle): { 2367 2368 if (!EnableInvokeDynamic) { 2369 ShouldNotReachHere(); 2370 } 2371 2372 u2 index = Bytes::get_native_u2(pc+1); 2373 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2374 2375 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2376 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), 2377 handle_exception); 2378 cache = cp->entry_at(index); 2379 } 2380 2381 Method* method = cache->f1_as_method(); 2382 2383 VERIFY_OOP(method); 2384 2385 if (cache->has_appendix()) { 2386 ConstantPool* constants = METHOD->constants(); 2387 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2388 MORE_STACK(1); 2389 } 2390 2391 istate->set_msg(call_method); 2392 istate->set_callee(method); 2393 istate->set_callee_entry_point(method->from_interpreted_entry()); 2394 istate->set_bcp_advance(3); 2395 2396 UPDATE_PC_AND_RETURN(0); // I'll be back... 2397 } 2398 2399 CASE(_invokeinterface): { 2400 u2 index = Bytes::get_native_u2(pc+1); 2401 2402 // QQQ Need to make this as inlined as possible. 
Probably need to split all the bytecode cases 2403 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2404 2405 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2406 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2407 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2408 handle_exception); 2409 cache = cp->entry_at(index); 2410 } 2411 2412 istate->set_msg(call_method); 2413 2414 // Special case of invokeinterface called for virtual method of 2415 // java.lang.Object. See cpCacheOop.cpp for details. 2416 // This code isn't produced by javac, but could be produced by 2417 // another compliant java compiler. 2418 if (cache->is_forced_virtual()) { 2419 Method* callee; 2420 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2421 if (cache->is_vfinal()) { 2422 callee = cache->f2_as_vfinal_method(); 2423 } else { 2424 // get receiver 2425 int parms = cache->parameter_size(); 2426 // Same comments as invokevirtual apply here 2427 VERIFY_OOP(STACK_OBJECT(-parms)); 2428 InstanceKlass* rcvrKlass = (InstanceKlass*) 2429 STACK_OBJECT(-parms)->klass(); 2430 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2431 } 2432 istate->set_callee(callee); 2433 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2434 #ifdef VM_JVMTI 2435 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2436 istate->set_callee_entry_point(callee->interpreter_entry()); 2437 } 2438 #endif /* VM_JVMTI */ 2439 istate->set_bcp_advance(5); 2440 UPDATE_PC_AND_RETURN(0); // I'll be back... 2441 } 2442 2443 // this could definitely be cleaned up QQQ 2444 Method* callee; 2445 Klass* iclass = cache->f1_as_klass(); 2446 // InstanceKlass* interface = (InstanceKlass*) iclass; 2447 // get receiver 2448 int parms = cache->parameter_size(); 2449 oop rcvr = STACK_OBJECT(-parms); 2450 CHECK_NULL(rcvr); 2451 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2452 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2453 int i; 2454 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2455 if (ki->interface_klass() == iclass) break; 2456 } 2457 // If the interface isn't found, this class doesn't implement this 2458 // interface. The link resolver checks this but only for the first 2459 // time this interface is called. 2460 if (i == int2->itable_length()) { 2461 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); 2462 } 2463 int mindex = cache->f2_as_index(); 2464 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2465 callee = im[mindex].method(); 2466 if (callee == NULL) { 2467 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); 2468 } 2469 2470 istate->set_callee(callee); 2471 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2472 #ifdef VM_JVMTI 2473 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2474 istate->set_callee_entry_point(callee->interpreter_entry()); 2475 } 2476 #endif /* VM_JVMTI */ 2477 istate->set_bcp_advance(5); 2478 UPDATE_PC_AND_RETURN(0); // I'll be back... 2479 } 2480 2481 CASE(_invokevirtual): 2482 CASE(_invokespecial): 2483 CASE(_invokestatic): { 2484 u2 index = Bytes::get_native_u2(pc+1); 2485 2486 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2487 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2488 // out so c++ compiler has a chance for constant prop to fold everything possible away. 
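      // Dispatch summary for the three invoke flavors handled below
      // (descriptive only): _invokestatic and _invokespecial take the
      // callee straight from cache->f1_as_method() (after a receiver null
      // check for _invokespecial), while _invokevirtual uses
      // f2_as_vfinal_method() for vfinal targets and otherwise indexes the
      // receiver klass's vtable with f2_as_index().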
      if (!cache->is_resolved((Bytecodes::Code)opcode)) {
        CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
                handle_exception);
        cache = cp->entry_at(index);
      }

      istate->set_msg(call_method);
      {
        Method* callee;
        if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
          CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
          if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method();
          else {
            // get receiver
            int parms = cache->parameter_size();
            // this works but needs a resourcemark and seems to create a vtable on every call:
            // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
            //
            // this fails with an assert
            // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
            // but this works
            VERIFY_OOP(STACK_OBJECT(-parms));
            InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass();
            /*
              Executing this code in java.lang.String:
                  public String(char value[]) {
                        this.count = value.length;
                        this.value = (char[])value.clone();
                  }

              a find on rcvr->klass() reports:
              {type array char}{type array class}
               - klass: {other class}

              but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
              because rcvr->klass()->oop_is_instance() == 0
              However it seems to have a vtable in the right location. Huh?

            */
            callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
          }
        } else {
          if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
            CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
          }
          callee = cache->f1_as_method();
        }

        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
#endif /* VM_JVMTI */
        istate->set_bcp_advance(3);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
    }

    /* Allocate memory for a new java object. */

    CASE(_newarray): {
      BasicType atype = (BasicType) *(pc+1);
      jint size = STACK_INT(-1);
      CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
              handle_exception);
      SET_STACK_OBJECT(THREAD->vm_result(), -1);
      THREAD->set_vm_result(NULL);

      UPDATE_PC_AND_CONTINUE(2);
    }

    /* Throw an exception. */

    CASE(_athrow): {
      oop except_oop = STACK_OBJECT(-1);
      CHECK_NULL(except_oop);
      // set pending_exception so we use common code
      THREAD->set_pending_exception(except_oop, NULL, 0);
      goto handle_exception;
    }

    /* goto and jsr. They are exactly the same except jsr pushes
     * the address of the next instruction first.
2575 */ 2576 2577 CASE(_jsr): { 2578 /* push bytecode index on stack */ 2579 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2580 MORE_STACK(1); 2581 /* FALL THROUGH */ 2582 } 2583 2584 CASE(_goto): 2585 { 2586 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2587 address branch_pc = pc; 2588 UPDATE_PC(offset); 2589 DO_BACKEDGE_CHECKS(offset, branch_pc); 2590 CONTINUE; 2591 } 2592 2593 CASE(_jsr_w): { 2594 /* push return address on the stack */ 2595 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2596 MORE_STACK(1); 2597 /* FALL THROUGH */ 2598 } 2599 2600 CASE(_goto_w): 2601 { 2602 int32_t offset = Bytes::get_Java_u4(pc + 1); 2603 address branch_pc = pc; 2604 UPDATE_PC(offset); 2605 DO_BACKEDGE_CHECKS(offset, branch_pc); 2606 CONTINUE; 2607 } 2608 2609 /* return from a jsr or jsr_w */ 2610 2611 CASE(_ret): { 2612 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2613 UPDATE_PC_AND_CONTINUE(0); 2614 } 2615 2616 /* debugger breakpoint */ 2617 2618 CASE(_breakpoint): { 2619 Bytecodes::Code original_bytecode; 2620 DECACHE_STATE(); 2621 SET_LAST_JAVA_FRAME(); 2622 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2623 METHOD, pc); 2624 RESET_LAST_JAVA_FRAME(); 2625 CACHE_STATE(); 2626 if (THREAD->has_pending_exception()) goto handle_exception; 2627 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2628 handle_exception); 2629 2630 opcode = (jubyte)original_bytecode; 2631 goto opcode_switch; 2632 } 2633 2634 DEFAULT: 2635 fatal(err_msg("Unimplemented opcode %d = %s", opcode, 2636 Bytecodes::name((Bytecodes::Code)opcode))); 2637 goto finish; 2638 2639 } /* switch(opc) */ 2640 2641 2642 #ifdef USELABELS 2643 check_for_exception: 2644 #endif 2645 { 2646 if (!THREAD->has_pending_exception()) { 2647 CONTINUE; 2648 } 2649 /* We will be gcsafe soon, so flush our state. */ 2650 DECACHE_PC(); 2651 goto handle_exception; 2652 } 2653 do_continue: ; 2654 2655 } /* while (1) interpreter loop */ 2656 2657 2658 // An exception exists in the thread state see whether this activation can handle it 2659 handle_exception: { 2660 2661 HandleMarkCleaner __hmc(THREAD); 2662 Handle except_oop(THREAD, THREAD->pending_exception()); 2663 // Prevent any subsequent HandleMarkCleaner in the VM 2664 // from freeing the except_oop handle. 
    HandleMark __hm(THREAD);

    THREAD->clear_pending_exception();
    assert(except_oop(), "No exception to process");
    intptr_t continuation_bci;
    // expression stack is emptied
    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
            handle_exception);

    except_oop = THREAD->vm_result();
    THREAD->set_vm_result(NULL);
    if (continuation_bci >= 0) {
      // Place exception on top of stack
      SET_STACK_OBJECT(except_oop(), 0);
      MORE_STACK(1);
      pc = METHOD->code_base() + continuation_bci;
      if (TraceExceptions) {
        ttyLocker ttyl;
        ResourceMark rm;
        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
        tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
        tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
                      pc - (intptr_t)METHOD->code_base(),
                      continuation_bci, THREAD);
      }
      // for AbortVMOnException flag
      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
      goto run;
    }
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
      tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
                    pc - (intptr_t) METHOD->code_base(),
                    THREAD);
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
    // No handler in this activation, unwind and try again
    THREAD->set_pending_exception(except_oop(), NULL, 0);
    goto handle_return;
  } /* handle_exception: */



  // Return from an interpreter invocation with the result of the interpretation
  // on the top of the Java Stack (or a pending exception)

handle_Pop_Frame:

  // We don't really do anything special here except we must be aware
  // that we can get here without ever locking the method (if sync).
  // Also we skip the notification of the exit.

  istate->set_msg(popping_frame);
  // Clear pending so while the pop is in process
  // we don't start another one if a call_vm is done.
  THREAD->clr_pop_frame_pending();
  // Let interpreter (only) see that we're in the process of popping a frame
  THREAD->set_pop_frame_in_process();

handle_return:
  {
    DECACHE_STATE();

    bool suppress_error = istate->msg() == popping_frame;
    bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
    Handle original_exception(THREAD, THREAD->pending_exception());
    Handle illegal_state_oop(THREAD, NULL);

    // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
    // in any following VM entries from freeing our live handles, but illegal_state_oop
    // isn't really allocated yet and so doesn't become live until later and
    // in unpredictable places. Instead we must protect the places where we enter the
    // VM. It would be much simpler (and safer) if we could allocate a real handle with
    // a NULL oop in it and then overwrite the oop later as needed. This, unfortunately,
    // isn't possible.
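    // The epilogue below therefore wraps each VM entry in its own
    // HandleMark and proceeds in three steps: (1) walk the monitor block
    // and unlock (or flag) anything still locked, (2) unlock the method
    // monitor of a synchronized method, and (3) post any JVMTI exit event
    // and pick the final message/exception to hand back to the frame
    // manager.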
    THREAD->clear_pending_exception();

    //
    // As far as we are concerned we have returned. If we have a pending exception
    // that will be returned as this invocation's result. However if we get any
    // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
    // will be our final result (i.e. monitor exception trumps a pending exception).
    //

    // If we never locked the method (or really passed the point where we would have),
    // there is no need to unlock it (or look for other monitors), since that
    // could not have happened.

    if (THREAD->do_not_unlock()) {

      // Never locked, reset the flag now because obviously any caller must
      // have passed their point of locking for us to have gotten here.

      THREAD->clr_do_not_unlock();
    } else {
      // At this point we consider that we have returned. We now check that the
      // locks were properly block structured. If we find that they were not
      // used properly we will return with an illegal monitor exception.
      // The exception is checked by the caller not the callee since this
      // checking is considered to be part of the invocation and therefore
      // in the callers scope (JVM spec 8.13).
      //
      // Another weird thing to watch for is if the method was locked
      // recursively and then not exited properly. This means we must
      // examine all the entries in reverse time (and stack) order and
      // unlock as we find them. If we find the method monitor before
      // we are at the initial entry then we should throw an exception.
      // It is not clear that the template-based interpreter does this
      // correctly.

      BasicObjectLock* base = istate->monitor_base();
      BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
      bool method_unlock_needed = METHOD->is_synchronized();
      // We know the initial monitor was used for the method, so don't check
      // that slot in the loop.
      if (method_unlock_needed) base--;

      // Check all the monitors to see they are unlocked. Install exception if found to be locked.
      while (end < base) {
        oop lockee = end->obj();
        if (lockee != NULL) {
          BasicLock* lock = end->lock();
          markOop header = lock->displaced_header();
          end->set_obj(NULL);

          if (!lockee->mark()->has_bias_pattern()) {
            // If it isn't recursive we either must swap old header or call the runtime
            if (header != NULL) {
              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                // restore object for the slow case
                end->set_obj(lockee);
                {
                  // Prevent any HandleMarkCleaner from freeing our live handles
                  HandleMark __hm(THREAD);
                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
                }
              }
            }
          }
          // One error is plenty
          if (illegal_state_oop() == NULL && !suppress_error) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
            }
            assert(THREAD->has_pending_exception(), "Lost our exception!");
            illegal_state_oop = THREAD->pending_exception();
            THREAD->clear_pending_exception();
          }
        }
        end++;
      }
      // Unlock the method if needed
      if (method_unlock_needed) {
        if (base->obj() == NULL) {
          // The method is already unlocked: this is not good.
          if (illegal_state_oop() == NULL && !suppress_error) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
            }
            assert(THREAD->has_pending_exception(), "Lost our exception!");
            illegal_state_oop = THREAD->pending_exception();
            THREAD->clear_pending_exception();
          }
        } else {
          //
          // The initial monitor is always used for the method.
          // However, if that slot is no longer the oop for the method it was
          // unlocked and reused by something that wasn't unlocked!
          //
          // deopt can come in with rcvr dead because c2 knows
          // its value is preserved in the monitor. So we can't use locals[0] at all
          // and must use first monitor slot.
          //
          oop rcvr = base->obj();
          if (rcvr == NULL) {
            if (!suppress_error) {
              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
              illegal_state_oop = THREAD->pending_exception();
              THREAD->clear_pending_exception();
            }
          } else if (UseHeavyMonitors) {
            {
              // Prevent any HandleMarkCleaner from freeing our live handles.
              HandleMark __hm(THREAD);
              CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
            }
            if (THREAD->has_pending_exception()) {
              if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
              THREAD->clear_pending_exception();
            }
          } else {
            BasicLock* lock = base->lock();
            markOop header = lock->displaced_header();
            base->set_obj(NULL);

            if (!rcvr->mark()->has_bias_pattern()) {
              base->set_obj(NULL);
              // If it isn't recursive we either must swap old header or call the runtime
              if (header != NULL) {
                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
                  // restore object for the slow case
                  base->set_obj(rcvr);
                  {
                    // Prevent any HandleMarkCleaner from freeing our live handles
                    HandleMark __hm(THREAD);
                    CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
                  }
                  if (THREAD->has_pending_exception()) {
                    if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
                    THREAD->clear_pending_exception();
                  }
                }
              }
            }
          }
        }
      }
    }
    // Clear the do_not_unlock flag now.
    THREAD->clr_do_not_unlock();

    //
    // Notify jvmti/jvmdi
    //
    // NOTE: we do not notify a method_exit if we have a pending exception,
    // including an exception we generate for unlocking checks. In the former
    // case, JVMDI has already been notified by our call for the exception handler
    // and in both cases as far as JVMDI is concerned we have already returned.
    // If we notify it again JVMDI will be all confused about how many frames
    // are still on the stack (4340444).
    //
    // NOTE Further! It turns out that the JVMTI spec in fact expects to see
    // method_exit events whenever we leave an activation unless it was done
    // for popframe. This is nothing like jvmdi. However we are passing the
    // tests at the moment (apparently because they are jvmdi based) so rather
    // than change this code and possibly fail tests we will leave it alone
    // (with this note) in anticipation of changing the vm and the tests
    // simultaneously.
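    // An illegal monitor state detected above also suppresses the JVMTI
    // method_exit event below, for the same "already returned" reasoning
    // given in the note above.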
    //
    suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;



#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
          {
            // Prevent any HandleMarkCleaner from freeing our live handles
            HandleMark __hm(THREAD);
            CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
          }
        }
      }
#endif /* VM_JVMTI */

    //
    // See if we are returning any exception
    // A pending exception that was pending prior to a possible popping frame
    // overrides the popping frame.
    //
    assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed");
    if (illegal_state_oop() != NULL || original_exception() != NULL) {
      // inform the frame manager we have no result
      istate->set_msg(throwing_exception);
      if (illegal_state_oop() != NULL)
        THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
      else
        THREAD->set_pending_exception(original_exception(), NULL, 0);
      istate->set_return_kind((Bytecodes::Code)opcode);
      UPDATE_PC_AND_RETURN(0);
    }

    if (istate->msg() == popping_frame) {
      // Make it simpler on the assembly code and set the message for the frame pop.
      // returns
      if (istate->prev() == NULL) {
        // We must be returning to a deoptimized frame (because popframe only happens between
        // two interpreted frames). We need to save the current arguments in C heap so that
        // the deoptimized frame when it restarts can copy the arguments to its expression
        // stack and re-execute the call. We also have to notify deoptimization that this
        // has occurred and to pick up the preserved args and copy them to the deoptimized
        // frame's java expression stack. Yuck.
        //
        THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
                                LOCALS_SLOT(METHOD->size_of_parameters() - 1));
        THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
      }
      THREAD->clr_pop_frame_in_process();
    }

    // Normal return
    // Advance the pc and return to frame manager
    istate->set_msg(return_from_method);
    istate->set_return_kind((Bytecodes::Code)opcode);
    UPDATE_PC_AND_RETURN(1);
  } /* handle_return: */

// This is really a fatal error return

finish:
  DECACHE_TOS();
  DECACHE_PC();

  return;
}

/*
 * All the code following this point is only produced once and is not present
 * in the JVMTI version of the interpreter
 */

#ifndef VM_JVMTI

// This constructor should only be used to construct the object to signal
// interpreter initialization. All other instances should be created by
// the frame manager.
BytecodeInterpreter::BytecodeInterpreter(messages msg) {
  if (msg != initialize) ShouldNotReachHere();
  _msg = msg;
  _self_link = this;
  _prev_link = NULL;
}

// Inline static functions for Java Stack and Local manipulation

// The implementations are platform dependent. We have to worry about alignment
// issues on some machines, and these can differ on the same platform depending
// on whether it is an LP64 machine.
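// Indexing convention for the accessors below (a descriptive sketch, not
// new behavior): the expression stack grows toward lower addresses and
// callers pass negative offsets relative to the current top, e.g.
//
//   jint  i = stack_int(tos, -1);   // topmost int slot
//   jlong l = stack_long(tos, -1);  // topmost long (occupies two slots)
//
// Interpreter::expr_index_at() maps that logical offset to the
// platform-dependent physical slot index.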
3007 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3008 return (address) tos[Interpreter::expr_index_at(-offset)]; 3009 } 3010 3011 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3012 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3013 } 3014 3015 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3016 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3017 } 3018 3019 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3020 return (oop)tos [Interpreter::expr_index_at(-offset)]; 3021 } 3022 3023 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3024 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3025 } 3026 3027 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3028 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3029 } 3030 3031 // only used for value types 3032 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3033 int offset) { 3034 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3035 } 3036 3037 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3038 int offset) { 3039 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3040 } 3041 3042 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3043 int offset) { 3044 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3045 } 3046 3047 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3048 int offset) { 3049 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3050 } 3051 3052 // needs to be platform dep for the 32 bit platforms. 3053 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3054 int offset) { 3055 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3056 } 3057 3058 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3059 address addr, int offset) { 3060 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3061 ((VMJavaVal64*)addr)->d); 3062 } 3063 3064 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3065 int offset) { 3066 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3067 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3068 } 3069 3070 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3071 address addr, int offset) { 3072 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3073 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3074 ((VMJavaVal64*)addr)->l; 3075 } 3076 3077 // Locals 3078 3079 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3080 return (address)locals[Interpreter::local_index_at(-offset)]; 3081 } 3082 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3083 return (jint)locals[Interpreter::local_index_at(-offset)]; 3084 } 3085 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3086 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3087 } 3088 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3089 return (oop)locals[Interpreter::local_index_at(-offset)]; 3090 } 3091 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3092 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3093 } 3094 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3095 return 
((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l; 3096 } 3097 3098 // Returns the address of locals value. 3099 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) { 3100 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); 3101 } 3102 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) { 3103 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); 3104 } 3105 3106 // Used for local value or returnAddress 3107 void BytecodeInterpreter::set_locals_slot(intptr_t *locals, 3108 address value, int offset) { 3109 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value; 3110 } 3111 void BytecodeInterpreter::set_locals_int(intptr_t *locals, 3112 jint value, int offset) { 3113 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value; 3114 } 3115 void BytecodeInterpreter::set_locals_float(intptr_t *locals, 3116 jfloat value, int offset) { 3117 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value; 3118 } 3119 void BytecodeInterpreter::set_locals_object(intptr_t *locals, 3120 oop value, int offset) { 3121 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value; 3122 } 3123 void BytecodeInterpreter::set_locals_double(intptr_t *locals, 3124 jdouble value, int offset) { 3125 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value; 3126 } 3127 void BytecodeInterpreter::set_locals_long(intptr_t *locals, 3128 jlong value, int offset) { 3129 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value; 3130 } 3131 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals, 3132 address addr, int offset) { 3133 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d; 3134 } 3135 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals, 3136 address addr, int offset) { 3137 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l; 3138 } 3139 3140 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset, 3141 intptr_t* locals, int locals_offset) { 3142 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)]; 3143 locals[Interpreter::local_index_at(-locals_offset)] = value; 3144 } 3145 3146 3147 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset, 3148 int to_offset) { 3149 tos[Interpreter::expr_index_at(-to_offset)] = 3150 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)]; 3151 } 3152 3153 void BytecodeInterpreter::dup(intptr_t *tos) { 3154 copy_stack_slot(tos, -1, 0); 3155 } 3156 void BytecodeInterpreter::dup2(intptr_t *tos) { 3157 copy_stack_slot(tos, -2, 0); 3158 copy_stack_slot(tos, -1, 1); 3159 } 3160 3161 void BytecodeInterpreter::dup_x1(intptr_t *tos) { 3162 /* insert top word two down */ 3163 copy_stack_slot(tos, -1, 0); 3164 copy_stack_slot(tos, -2, -1); 3165 copy_stack_slot(tos, 0, -2); 3166 } 3167 3168 void BytecodeInterpreter::dup_x2(intptr_t *tos) { 3169 /* insert top word three down */ 3170 copy_stack_slot(tos, -1, 0); 3171 copy_stack_slot(tos, -2, -1); 3172 copy_stack_slot(tos, -3, -2); 3173 copy_stack_slot(tos, 0, -3); 3174 } 3175 void BytecodeInterpreter::dup2_x1(intptr_t *tos) { 3176 /* insert top 2 slots three down */ 3177 copy_stack_slot(tos, -1, 1); 3178 copy_stack_slot(tos, -2, 0); 3179 copy_stack_slot(tos, -3, -1); 3180 copy_stack_slot(tos, 1, -2); 3181 copy_stack_slot(tos, 0, -3); 3182 } 3183 void BytecodeInterpreter::dup2_x2(intptr_t *tos) { 3184 /* insert top 2 slots four 
down */ 3185 copy_stack_slot(tos, -1, 1); 3186 copy_stack_slot(tos, -2, 0); 3187 copy_stack_slot(tos, -3, -1); 3188 copy_stack_slot(tos, -4, -2); 3189 copy_stack_slot(tos, 1, -3); 3190 copy_stack_slot(tos, 0, -4); 3191 } 3192 3193 3194 void BytecodeInterpreter::swap(intptr_t *tos) { 3195 // swap top two elements 3196 intptr_t val = tos[Interpreter::expr_index_at(1)]; 3197 // Copy -2 entry to -1 3198 copy_stack_slot(tos, -2, -1); 3199 // Store saved -1 entry into -2 3200 tos[Interpreter::expr_index_at(2)] = val; 3201 } 3202 // -------------------------------------------------------------------------------- 3203 // Non-product code 3204 #ifndef PRODUCT 3205 3206 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) { 3207 switch (msg) { 3208 case BytecodeInterpreter::no_request: return("no_request"); 3209 case BytecodeInterpreter::initialize: return("initialize"); 3210 // status message to C++ interpreter 3211 case BytecodeInterpreter::method_entry: return("method_entry"); 3212 case BytecodeInterpreter::method_resume: return("method_resume"); 3213 case BytecodeInterpreter::got_monitors: return("got_monitors"); 3214 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception"); 3215 // requests to frame manager from C++ interpreter 3216 case BytecodeInterpreter::call_method: return("call_method"); 3217 case BytecodeInterpreter::return_from_method: return("return_from_method"); 3218 case BytecodeInterpreter::more_monitors: return("more_monitors"); 3219 case BytecodeInterpreter::throwing_exception: return("throwing_exception"); 3220 case BytecodeInterpreter::popping_frame: return("popping_frame"); 3221 case BytecodeInterpreter::do_osr: return("do_osr"); 3222 // deopt 3223 case BytecodeInterpreter::deopt_resume: return("deopt_resume"); 3224 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2"); 3225 default: return("BAD MSG"); 3226 } 3227 } 3228 void 3229 BytecodeInterpreter::print() { 3230 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread); 3231 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp); 3232 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals); 3233 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants); 3234 { 3235 ResourceMark rm; 3236 char *method_name = _method->name_and_sig_as_C_string(); 3237 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name); 3238 } 3239 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx); 3240 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack); 3241 tty->print_cr("msg: %s", C_msg(this->_msg)); 3242 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee); 3243 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point); 3244 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance); 3245 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf); 3246 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry); 3247 tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind); 3248 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link); 3249 tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp); 3250 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base); 3251 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit); 
3252 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base); 3253 #ifdef SPARC 3254 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc); 3255 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom); 3256 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult); 3257 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult); 3258 #endif 3259 #if !defined(ZERO) 3260 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp); 3261 #endif // !ZERO 3262 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link); 3263 } 3264 3265 extern "C" { 3266 void PI(uintptr_t arg) { 3267 ((BytecodeInterpreter*)arg)->print(); 3268 } 3269 } 3270 #endif // PRODUCT 3271 3272 #endif // JVMTI 3273 #endif // CC_INTERP