1 /* 2 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"
// Per-platform memory-ordering primitives (OrderAccess) for the target OS/CPU pair.
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "orderAccess_aix_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif


// no precompiled headers
#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

// CASE/DEFAULT expand either to a computed-goto label (USELABELS) or to a
// plain switch case label, so the opcode handlers below compile both ways.
#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no live
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
// NOTE: expands to CALL_VM(..., handle_exception), so SAFEPOINT is only
// usable where the interpreter loop's handle_exception label is in scope.
#define SAFEPOINT                                                          \
    if ( SafepointSynchronize::is_synchronizing()) {                       \
        {                                                                  \
          /* zap freed handles rather than GC'ing them */                  \
          HandleMarkCleaner __hmc(THREAD);                                 \
        }                                                                  \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);    \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
// The DECACHE/SET_LAST_JAVA_FRAME ... RESET/CACHE bracketing mirrors
// CALL_VM_NAKED_LJF: state must be flushed to the frame before entering
// the VM (ThreadInVMfromJava transition) and recached afterwards.
#define VM_JAVA_ERROR_NO_JUMP(name, msg)                                   \
    DECACHE_STATE();                                                       \
    SET_LAST_JAVA_FRAME();                                                 \
    {                                                                      \
       ThreadInVMfromJava trans(THREAD);                                   \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);      \
    }                                                                      \
    RESET_LAST_JAVA_FRAME();                                               \
    CACHE_STATE();

// Normal throw of a java error
#define VM_JAVA_ERROR(name, msg)                                           \
    VM_JAVA_ERROR_NO_JUMP(name, msg)                                       \
    goto handle_exception;

// Bytecode tracing/counting support; compiled out entirely in PRODUCT builds.
#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                   topOfStack[Interpreter::expr_index_at(1)],                        \
                                   topOfStack[Interpreter::expr_index_at(2)]),                       \
                                   handle_exception);                                                \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result if we call at_single_stepping_point() we refetch opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
// Relies on the interpreter loop's locals (_jvmti_interp_events, istate,
// pc, opcode, THREAD) and its handle_Pop_Frame / handle_Early_Return labels.
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                      \
{                                                                          \
    if (_jvmti_interp_events) {                                            \
      if (JvmtiExport::should_post_single_step()) {                        \
        DECACHE_STATE();                                                   \
        SET_LAST_JAVA_FRAME();                                             \
        ThreadInVMfromJava trans(THREAD);                                  \
        JvmtiExport::at_single_stepping_point(THREAD,                      \
                                              istate->method(),            \
                                              pc);                         \
        RESET_LAST_JAVA_FRAME();                                           \
        CACHE_STATE();                                                     \
        if (THREAD->pop_frame_pending() &&                                 \
            !THREAD->pop_frame_in_process()) {                             \
          goto handle_Pop_Frame;                                           \
        }                                                                  \
        if (THREAD->jvmti_thread_state() &&                                \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) {         \
          goto handle_Early_Return;                                        \
        }                                                                  \
        opcode = *pc;  /* refetch: a breakpoint may have been inserted */  \
      }                                                                    \
    }                                                                      \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif

/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method, save the adjusted pc and return to frame manager
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;


#define METHOD istate->method()
// Fetch (and lazily build via the VM) the MethodCounters for METHOD.
// May enter the VM and jump to handle_exception on failure.
#define GET_METHOD_COUNTERS(res)                                                                \
  res = METHOD->method_counters();                                                              \
  if (res == NULL) {                                                                            \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

// Ask the VM whether an OSR (on-stack replacement) compilation is ready for
// this backedge; result is an nmethod* (or NULL) in 'res'.
#define OSR_REQUEST(res, branch_pc)                                                              \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


// Only backwards branches (skip <= 0) are checked: bump the backedge counter,
// possibly trigger OSR (returning to the frame manager with msg do_osr and the
// migration buffer), and take a SAFEPOINT so loops remain safepoint-responsive.
#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                \
    if ((skip) <= 0) {                                                                     \
      MethodCounters* mcs;                                                                 \
      GET_METHOD_COUNTERS(mcs);                                                            \
      if (UseLoopCounter) {                                                                \
        bool do_OSR = UseOnStackReplacement;                                               \
        mcs->backedge_counter()->increment();                                              \
        if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit();           \
        if (do_OSR) {                                                                      \
          nmethod* osr_nmethod;                                                            \
          OSR_REQUEST(osr_nmethod, branch_pc);                                             \
          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
            intptr_t* buf;                                                                 \
            /* Call OSR migration with last java frame only, no checks. */                 \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));             \
            istate->set_msg(do_osr);                                                       \
            istate->set_osr_buf((address)buf);                                             \
            istate->set_osr_entry(osr_nmethod->osr_entry());                               \
            return;                                                                        \
          }                                                                                \
        }                                                                                  \
      }  /* UseCompiler ... */                                                             \
      mcs->invocation_counter()->increment();                                              \
      SAFEPOINT;                                                                           \
    }

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                 \
    if ((obj_) == NULL) {                                                \
        VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
    }                                                                    \
    VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
// 0 and 1 built from max_jlong so the constant expressions carry jlong type.
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
// Round val up to the next 4-byte boundary.
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM with last java frame only.
// Flushes cached state to the frame, publishes the last Java frame, runs
// 'func', then unpublishes and recaches. No exception/pop-frame checks.
#define CALL_VM_NAKED_LJF(func)                                    \
        DECACHE_STATE();                                           \
        SET_LAST_JAVA_FRAME();                                     \
        func;                                                      \
        RESET_LAST_JAVA_FRAME();                                   \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
432 #define CALL_VM_NOCHECK(func) \ 433 CALL_VM_NAKED_LJF(func) \ 434 if (THREAD->pop_frame_pending() && \ 435 !THREAD->pop_frame_in_process()) { \ 436 goto handle_Pop_Frame; \ 437 } \ 438 if (THREAD->jvmti_thread_state() && \ 439 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ 440 goto handle_Early_Return; \ 441 } 442 443 // Call the VM and check for pending exceptions 444 #define CALL_VM(func, label) { \ 445 CALL_VM_NOCHECK(func); \ 446 if (THREAD->has_pending_exception()) goto label; \ 447 } 448 449 /* 450 * BytecodeInterpreter::run(interpreterState istate) 451 * BytecodeInterpreter::runWithChecks(interpreterState istate) 452 * 453 * The real deal. This is where byte codes actually get interpreted. 454 * Basically it's a big while loop that iterates until we return from 455 * the method passed in. 456 * 457 * The runWithChecks is used if JVMTI is enabled. 458 * 459 */ 460 #if defined(VM_JVMTI) 461 void 462 BytecodeInterpreter::runWithChecks(interpreterState istate) { 463 #else 464 void 465 BytecodeInterpreter::run(interpreterState istate) { 466 #endif 467 468 // In order to simplify some tests based on switches set at runtime 469 // we invoke the interpreter a single time after switches are enabled 470 // and set simpler to to test variables rather than method calls or complex 471 // boolean expressions. 
472 473 static int initialized = 0; 474 static int checkit = 0; 475 static intptr_t* c_addr = NULL; 476 static intptr_t c_value; 477 478 if (checkit && *c_addr != c_value) { 479 os::breakpoint(); 480 } 481 #ifdef VM_JVMTI 482 static bool _jvmti_interp_events = 0; 483 #endif 484 485 static int _compiling; // (UseCompiler || CountCompiledCalls) 486 487 #ifdef ASSERT 488 if (istate->_msg != initialize) { 489 // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap) 490 // because in that case, EnableInvokeDynamic is true by default but will be later switched off 491 // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes 492 // for the old JSR292 implementation. 493 // This leads to a situation where 'istate->_stack_limit' always accounts for 494 // methodOopDesc::extra_stack_entries() because it is computed in 495 // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while 496 // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't 497 // account for extra_stack_entries() anymore because at the time when it is called 498 // EnableInvokeDynamic was already set to false. 499 // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was 500 // switched off because of the wrong classes. 501 if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) { 502 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); 503 } else { 504 const int extra_stack_entries = Method::extra_stack_entries_for_jsr292; 505 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries 506 + 1), "bad stack limit"); 507 } 508 #ifndef SHARK 509 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong")); 510 #endif // !SHARK 511 } 512 // Verify linkages. 
513 interpreterState l = istate; 514 do { 515 assert(l == l->_self_link, "bad link"); 516 l = l->_prev_link; 517 } while (l != NULL); 518 // Screwups with stack management usually cause us to overwrite istate 519 // save a copy so we can verify it. 520 interpreterState orig = istate; 521 #endif 522 523 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ 524 register address pc = istate->bcp(); 525 register jubyte opcode; 526 register intptr_t* locals = istate->locals(); 527 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() 528 #ifdef LOTS_OF_REGS 529 register JavaThread* THREAD = istate->thread(); 530 #else 531 #undef THREAD 532 #define THREAD istate->thread() 533 #endif 534 535 #ifdef USELABELS 536 const static void* const opclabels_data[256] = { 537 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, 538 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, 539 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, 540 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1, 541 542 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w, 543 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload, 544 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1, 545 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1, 546 547 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1, 548 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1, 549 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1, 550 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload, 551 552 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload, 553 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore, 554 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0, 555 /* 0x3C */ 
&&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0, 556 557 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0, 558 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0, 559 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0, 560 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore, 561 562 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore, 563 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop, 564 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2, 565 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap, 566 567 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd, 568 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub, 569 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul, 570 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv, 571 572 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem, 573 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg, 574 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr, 575 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land, 576 577 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor, 578 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d, 579 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i, 580 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l, 581 582 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s, 583 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl, 584 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt, 585 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq, 586 587 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt, 588 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto, 589 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch, 590 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn, 591 592 /* 0xB0 */ &&opc_areturn, &&opc_return, 
&&opc_getstatic, &&opc_putstatic, 593 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, 594 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, 595 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, 596 597 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, 598 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, 599 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default, 600 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 601 602 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 603 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 604 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 605 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 606 607 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 608 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, 609 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, 610 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 611 612 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 613 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 614 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 615 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default 616 }; 617 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0]; 618 #endif /* USELABELS */ 619 620 #ifdef ASSERT 621 // this will trigger a VERIFY_OOP on entry 622 if (istate->msg() != initialize && ! 
METHOD->is_static()) { 623 oop rcvr = LOCALS_OBJECT(0); 624 VERIFY_OOP(rcvr); 625 } 626 #endif 627 // #define HACK 628 #ifdef HACK 629 bool interesting = false; 630 #endif // HACK 631 632 /* QQQ this should be a stack method so we don't know actual direction */ 633 guarantee(istate->msg() == initialize || 634 topOfStack >= istate->stack_limit() && 635 topOfStack < istate->stack_base(), 636 "Stack top out of range"); 637 638 switch (istate->msg()) { 639 case initialize: { 640 if (initialized++) ShouldNotReachHere(); // Only one initialize call 641 _compiling = (UseCompiler || CountCompiledCalls); 642 #ifdef VM_JVMTI 643 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); 644 #endif 645 return; 646 } 647 break; 648 case method_entry: { 649 THREAD->set_do_not_unlock(); 650 // count invocations 651 assert(initialized, "Interpreter not initialized"); 652 if (_compiling) { 653 MethodCounters* mcs; 654 GET_METHOD_COUNTERS(mcs); 655 if (ProfileInterpreter) { 656 METHOD->increment_interpreter_invocation_count(THREAD); 657 } 658 mcs->invocation_counter()->increment(); 659 if (mcs->invocation_counter()->reached_InvocationLimit()) { 660 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 661 662 // We no longer retry on a counter overflow 663 664 // istate->set_msg(retry_method); 665 // THREAD->clr_do_not_unlock(); 666 // return; 667 } 668 SAFEPOINT; 669 } 670 671 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 672 // initialize 673 os::breakpoint(); 674 } 675 676 #ifdef HACK 677 { 678 ResourceMark rm; 679 char *method_name = istate->method()->name_and_sig_as_C_string(); 680 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 681 tty->print_cr("entering: depth %d bci: %d", 682 (istate->_stack_base - istate->_stack), 683 istate->_bcp - istate->_method->code_base()); 684 interesting = true; 685 } 686 } 687 #endif // HACK 688 689 690 // lock method if synchronized 691 
if (METHOD->is_synchronized()) { 692 // oop rcvr = locals[0].j.r; 693 oop rcvr; 694 if (METHOD->is_static()) { 695 rcvr = METHOD->constants()->pool_holder()->java_mirror(); 696 } else { 697 rcvr = LOCALS_OBJECT(0); 698 VERIFY_OOP(rcvr); 699 } 700 // The initial monitor is ours for the taking 701 // Monitor not filled in frame manager any longer as this caused race condition with biased locking. 702 BasicObjectLock* mon = &istate->monitor_base()[-1]; 703 mon->set_obj(rcvr); 704 bool success = false; 705 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 706 markOop mark = rcvr->mark(); 707 intptr_t hash = (intptr_t) markOopDesc::no_hash; 708 // Implies UseBiasedLocking. 709 if (mark->has_bias_pattern()) { 710 uintptr_t thread_ident; 711 uintptr_t anticipated_bias_locking_value; 712 thread_ident = (uintptr_t)istate->thread(); 713 anticipated_bias_locking_value = 714 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 715 ~((uintptr_t) markOopDesc::age_mask_in_place); 716 717 if (anticipated_bias_locking_value == 0) { 718 // Already biased towards this thread, nothing to do. 719 if (PrintBiasedLockingStatistics) { 720 (* BiasedLocking::biased_lock_entry_count_addr())++; 721 } 722 success = true; 723 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 724 // Try to revoke bias. 725 markOop header = rcvr->klass()->prototype_header(); 726 if (hash != markOopDesc::no_hash) { 727 header = header->copy_set_hash(hash); 728 } 729 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) { 730 if (PrintBiasedLockingStatistics) 731 (*BiasedLocking::revoked_lock_entry_count_addr())++; 732 } 733 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { 734 // Try to rebias. 
735 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident); 736 if (hash != markOopDesc::no_hash) { 737 new_header = new_header->copy_set_hash(hash); 738 } 739 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) { 740 if (PrintBiasedLockingStatistics) { 741 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 742 } 743 } else { 744 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 745 } 746 success = true; 747 } else { 748 // Try to bias towards thread in case object is anonymously biased. 749 markOop header = (markOop) ((uintptr_t) mark & 750 ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 751 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 752 if (hash != markOopDesc::no_hash) { 753 header = header->copy_set_hash(hash); 754 } 755 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 756 // Debugging hint. 757 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 758 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) { 759 if (PrintBiasedLockingStatistics) { 760 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 761 } 762 } else { 763 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 764 } 765 success = true; 766 } 767 } 768 769 // Traditional lightweight locking. 770 if (!success) { 771 markOop displaced = rcvr->mark()->set_unlocked(); 772 mon->lock()->set_displaced_header(displaced); 773 bool call_vm = UseHeavyMonitors; 774 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { 775 // Is it simple recursive case? 
776 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 777 mon->lock()->set_displaced_header(NULL); 778 } else { 779 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 780 } 781 } 782 } 783 } 784 THREAD->clr_do_not_unlock(); 785 786 // Notify jvmti 787 #ifdef VM_JVMTI 788 if (_jvmti_interp_events) { 789 // Whenever JVMTI puts a thread in interp_only_mode, method 790 // entry/exit events are sent for that thread to track stack depth. 791 if (THREAD->is_interp_only_mode()) { 792 CALL_VM(InterpreterRuntime::post_method_entry(THREAD), 793 handle_exception); 794 } 795 } 796 #endif /* VM_JVMTI */ 797 798 goto run; 799 } 800 801 case popping_frame: { 802 // returned from a java call to pop the frame, restart the call 803 // clear the message so we don't confuse ourselves later 804 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); 805 istate->set_msg(no_request); 806 THREAD->clr_pop_frame_in_process(); 807 goto run; 808 } 809 810 case method_resume: { 811 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 812 // resume 813 os::breakpoint(); 814 } 815 #ifdef HACK 816 { 817 ResourceMark rm; 818 char *method_name = istate->method()->name_and_sig_as_C_string(); 819 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 820 tty->print_cr("resume: depth %d bci: %d", 821 (istate->_stack_base - istate->_stack) , 822 istate->_bcp - istate->_method->code_base()); 823 interesting = true; 824 } 825 } 826 #endif // HACK 827 // returned from a java call, continue executing. 
828 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { 829 goto handle_Pop_Frame; 830 } 831 if (THREAD->jvmti_thread_state() && 832 THREAD->jvmti_thread_state()->is_earlyret_pending()) { 833 goto handle_Early_Return; 834 } 835 836 if (THREAD->has_pending_exception()) goto handle_exception; 837 // Update the pc by the saved amount of the invoke bytecode size 838 UPDATE_PC(istate->bcp_advance()); 839 goto run; 840 } 841 842 case deopt_resume2: { 843 // Returned from an opcode that will reexecute. Deopt was 844 // a result of a PopFrame request. 845 // 846 goto run; 847 } 848 849 case deopt_resume: { 850 // Returned from an opcode that has completed. The stack has 851 // the result all we need to do is skip across the bytecode 852 // and continue (assuming there is no exception pending) 853 // 854 // compute continuation length 855 // 856 // Note: it is possible to deopt at a return_register_finalizer opcode 857 // because this requires entering the vm to do the registering. While the 858 // opcode is complete we can't advance because there are no more opcodes 859 // much like trying to deopt at a poll return. In that has we simply 860 // get out of here 861 // 862 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) { 863 // this will do the right thing even if an exception is pending. 864 goto handle_return; 865 } 866 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); 867 if (THREAD->has_pending_exception()) goto handle_exception; 868 goto run; 869 } 870 case got_monitors: { 871 // continue locking now that we have a monitor to use 872 // we expect to find newly allocated monitor at the "top" of the monitor stack. 
873 oop lockee = STACK_OBJECT(-1); 874 VERIFY_OOP(lockee); 875 // derefing's lockee ought to provoke implicit null check 876 // find a free monitor 877 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); 878 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); 879 entry->set_obj(lockee); 880 bool success = false; 881 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 882 883 markOop mark = lockee->mark(); 884 intptr_t hash = (intptr_t) markOopDesc::no_hash; 885 // implies UseBiasedLocking 886 if (mark->has_bias_pattern()) { 887 uintptr_t thread_ident; 888 uintptr_t anticipated_bias_locking_value; 889 thread_ident = (uintptr_t)istate->thread(); 890 anticipated_bias_locking_value = 891 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 892 ~((uintptr_t) markOopDesc::age_mask_in_place); 893 894 if (anticipated_bias_locking_value == 0) { 895 // already biased towards this thread, nothing to do 896 if (PrintBiasedLockingStatistics) { 897 (* BiasedLocking::biased_lock_entry_count_addr())++; 898 } 899 success = true; 900 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 901 // try revoke bias 902 markOop header = lockee->klass()->prototype_header(); 903 if (hash != markOopDesc::no_hash) { 904 header = header->copy_set_hash(hash); 905 } 906 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 907 if (PrintBiasedLockingStatistics) { 908 (*BiasedLocking::revoked_lock_entry_count_addr())++; 909 } 910 } 911 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 912 // try rebias 913 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 914 if (hash != markOopDesc::no_hash) { 915 new_header = new_header->copy_set_hash(hash); 916 } 917 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 918 if (PrintBiasedLockingStatistics) { 919 (* 
BiasedLocking::rebiased_lock_entry_count_addr())++; 920 } 921 } else { 922 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 923 } 924 success = true; 925 } else { 926 // try to bias towards thread in case object is anonymously biased 927 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 928 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 929 if (hash != markOopDesc::no_hash) { 930 header = header->copy_set_hash(hash); 931 } 932 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 933 // debugging hint 934 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 935 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 936 if (PrintBiasedLockingStatistics) { 937 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 938 } 939 } else { 940 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 941 } 942 success = true; 943 } 944 } 945 946 // traditional lightweight locking 947 if (!success) { 948 markOop displaced = lockee->mark()->set_unlocked(); 949 entry->lock()->set_displaced_header(displaced); 950 bool call_vm = UseHeavyMonitors; 951 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 952 // Is it simple recursive case? 
953 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 954 entry->lock()->set_displaced_header(NULL); 955 } else { 956 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 957 } 958 } 959 } 960 UPDATE_PC_AND_TOS(1, -1); 961 goto run; 962 } 963 default: { 964 fatal("Unexpected message from frame manager"); 965 } 966 } 967 968 run: 969 970 DO_UPDATE_INSTRUCTION_COUNT(*pc) 971 DEBUGGER_SINGLE_STEP_NOTIFY(); 972 #ifdef PREFETCH_OPCCODE 973 opcode = *pc; /* prefetch first opcode */ 974 #endif 975 976 #ifndef USELABELS 977 while (1) 978 #endif 979 { 980 #ifndef PREFETCH_OPCCODE 981 opcode = *pc; 982 #endif 983 // Seems like this happens twice per opcode. At worst this is only 984 // need at entry to the loop. 985 // DEBUGGER_SINGLE_STEP_NOTIFY(); 986 /* Using this labels avoids double breakpoints when quickening and 987 * when returing from transition frames. 988 */ 989 opcode_switch: 990 assert(istate == orig, "Corrupted istate"); 991 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */ 992 assert(topOfStack >= istate->stack_limit(), "Stack overrun"); 993 assert(topOfStack < istate->stack_base(), "Stack underrun"); 994 995 #ifdef USELABELS 996 DISPATCH(opcode); 997 #else 998 switch (opcode) 999 #endif 1000 { 1001 CASE(_nop): 1002 UPDATE_PC_AND_CONTINUE(1); 1003 1004 /* Push miscellaneous constants onto the stack. 
*/ 1005 1006 CASE(_aconst_null): 1007 SET_STACK_OBJECT(NULL, 0); 1008 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1009 1010 #undef OPC_CONST_n 1011 #define OPC_CONST_n(opcode, const_type, value) \ 1012 CASE(opcode): \ 1013 SET_STACK_ ## const_type(value, 0); \ 1014 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1015 1016 OPC_CONST_n(_iconst_m1, INT, -1); 1017 OPC_CONST_n(_iconst_0, INT, 0); 1018 OPC_CONST_n(_iconst_1, INT, 1); 1019 OPC_CONST_n(_iconst_2, INT, 2); 1020 OPC_CONST_n(_iconst_3, INT, 3); 1021 OPC_CONST_n(_iconst_4, INT, 4); 1022 OPC_CONST_n(_iconst_5, INT, 5); 1023 OPC_CONST_n(_fconst_0, FLOAT, 0.0); 1024 OPC_CONST_n(_fconst_1, FLOAT, 1.0); 1025 OPC_CONST_n(_fconst_2, FLOAT, 2.0); 1026 1027 #undef OPC_CONST2_n 1028 #define OPC_CONST2_n(opcname, value, key, kind) \ 1029 CASE(_##opcname): \ 1030 { \ 1031 SET_STACK_ ## kind(VM##key##Const##value(), 1); \ 1032 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1033 } 1034 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE); 1035 OPC_CONST2_n(dconst_1, One, double, DOUBLE); 1036 OPC_CONST2_n(lconst_0, Zero, long, LONG); 1037 OPC_CONST2_n(lconst_1, One, long, LONG); 1038 1039 /* Load constant from constant pool: */ 1040 1041 /* Push a 1-byte signed integer value onto the stack. */ 1042 CASE(_bipush): 1043 SET_STACK_INT((jbyte)(pc[1]), 0); 1044 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1045 1046 /* Push a 2-byte signed integer constant onto the stack. 
*/ 1047 CASE(_sipush): 1048 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0); 1049 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 1050 1051 /* load from local variable */ 1052 1053 CASE(_aload): 1054 VERIFY_OOP(LOCALS_OBJECT(pc[1])); 1055 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0); 1056 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1057 1058 CASE(_iload): 1059 CASE(_fload): 1060 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0); 1061 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1062 1063 CASE(_lload): 1064 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1); 1065 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1066 1067 CASE(_dload): 1068 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1); 1069 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1070 1071 #undef OPC_LOAD_n 1072 #define OPC_LOAD_n(num) \ 1073 CASE(_aload_##num): \ 1074 VERIFY_OOP(LOCALS_OBJECT(num)); \ 1075 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \ 1076 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1077 \ 1078 CASE(_iload_##num): \ 1079 CASE(_fload_##num): \ 1080 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \ 1081 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1082 \ 1083 CASE(_lload_##num): \ 1084 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \ 1085 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1086 CASE(_dload_##num): \ 1087 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \ 1088 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1089 1090 OPC_LOAD_n(0); 1091 OPC_LOAD_n(1); 1092 OPC_LOAD_n(2); 1093 OPC_LOAD_n(3); 1094 1095 /* store to a local variable */ 1096 1097 CASE(_astore): 1098 astore(topOfStack, -1, locals, pc[1]); 1099 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1100 1101 CASE(_istore): 1102 CASE(_fstore): 1103 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]); 1104 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1105 1106 CASE(_lstore): 1107 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]); 1108 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1109 1110 CASE(_dstore): 1111 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]); 1112 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1113 1114 CASE(_wide): { 1115 uint16_t reg = 
Bytes::get_Java_u2(pc + 2); 1116 1117 opcode = pc[1]; 1118 switch(opcode) { 1119 case Bytecodes::_aload: 1120 VERIFY_OOP(LOCALS_OBJECT(reg)); 1121 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); 1122 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1123 1124 case Bytecodes::_iload: 1125 case Bytecodes::_fload: 1126 SET_STACK_SLOT(LOCALS_SLOT(reg), 0); 1127 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1128 1129 case Bytecodes::_lload: 1130 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1131 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1132 1133 case Bytecodes::_dload: 1134 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1135 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1136 1137 case Bytecodes::_astore: 1138 astore(topOfStack, -1, locals, reg); 1139 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1140 1141 case Bytecodes::_istore: 1142 case Bytecodes::_fstore: 1143 SET_LOCALS_SLOT(STACK_SLOT(-1), reg); 1144 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1145 1146 case Bytecodes::_lstore: 1147 SET_LOCALS_LONG(STACK_LONG(-1), reg); 1148 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1149 1150 case Bytecodes::_dstore: 1151 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg); 1152 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1153 1154 case Bytecodes::_iinc: { 1155 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); 1156 // Be nice to see what this generates.... 
QQQ 1157 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); 1158 UPDATE_PC_AND_CONTINUE(6); 1159 } 1160 case Bytecodes::_ret: 1161 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); 1162 UPDATE_PC_AND_CONTINUE(0); 1163 default: 1164 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode"); 1165 } 1166 } 1167 1168 1169 #undef OPC_STORE_n 1170 #define OPC_STORE_n(num) \ 1171 CASE(_astore_##num): \ 1172 astore(topOfStack, -1, locals, num); \ 1173 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1174 CASE(_istore_##num): \ 1175 CASE(_fstore_##num): \ 1176 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \ 1177 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1178 1179 OPC_STORE_n(0); 1180 OPC_STORE_n(1); 1181 OPC_STORE_n(2); 1182 OPC_STORE_n(3); 1183 1184 #undef OPC_DSTORE_n 1185 #define OPC_DSTORE_n(num) \ 1186 CASE(_dstore_##num): \ 1187 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \ 1188 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1189 CASE(_lstore_##num): \ 1190 SET_LOCALS_LONG(STACK_LONG(-1), num); \ 1191 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1192 1193 OPC_DSTORE_n(0); 1194 OPC_DSTORE_n(1); 1195 OPC_DSTORE_n(2); 1196 OPC_DSTORE_n(3); 1197 1198 /* stack pop, dup, and insert opcodes */ 1199 1200 1201 CASE(_pop): /* Discard the top item on the stack */ 1202 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1203 1204 1205 CASE(_pop2): /* Discard the top 2 items on the stack */ 1206 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1207 1208 1209 CASE(_dup): /* Duplicate the top item on the stack */ 1210 dup(topOfStack); 1211 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1212 1213 CASE(_dup2): /* Duplicate the top 2 items on the stack */ 1214 dup2(topOfStack); 1215 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1216 1217 CASE(_dup_x1): /* insert top word two down */ 1218 dup_x1(topOfStack); 1219 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1220 1221 CASE(_dup_x2): /* insert top word three down */ 1222 dup_x2(topOfStack); 1223 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1224 1225 CASE(_dup2_x1): /* insert top 2 slots three down 
*/ 1226 dup2_x1(topOfStack); 1227 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1228 1229 CASE(_dup2_x2): /* insert top 2 slots four down */ 1230 dup2_x2(topOfStack); 1231 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1232 1233 CASE(_swap): { /* swap top two elements on the stack */ 1234 swap(topOfStack); 1235 UPDATE_PC_AND_CONTINUE(1); 1236 } 1237 1238 /* Perform various binary integer operations */ 1239 1240 #undef OPC_INT_BINARY 1241 #define OPC_INT_BINARY(opcname, opname, test) \ 1242 CASE(_i##opcname): \ 1243 if (test && (STACK_INT(-1) == 0)) { \ 1244 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1245 "/ by zero"); \ 1246 } \ 1247 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1248 STACK_INT(-1)), \ 1249 -2); \ 1250 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1251 CASE(_l##opcname): \ 1252 { \ 1253 if (test) { \ 1254 jlong l1 = STACK_LONG(-1); \ 1255 if (VMlongEqz(l1)) { \ 1256 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1257 "/ by long zero"); \ 1258 } \ 1259 } \ 1260 /* First long at (-1,-2) next long at (-3,-4) */ \ 1261 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ 1262 STACK_LONG(-1)), \ 1263 -3); \ 1264 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1265 } 1266 1267 OPC_INT_BINARY(add, Add, 0); 1268 OPC_INT_BINARY(sub, Sub, 0); 1269 OPC_INT_BINARY(mul, Mul, 0); 1270 OPC_INT_BINARY(and, And, 0); 1271 OPC_INT_BINARY(or, Or, 0); 1272 OPC_INT_BINARY(xor, Xor, 0); 1273 OPC_INT_BINARY(div, Div, 1); 1274 OPC_INT_BINARY(rem, Rem, 1); 1275 1276 1277 /* Perform various binary floating number operations */ 1278 /* On some machine/platforms/compilers div zero check can be implicit */ 1279 1280 #undef OPC_FLOAT_BINARY 1281 #define OPC_FLOAT_BINARY(opcname, opname) \ 1282 CASE(_d##opcname): { \ 1283 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \ 1284 STACK_DOUBLE(-1)), \ 1285 -3); \ 1286 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1287 } \ 1288 CASE(_f##opcname): \ 1289 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \ 1290 STACK_FLOAT(-1)), \ 1291 -2); \ 
1292 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1293 1294 1295 OPC_FLOAT_BINARY(add, Add); 1296 OPC_FLOAT_BINARY(sub, Sub); 1297 OPC_FLOAT_BINARY(mul, Mul); 1298 OPC_FLOAT_BINARY(div, Div); 1299 OPC_FLOAT_BINARY(rem, Rem); 1300 1301 /* Shift operations 1302 * Shift left int and long: ishl, lshl 1303 * Logical shift right int and long w/zero extension: iushr, lushr 1304 * Arithmetic shift right int and long w/sign extension: ishr, lshr 1305 */ 1306 1307 #undef OPC_SHIFT_BINARY 1308 #define OPC_SHIFT_BINARY(opcname, opname) \ 1309 CASE(_i##opcname): \ 1310 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1311 STACK_INT(-1)), \ 1312 -2); \ 1313 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1314 CASE(_l##opcname): \ 1315 { \ 1316 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \ 1317 STACK_INT(-1)), \ 1318 -2); \ 1319 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1320 } 1321 1322 OPC_SHIFT_BINARY(shl, Shl); 1323 OPC_SHIFT_BINARY(shr, Shr); 1324 OPC_SHIFT_BINARY(ushr, Ushr); 1325 1326 /* Increment local variable by constant */ 1327 CASE(_iinc): 1328 { 1329 // locals[pc[1]].j.i += (jbyte)(pc[2]); 1330 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]); 1331 UPDATE_PC_AND_CONTINUE(3); 1332 } 1333 1334 /* negate the value on the top of the stack */ 1335 1336 CASE(_ineg): 1337 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); 1338 UPDATE_PC_AND_CONTINUE(1); 1339 1340 CASE(_fneg): 1341 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); 1342 UPDATE_PC_AND_CONTINUE(1); 1343 1344 CASE(_lneg): 1345 { 1346 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); 1347 UPDATE_PC_AND_CONTINUE(1); 1348 } 1349 1350 CASE(_dneg): 1351 { 1352 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); 1353 UPDATE_PC_AND_CONTINUE(1); 1354 } 1355 1356 /* Conversion operations */ 1357 1358 CASE(_i2f): /* convert top of stack int to float */ 1359 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1); 1360 UPDATE_PC_AND_CONTINUE(1); 1361 1362 CASE(_i2l): /* convert top of stack int to long */ 1363 { 1364 // this is ugly QQQ 1365 jlong 
r = VMint2Long(STACK_INT(-1)); 1366 MORE_STACK(-1); // Pop 1367 SET_STACK_LONG(r, 1); 1368 1369 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1370 } 1371 1372 CASE(_i2d): /* convert top of stack int to double */ 1373 { 1374 // this is ugly QQQ (why cast to jlong?? ) 1375 jdouble r = (jlong)STACK_INT(-1); 1376 MORE_STACK(-1); // Pop 1377 SET_STACK_DOUBLE(r, 1); 1378 1379 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1380 } 1381 1382 CASE(_l2i): /* convert top of stack long to int */ 1383 { 1384 jint r = VMlong2Int(STACK_LONG(-1)); 1385 MORE_STACK(-2); // Pop 1386 SET_STACK_INT(r, 0); 1387 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1388 } 1389 1390 CASE(_l2f): /* convert top of stack long to float */ 1391 { 1392 jlong r = STACK_LONG(-1); 1393 MORE_STACK(-2); // Pop 1394 SET_STACK_FLOAT(VMlong2Float(r), 0); 1395 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1396 } 1397 1398 CASE(_l2d): /* convert top of stack long to double */ 1399 { 1400 jlong r = STACK_LONG(-1); 1401 MORE_STACK(-2); // Pop 1402 SET_STACK_DOUBLE(VMlong2Double(r), 1); 1403 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1404 } 1405 1406 CASE(_f2i): /* Convert top of stack float to int */ 1407 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); 1408 UPDATE_PC_AND_CONTINUE(1); 1409 1410 CASE(_f2l): /* convert top of stack float to long */ 1411 { 1412 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1)); 1413 MORE_STACK(-1); // POP 1414 SET_STACK_LONG(r, 1); 1415 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1416 } 1417 1418 CASE(_f2d): /* convert top of stack float to double */ 1419 { 1420 jfloat f; 1421 jdouble r; 1422 f = STACK_FLOAT(-1); 1423 r = (jdouble) f; 1424 MORE_STACK(-1); // POP 1425 SET_STACK_DOUBLE(r, 1); 1426 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1427 } 1428 1429 CASE(_d2i): /* convert top of stack double to int */ 1430 { 1431 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1)); 1432 MORE_STACK(-2); 1433 SET_STACK_INT(r1, 0); 1434 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1435 } 1436 1437 CASE(_d2f): /* convert top of stack double to float */ 1438 
{ 1439 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1)); 1440 MORE_STACK(-2); 1441 SET_STACK_FLOAT(r1, 0); 1442 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1443 } 1444 1445 CASE(_d2l): /* convert top of stack double to long */ 1446 { 1447 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1)); 1448 MORE_STACK(-2); 1449 SET_STACK_LONG(r1, 1); 1450 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1451 } 1452 1453 CASE(_i2b): 1454 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1); 1455 UPDATE_PC_AND_CONTINUE(1); 1456 1457 CASE(_i2c): 1458 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1); 1459 UPDATE_PC_AND_CONTINUE(1); 1460 1461 CASE(_i2s): 1462 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1); 1463 UPDATE_PC_AND_CONTINUE(1); 1464 1465 /* comparison operators */ 1466 1467 1468 #define COMPARISON_OP(name, comparison) \ 1469 CASE(_if_icmp##name): { \ 1470 int skip = (STACK_INT(-2) comparison STACK_INT(-1)) \ 1471 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1472 address branch_pc = pc; \ 1473 UPDATE_PC_AND_TOS(skip, -2); \ 1474 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1475 CONTINUE; \ 1476 } \ 1477 CASE(_if##name): { \ 1478 int skip = (STACK_INT(-1) comparison 0) \ 1479 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1480 address branch_pc = pc; \ 1481 UPDATE_PC_AND_TOS(skip, -1); \ 1482 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1483 CONTINUE; \ 1484 } 1485 1486 #define COMPARISON_OP2(name, comparison) \ 1487 COMPARISON_OP(name, comparison) \ 1488 CASE(_if_acmp##name): { \ 1489 int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)) \ 1490 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1491 address branch_pc = pc; \ 1492 UPDATE_PC_AND_TOS(skip, -2); \ 1493 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1494 CONTINUE; \ 1495 } 1496 1497 #define NULL_COMPARISON_NOT_OP(name) \ 1498 CASE(_if##name): { \ 1499 int skip = (!(STACK_OBJECT(-1) == NULL)) \ 1500 ? 
(int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1501 address branch_pc = pc; \ 1502 UPDATE_PC_AND_TOS(skip, -1); \ 1503 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1504 CONTINUE; \ 1505 } 1506 1507 #define NULL_COMPARISON_OP(name) \ 1508 CASE(_if##name): { \ 1509 int skip = ((STACK_OBJECT(-1) == NULL)) \ 1510 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1511 address branch_pc = pc; \ 1512 UPDATE_PC_AND_TOS(skip, -1); \ 1513 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1514 CONTINUE; \ 1515 } 1516 COMPARISON_OP(lt, <); 1517 COMPARISON_OP(gt, >); 1518 COMPARISON_OP(le, <=); 1519 COMPARISON_OP(ge, >=); 1520 COMPARISON_OP2(eq, ==); /* include ref comparison */ 1521 COMPARISON_OP2(ne, !=); /* include ref comparison */ 1522 NULL_COMPARISON_OP(null); 1523 NULL_COMPARISON_NOT_OP(nonnull); 1524 1525 /* Goto pc at specified offset in switch table. */ 1526 1527 CASE(_tableswitch): { 1528 jint* lpc = (jint*)VMalignWordUp(pc+1); 1529 int32_t key = STACK_INT(-1); 1530 int32_t low = Bytes::get_Java_u4((address)&lpc[1]); 1531 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); 1532 int32_t skip; 1533 key -= low; 1534 skip = ((uint32_t) key > (uint32_t)(high - low)) 1535 ? Bytes::get_Java_u4((address)&lpc[0]) 1536 : Bytes::get_Java_u4((address)&lpc[key + 3]); 1537 // Does this really need a full backedge check (osr?) 
1538 address branch_pc = pc; 1539 UPDATE_PC_AND_TOS(skip, -1); 1540 DO_BACKEDGE_CHECKS(skip, branch_pc); 1541 CONTINUE; 1542 } 1543 1544 /* Goto pc whose table entry matches specified key */ 1545 1546 CASE(_lookupswitch): { 1547 jint* lpc = (jint*)VMalignWordUp(pc+1); 1548 int32_t key = STACK_INT(-1); 1549 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ 1550 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); 1551 while (--npairs >= 0) { 1552 lpc += 2; 1553 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { 1554 skip = Bytes::get_Java_u4((address)&lpc[1]); 1555 break; 1556 } 1557 } 1558 address branch_pc = pc; 1559 UPDATE_PC_AND_TOS(skip, -1); 1560 DO_BACKEDGE_CHECKS(skip, branch_pc); 1561 CONTINUE; 1562 } 1563 1564 CASE(_fcmpl): 1565 CASE(_fcmpg): 1566 { 1567 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), 1568 STACK_FLOAT(-1), 1569 (opcode == Bytecodes::_fcmpl ? -1 : 1)), 1570 -2); 1571 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1572 } 1573 1574 CASE(_dcmpl): 1575 CASE(_dcmpg): 1576 { 1577 int r = VMdoubleCompare(STACK_DOUBLE(-3), 1578 STACK_DOUBLE(-1), 1579 (opcode == Bytecodes::_dcmpl ? -1 : 1)); 1580 MORE_STACK(-4); // Pop 1581 SET_STACK_INT(r, 0); 1582 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1583 } 1584 1585 CASE(_lcmp): 1586 { 1587 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1)); 1588 MORE_STACK(-4); 1589 SET_STACK_INT(r, 0); 1590 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1591 } 1592 1593 1594 /* Return from a method */ 1595 1596 CASE(_areturn): 1597 CASE(_ireturn): 1598 CASE(_freturn): 1599 { 1600 // Allow a safepoint before returning to frame manager. 1601 SAFEPOINT; 1602 1603 goto handle_return; 1604 } 1605 1606 CASE(_lreturn): 1607 CASE(_dreturn): 1608 { 1609 // Allow a safepoint before returning to frame manager. 
1610 SAFEPOINT; 1611 goto handle_return; 1612 } 1613 1614 CASE(_return_register_finalizer): { 1615 1616 oop rcvr = LOCALS_OBJECT(0); 1617 VERIFY_OOP(rcvr); 1618 if (rcvr->klass()->has_finalizer()) { 1619 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception); 1620 } 1621 goto handle_return; 1622 } 1623 CASE(_return): { 1624 1625 // Allow a safepoint before returning to frame manager. 1626 SAFEPOINT; 1627 goto handle_return; 1628 } 1629 1630 /* Array access byte-codes */ 1631 1632 /* Every array access byte-code starts out like this */ 1633 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff); 1634 #define ARRAY_INTRO(arrayOff) \ 1635 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \ 1636 jint index = STACK_INT(arrayOff + 1); \ 1637 char message[jintAsStringSize]; \ 1638 CHECK_NULL(arrObj); \ 1639 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ 1640 sprintf(message, "%d", index); \ 1641 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ 1642 message); \ 1643 } 1644 1645 /* 32-bit loads. 
These handle conversion from < 32-bit types */ 1646 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1647 { \ 1648 ARRAY_INTRO(-2); \ 1649 (void)extra; \ 1650 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1651 -2); \ 1652 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1653 } 1654 1655 /* 64-bit loads */ 1656 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1657 { \ 1658 ARRAY_INTRO(-2); \ 1659 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1660 (void)extra; \ 1661 UPDATE_PC_AND_CONTINUE(1); \ 1662 } 1663 1664 CASE(_iaload): 1665 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1666 CASE(_faload): 1667 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1668 CASE(_aaload): { 1669 ARRAY_INTRO(-2); 1670 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1671 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1672 } 1673 CASE(_baload): 1674 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1675 CASE(_caload): 1676 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1677 CASE(_saload): 1678 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1679 CASE(_laload): 1680 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1681 CASE(_daload): 1682 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1683 1684 /* 32-bit stores. 
These handle conversion to < 32-bit types */ 1685 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1686 { \ 1687 ARRAY_INTRO(-3); \ 1688 (void)extra; \ 1689 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1690 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1691 } 1692 1693 /* 64-bit stores */ 1694 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1695 { \ 1696 ARRAY_INTRO(-4); \ 1697 (void)extra; \ 1698 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1699 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1700 } 1701 1702 CASE(_iastore): 1703 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1704 CASE(_fastore): 1705 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1706 /* 1707 * This one looks different because of the assignability check 1708 */ 1709 CASE(_aastore): { 1710 oop rhsObject = STACK_OBJECT(-1); 1711 VERIFY_OOP(rhsObject); 1712 ARRAY_INTRO( -3); 1713 // arrObj, index are set 1714 if (rhsObject != NULL) { 1715 /* Check assignability of rhsObject into arrObj */ 1716 Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass) 1717 Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1718 // 1719 // Check for compatibilty. This check must not GC!! 1720 // Seems way more expensive now that we must dispatch 1721 // 1722 if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is... 
1723 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), ""); 1724 } 1725 } 1726 ((objArrayOopDesc *) arrObj)->obj_at_put(index, rhsObject); 1727 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1728 } 1729 CASE(_bastore): 1730 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1731 CASE(_castore): 1732 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1733 CASE(_sastore): 1734 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1735 CASE(_lastore): 1736 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1737 CASE(_dastore): 1738 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1739 1740 CASE(_arraylength): 1741 { 1742 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1743 CHECK_NULL(ary); 1744 SET_STACK_INT(ary->length(), -1); 1745 UPDATE_PC_AND_CONTINUE(1); 1746 } 1747 1748 /* monitorenter and monitorexit for locking/unlocking an object */ 1749 1750 CASE(_monitorenter): { 1751 oop lockee = STACK_OBJECT(-1); 1752 // derefing's lockee ought to provoke implicit null check 1753 CHECK_NULL(lockee); 1754 // find a free monitor or one already allocated for this object 1755 // if we find a matching object then we need a new monitor 1756 // since this is recursive enter 1757 BasicObjectLock* limit = istate->monitor_base(); 1758 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1759 BasicObjectLock* entry = NULL; 1760 while (most_recent != limit ) { 1761 if (most_recent->obj() == NULL) entry = most_recent; 1762 else if (most_recent->obj() == lockee) break; 1763 most_recent++; 1764 } 1765 if (entry != NULL) { 1766 entry->set_obj(lockee); 1767 int success = false; 1768 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1769 1770 markOop mark = lockee->mark(); 1771 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1772 // implies UseBiasedLocking 1773 if (mark->has_bias_pattern()) { 1774 uintptr_t thread_ident; 1775 uintptr_t anticipated_bias_locking_value; 1776 thread_ident = (uintptr_t)istate->thread(); 1777 
anticipated_bias_locking_value = 1778 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1779 ~((uintptr_t) markOopDesc::age_mask_in_place); 1780 1781 if (anticipated_bias_locking_value == 0) { 1782 // already biased towards this thread, nothing to do 1783 if (PrintBiasedLockingStatistics) { 1784 (* BiasedLocking::biased_lock_entry_count_addr())++; 1785 } 1786 success = true; 1787 } 1788 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1789 // try revoke bias 1790 markOop header = lockee->klass()->prototype_header(); 1791 if (hash != markOopDesc::no_hash) { 1792 header = header->copy_set_hash(hash); 1793 } 1794 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1795 if (PrintBiasedLockingStatistics) 1796 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1797 } 1798 } 1799 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1800 // try rebias 1801 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1802 if (hash != markOopDesc::no_hash) { 1803 new_header = new_header->copy_set_hash(hash); 1804 } 1805 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 1806 if (PrintBiasedLockingStatistics) 1807 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1808 } 1809 else { 1810 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1811 } 1812 success = true; 1813 } 1814 else { 1815 // try to bias towards thread in case object is anonymously biased 1816 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1817 (uintptr_t)markOopDesc::age_mask_in_place | 1818 epoch_mask_in_place)); 1819 if (hash != markOopDesc::no_hash) { 1820 header = header->copy_set_hash(hash); 1821 } 1822 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1823 // debugging hint 1824 
DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1825 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1826 if (PrintBiasedLockingStatistics) 1827 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1828 } 1829 else { 1830 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1831 } 1832 success = true; 1833 } 1834 } 1835 1836 // traditional lightweight locking 1837 if (!success) { 1838 markOop displaced = lockee->mark()->set_unlocked(); 1839 entry->lock()->set_displaced_header(displaced); 1840 bool call_vm = UseHeavyMonitors; 1841 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1842 // Is it simple recursive case? 1843 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1844 entry->lock()->set_displaced_header(NULL); 1845 } else { 1846 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1847 } 1848 } 1849 } 1850 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1851 } else { 1852 istate->set_msg(more_monitors); 1853 UPDATE_PC_AND_RETURN(0); // Re-execute 1854 } 1855 } 1856 1857 CASE(_monitorexit): { 1858 oop lockee = STACK_OBJECT(-1); 1859 CHECK_NULL(lockee); 1860 // derefing's lockee ought to provoke implicit null check 1861 // find our monitor slot 1862 BasicObjectLock* limit = istate->monitor_base(); 1863 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1864 while (most_recent != limit ) { 1865 if ((most_recent)->obj() == lockee) { 1866 BasicLock* lock = most_recent->lock(); 1867 markOop header = lock->displaced_header(); 1868 most_recent->set_obj(NULL); 1869 if (!lockee->mark()->has_bias_pattern()) { 1870 bool call_vm = UseHeavyMonitors; 1871 // If it isn't recursive we either must swap old header or call the runtime 1872 if (header != NULL || call_vm) { 1873 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1874 // 
restore object for the slow case 1875 most_recent->set_obj(lockee); 1876 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1877 } 1878 } 1879 } 1880 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1881 } 1882 most_recent++; 1883 } 1884 // Need to throw illegal monitor state exception 1885 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1886 ShouldNotReachHere(); 1887 } 1888 1889 /* All of the non-quick opcodes. */ 1890 1891 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1892 * constant pool index in the instruction. 1893 */ 1894 CASE(_getfield): 1895 CASE(_getstatic): 1896 { 1897 u2 index; 1898 ConstantPoolCacheEntry* cache; 1899 index = Bytes::get_native_u2(pc+1); 1900 1901 // QQQ Need to make this as inlined as possible. Probably need to 1902 // split all the bytecode cases out so c++ compiler has a chance 1903 // for constant prop to fold everything possible away. 1904 1905 cache = cp->entry_at(index); 1906 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1907 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 1908 handle_exception); 1909 cache = cp->entry_at(index); 1910 } 1911 1912 #ifdef VM_JVMTI 1913 if (_jvmti_interp_events) { 1914 int *count_addr; 1915 oop obj; 1916 // Check to see if a field modification watch has been set 1917 // before we take the time to call into the VM. 
1918 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1919 if ( *count_addr > 0 ) { 1920 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1921 obj = (oop)NULL; 1922 } else { 1923 obj = (oop) STACK_OBJECT(-1); 1924 VERIFY_OOP(obj); 1925 } 1926 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1927 obj, 1928 cache), 1929 handle_exception); 1930 } 1931 } 1932 #endif /* VM_JVMTI */ 1933 1934 oop obj; 1935 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1936 Klass* k = cache->f1_as_klass(); 1937 obj = k->java_mirror(); 1938 MORE_STACK(1); // Assume single slot push 1939 } else { 1940 obj = (oop) STACK_OBJECT(-1); 1941 CHECK_NULL(obj); 1942 } 1943 1944 // 1945 // Now store the result on the stack 1946 // 1947 TosState tos_type = cache->flag_state(); 1948 int field_offset = cache->f2_as_index(); 1949 if (cache->is_volatile()) { 1950 if (tos_type == atos) { 1951 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 1952 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 1953 } else if (tos_type == itos) { 1954 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 1955 } else if (tos_type == ltos) { 1956 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 1957 MORE_STACK(1); 1958 } else if (tos_type == btos) { 1959 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 1960 } else if (tos_type == ctos) { 1961 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 1962 } else if (tos_type == stos) { 1963 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 1964 } else if (tos_type == ftos) { 1965 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 1966 } else { 1967 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 1968 MORE_STACK(1); 1969 } 1970 } else { 1971 if (tos_type == atos) { 1972 VERIFY_OOP(obj->obj_field(field_offset)); 1973 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 1974 } else if (tos_type == itos) { 1975 SET_STACK_INT(obj->int_field(field_offset), -1); 1976 } else if 
(tos_type == ltos) { 1977 SET_STACK_LONG(obj->long_field(field_offset), 0); 1978 MORE_STACK(1); 1979 } else if (tos_type == btos) { 1980 SET_STACK_INT(obj->byte_field(field_offset), -1); 1981 } else if (tos_type == ctos) { 1982 SET_STACK_INT(obj->char_field(field_offset), -1); 1983 } else if (tos_type == stos) { 1984 SET_STACK_INT(obj->short_field(field_offset), -1); 1985 } else if (tos_type == ftos) { 1986 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 1987 } else { 1988 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 1989 MORE_STACK(1); 1990 } 1991 } 1992 1993 UPDATE_PC_AND_CONTINUE(3); 1994 } 1995 1996 CASE(_putfield): 1997 CASE(_putstatic): 1998 { 1999 u2 index = Bytes::get_native_u2(pc+1); 2000 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2001 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2002 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 2003 handle_exception); 2004 cache = cp->entry_at(index); 2005 } 2006 2007 #ifdef VM_JVMTI 2008 if (_jvmti_interp_events) { 2009 int *count_addr; 2010 oop obj; 2011 // Check to see if a field modification watch has been set 2012 // before we take the time to call into the VM. 2013 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2014 if ( *count_addr > 0 ) { 2015 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2016 obj = (oop)NULL; 2017 } 2018 else { 2019 if (cache->is_long() || cache->is_double()) { 2020 obj = (oop) STACK_OBJECT(-3); 2021 } else { 2022 obj = (oop) STACK_OBJECT(-2); 2023 } 2024 VERIFY_OOP(obj); 2025 } 2026 2027 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2028 obj, 2029 cache, 2030 (jvalue *)STACK_SLOT(-1)), 2031 handle_exception); 2032 } 2033 } 2034 #endif /* VM_JVMTI */ 2035 2036 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2037 // out so c++ compiler has a chance for constant prop to fold everything possible away. 
2038 2039 oop obj; 2040 int count; 2041 TosState tos_type = cache->flag_state(); 2042 2043 count = -1; 2044 if (tos_type == ltos || tos_type == dtos) { 2045 --count; 2046 } 2047 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2048 Klass* k = cache->f1_as_klass(); 2049 obj = k->java_mirror(); 2050 } else { 2051 --count; 2052 obj = (oop) STACK_OBJECT(count); 2053 CHECK_NULL(obj); 2054 } 2055 2056 // 2057 // Now store the result 2058 // 2059 int field_offset = cache->f2_as_index(); 2060 if (cache->is_volatile()) { 2061 if (tos_type == itos) { 2062 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2063 } else if (tos_type == atos) { 2064 VERIFY_OOP(STACK_OBJECT(-1)); 2065 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2066 } else if (tos_type == btos) { 2067 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2068 } else if (tos_type == ltos) { 2069 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2070 } else if (tos_type == ctos) { 2071 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2072 } else if (tos_type == stos) { 2073 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2074 } else if (tos_type == ftos) { 2075 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2076 } else { 2077 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2078 } 2079 OrderAccess::storeload(); 2080 } else { 2081 if (tos_type == itos) { 2082 obj->int_field_put(field_offset, STACK_INT(-1)); 2083 } else if (tos_type == atos) { 2084 VERIFY_OOP(STACK_OBJECT(-1)); 2085 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2086 } else if (tos_type == btos) { 2087 obj->byte_field_put(field_offset, STACK_INT(-1)); 2088 } else if (tos_type == ltos) { 2089 obj->long_field_put(field_offset, STACK_LONG(-1)); 2090 } else if (tos_type == ctos) { 2091 obj->char_field_put(field_offset, STACK_INT(-1)); 2092 } else if (tos_type == stos) { 2093 obj->short_field_put(field_offset, STACK_INT(-1)); 2094 } else if (tos_type == ftos) { 
2095 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2096 } else { 2097 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2098 } 2099 } 2100 2101 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2102 } 2103 2104 CASE(_new): { 2105 u2 index = Bytes::get_Java_u2(pc+1); 2106 ConstantPool* constants = istate->method()->constants(); 2107 if (!constants->tag_at(index).is_unresolved_klass()) { 2108 // Make sure klass is initialized and doesn't have a finalizer 2109 Klass* entry = constants->slot_at(index).get_klass(); 2110 assert(entry->is_klass(), "Should be resolved klass"); 2111 Klass* k_entry = (Klass*) entry; 2112 assert(k_entry->oop_is_instance(), "Should be InstanceKlass"); 2113 InstanceKlass* ik = (InstanceKlass*) k_entry; 2114 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2115 size_t obj_size = ik->size_helper(); 2116 oop result = NULL; 2117 // If the TLAB isn't pre-zeroed then we'll have to do it 2118 bool need_zero = !ZeroTLAB; 2119 if (UseTLAB) { 2120 result = (oop) THREAD->tlab().allocate(obj_size); 2121 } 2122 if (result == NULL) { 2123 need_zero = true; 2124 // Try allocate in shared eden 2125 retry: 2126 HeapWord* compare_to = *Universe::heap()->top_addr(); 2127 HeapWord* new_top = compare_to + obj_size; 2128 if (new_top <= *Universe::heap()->end_addr()) { 2129 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2130 goto retry; 2131 } 2132 result = (oop) compare_to; 2133 } 2134 } 2135 if (result != NULL) { 2136 // Initialize object (if nonzero size and need) and then the header 2137 if (need_zero ) { 2138 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2139 obj_size -= sizeof(oopDesc) / oopSize; 2140 if (obj_size > 0 ) { 2141 memset(to_zero, 0, obj_size * HeapWordSize); 2142 } 2143 } 2144 if (UseBiasedLocking) { 2145 result->set_mark(ik->prototype_header()); 2146 } else { 2147 result->set_mark(markOopDesc::prototype()); 2148 } 2149 result->set_klass_gap(0); 2150 
result->set_klass(k_entry); 2151 SET_STACK_OBJECT(result, 0); 2152 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2153 } 2154 } 2155 } 2156 // Slow case allocation 2157 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2158 handle_exception); 2159 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2160 THREAD->set_vm_result(NULL); 2161 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2162 } 2163 CASE(_anewarray): { 2164 u2 index = Bytes::get_Java_u2(pc+1); 2165 jint size = STACK_INT(-1); 2166 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2167 handle_exception); 2168 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2169 THREAD->set_vm_result(NULL); 2170 UPDATE_PC_AND_CONTINUE(3); 2171 } 2172 CASE(_multianewarray): { 2173 jint dims = *(pc+3); 2174 jint size = STACK_INT(-1); 2175 // stack grows down, dimensions are up! 2176 jint *dimarray = 2177 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2178 Interpreter::stackElementWords-1]; 2179 //adjust pointer to start of stack element 2180 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2181 handle_exception); 2182 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2183 THREAD->set_vm_result(NULL); 2184 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2185 } 2186 CASE(_checkcast): 2187 if (STACK_OBJECT(-1) != NULL) { 2188 VERIFY_OOP(STACK_OBJECT(-1)); 2189 u2 index = Bytes::get_Java_u2(pc+1); 2190 if (ProfileInterpreter) { 2191 // needs Profile_checkcast QQQ 2192 ShouldNotReachHere(); 2193 } 2194 // Constant pool may have actual klass or unresolved klass. If it is 2195 // unresolved we must resolve it 2196 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2197 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2198 } 2199 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2200 Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx 2201 // 2202 // Check for compatibilty. This check must not GC!! 
2203 // Seems way more expensive now that we must dispatch 2204 // 2205 if (objKlassOop != klassOf && 2206 !objKlassOop->is_subtype_of(klassOf)) { 2207 ResourceMark rm(THREAD); 2208 const char* objName = objKlassOop->external_name(); 2209 const char* klassName = klassOf->external_name(); 2210 char* message = SharedRuntime::generate_class_cast_message( 2211 objName, klassName); 2212 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message); 2213 } 2214 } else { 2215 if (UncommonNullCast) { 2216 // istate->method()->set_null_cast_seen(); 2217 // [RGV] Not sure what to do here! 2218 2219 } 2220 } 2221 UPDATE_PC_AND_CONTINUE(3); 2222 2223 CASE(_instanceof): 2224 if (STACK_OBJECT(-1) == NULL) { 2225 SET_STACK_INT(0, -1); 2226 } else { 2227 VERIFY_OOP(STACK_OBJECT(-1)); 2228 u2 index = Bytes::get_Java_u2(pc+1); 2229 // Constant pool may have actual klass or unresolved klass. If it is 2230 // unresolved we must resolve it 2231 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2232 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2233 } 2234 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2235 Klass* objKlassOop = STACK_OBJECT(-1)->klass(); 2236 // 2237 // Check for compatibilty. This check must not GC!! 
2238 // Seems way more expensive now that we must dispatch 2239 // 2240 if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) { 2241 SET_STACK_INT(1, -1); 2242 } else { 2243 SET_STACK_INT(0, -1); 2244 } 2245 } 2246 UPDATE_PC_AND_CONTINUE(3); 2247 2248 CASE(_ldc_w): 2249 CASE(_ldc): 2250 { 2251 u2 index; 2252 bool wide = false; 2253 int incr = 2; // frequent case 2254 if (opcode == Bytecodes::_ldc) { 2255 index = pc[1]; 2256 } else { 2257 index = Bytes::get_Java_u2(pc+1); 2258 incr = 3; 2259 wide = true; 2260 } 2261 2262 ConstantPool* constants = METHOD->constants(); 2263 switch (constants->tag_at(index).value()) { 2264 case JVM_CONSTANT_Integer: 2265 SET_STACK_INT(constants->int_at(index), 0); 2266 break; 2267 2268 case JVM_CONSTANT_Float: 2269 SET_STACK_FLOAT(constants->float_at(index), 0); 2270 break; 2271 2272 case JVM_CONSTANT_String: 2273 { 2274 oop result = constants->resolved_references()->obj_at(index); 2275 if (result == NULL) { 2276 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2277 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2278 THREAD->set_vm_result(NULL); 2279 } else { 2280 VERIFY_OOP(result); 2281 SET_STACK_OBJECT(result, 0); 2282 } 2283 break; 2284 } 2285 2286 case JVM_CONSTANT_Class: 2287 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2288 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2289 break; 2290 2291 case JVM_CONSTANT_UnresolvedClass: 2292 case JVM_CONSTANT_UnresolvedClassInError: 2293 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2294 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2295 THREAD->set_vm_result(NULL); 2296 break; 2297 2298 default: ShouldNotReachHere(); 2299 } 2300 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2301 } 2302 2303 CASE(_ldc2_w): 2304 { 2305 u2 index = Bytes::get_Java_u2(pc+1); 2306 2307 ConstantPool* constants = METHOD->constants(); 2308 switch (constants->tag_at(index).value()) { 2309 2310 case 
JVM_CONSTANT_Long: 2311 SET_STACK_LONG(constants->long_at(index), 1); 2312 break; 2313 2314 case JVM_CONSTANT_Double: 2315 SET_STACK_DOUBLE(constants->double_at(index), 1); 2316 break; 2317 default: ShouldNotReachHere(); 2318 } 2319 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2320 } 2321 2322 CASE(_fast_aldc_w): 2323 CASE(_fast_aldc): { 2324 u2 index; 2325 int incr; 2326 if (opcode == Bytecodes::_fast_aldc) { 2327 index = pc[1]; 2328 incr = 2; 2329 } else { 2330 index = Bytes::get_native_u2(pc+1); 2331 incr = 3; 2332 } 2333 2334 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2335 // This kind of CP cache entry does not need to match the flags byte, because 2336 // there is a 1-1 relation between bytecode type and CP entry type. 2337 ConstantPool* constants = METHOD->constants(); 2338 oop result = constants->resolved_references()->obj_at(index); 2339 if (result == NULL) { 2340 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2341 handle_exception); 2342 result = THREAD->vm_result(); 2343 } 2344 2345 VERIFY_OOP(result); 2346 SET_STACK_OBJECT(result, 0); 2347 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2348 } 2349 2350 CASE(_invokedynamic): { 2351 2352 if (!EnableInvokeDynamic) { 2353 // We should not encounter this bytecode if !EnableInvokeDynamic. 2354 // The verifier will stop it. However, if we get past the verifier, 2355 // this will stop the thread in a reasonable way, without crashing the JVM. 2356 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), 2357 handle_exception); 2358 ShouldNotReachHere(); 2359 } 2360 2361 u4 index = Bytes::get_native_u4(pc+1); 2362 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2363 2364 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 
2365 // This kind of CP cache entry does not need to match the flags byte, because 2366 // there is a 1-1 relation between bytecode type and CP entry type. 2367 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2368 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), 2369 handle_exception); 2370 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2371 } 2372 2373 Method* method = cache->f1_as_method(); 2374 if (VerifyOops) method->verify(); 2375 2376 if (cache->has_appendix()) { 2377 ConstantPool* constants = METHOD->constants(); 2378 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2379 MORE_STACK(1); 2380 } 2381 2382 istate->set_msg(call_method); 2383 istate->set_callee(method); 2384 istate->set_callee_entry_point(method->from_interpreted_entry()); 2385 istate->set_bcp_advance(5); 2386 2387 UPDATE_PC_AND_RETURN(0); // I'll be back... 2388 } 2389 2390 CASE(_invokehandle): { 2391 2392 if (!EnableInvokeDynamic) { 2393 ShouldNotReachHere(); 2394 } 2395 2396 u2 index = Bytes::get_native_u2(pc+1); 2397 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2398 2399 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2400 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), 2401 handle_exception); 2402 cache = cp->entry_at(index); 2403 } 2404 2405 Method* method = cache->f1_as_method(); 2406 if (VerifyOops) method->verify(); 2407 2408 if (cache->has_appendix()) { 2409 ConstantPool* constants = METHOD->constants(); 2410 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2411 MORE_STACK(1); 2412 } 2413 2414 istate->set_msg(call_method); 2415 istate->set_callee(method); 2416 istate->set_callee_entry_point(method->from_interpreted_entry()); 2417 istate->set_bcp_advance(3); 2418 2419 UPDATE_PC_AND_RETURN(0); // I'll be back... 2420 } 2421 2422 CASE(_invokeinterface): { 2423 u2 index = Bytes::get_native_u2(pc+1); 2424 2425 // QQQ Need to make this as inlined as possible. 
Probably need to split all the bytecode cases 2426 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2427 2428 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2429 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2430 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2431 handle_exception); 2432 cache = cp->entry_at(index); 2433 } 2434 2435 istate->set_msg(call_method); 2436 2437 // Special case of invokeinterface called for virtual method of 2438 // java.lang.Object. See cpCacheOop.cpp for details. 2439 // This code isn't produced by javac, but could be produced by 2440 // another compliant java compiler. 2441 if (cache->is_forced_virtual()) { 2442 Method* callee; 2443 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2444 if (cache->is_vfinal()) { 2445 callee = cache->f2_as_vfinal_method(); 2446 } else { 2447 // get receiver 2448 int parms = cache->parameter_size(); 2449 // Same comments as invokevirtual apply here 2450 VERIFY_OOP(STACK_OBJECT(-parms)); 2451 InstanceKlass* rcvrKlass = (InstanceKlass*) 2452 STACK_OBJECT(-parms)->klass(); 2453 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2454 } 2455 istate->set_callee(callee); 2456 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2457 #ifdef VM_JVMTI 2458 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2459 istate->set_callee_entry_point(callee->interpreter_entry()); 2460 } 2461 #endif /* VM_JVMTI */ 2462 istate->set_bcp_advance(5); 2463 UPDATE_PC_AND_RETURN(0); // I'll be back... 
2464 } 2465 2466 // this could definitely be cleaned up QQQ 2467 Method* callee; 2468 Klass* iclass = cache->f1_as_klass(); 2469 // InstanceKlass* interface = (InstanceKlass*) iclass; 2470 // get receiver 2471 int parms = cache->parameter_size(); 2472 oop rcvr = STACK_OBJECT(-parms); 2473 CHECK_NULL(rcvr); 2474 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2475 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2476 int i; 2477 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2478 if (ki->interface_klass() == iclass) break; 2479 } 2480 // If the interface isn't found, this class doesn't implement this 2481 // interface. The link resolver checks this but only for the first 2482 // time this interface is called. 2483 if (i == int2->itable_length()) { 2484 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); 2485 } 2486 int mindex = cache->f2_as_index(); 2487 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2488 callee = im[mindex].method(); 2489 if (callee == NULL) { 2490 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); 2491 } 2492 2493 istate->set_callee(callee); 2494 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2495 #ifdef VM_JVMTI 2496 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2497 istate->set_callee_entry_point(callee->interpreter_entry()); 2498 } 2499 #endif /* VM_JVMTI */ 2500 istate->set_bcp_advance(5); 2501 UPDATE_PC_AND_RETURN(0); // I'll be back... 2502 } 2503 2504 CASE(_invokevirtual): 2505 CASE(_invokespecial): 2506 CASE(_invokestatic): { 2507 u2 index = Bytes::get_native_u2(pc+1); 2508 2509 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2510 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2511 // out so c++ compiler has a chance for constant prop to fold everything possible away. 
2512 2513 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2514 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2515 handle_exception); 2516 cache = cp->entry_at(index); 2517 } 2518 2519 istate->set_msg(call_method); 2520 { 2521 Method* callee; 2522 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2523 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2524 if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method(); 2525 else { 2526 // get receiver 2527 int parms = cache->parameter_size(); 2528 // this works but needs a resourcemark and seems to create a vtable on every call: 2529 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2530 // 2531 // this fails with an assert 2532 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2533 // but this works 2534 VERIFY_OOP(STACK_OBJECT(-parms)); 2535 InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass(); 2536 /* 2537 Executing this code in java.lang.String: 2538 public String(char value[]) { 2539 this.count = value.length; 2540 this.value = (char[])value.clone(); 2541 } 2542 2543 a find on rcvr->klass() reports: 2544 {type array char}{type array class} 2545 - klass: {other class} 2546 2547 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2548 because rcvr->klass()->oop_is_instance() == 0 2549 However it seems to have a vtable in the right location. Huh? 
2550 2551 */ 2552 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2553 } 2554 } else { 2555 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2556 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2557 } 2558 callee = cache->f1_as_method(); 2559 } 2560 2561 istate->set_callee(callee); 2562 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2563 #ifdef VM_JVMTI 2564 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2565 istate->set_callee_entry_point(callee->interpreter_entry()); 2566 } 2567 #endif /* VM_JVMTI */ 2568 istate->set_bcp_advance(3); 2569 UPDATE_PC_AND_RETURN(0); // I'll be back... 2570 } 2571 } 2572 2573 /* Allocate memory for a new java object. */ 2574 2575 CASE(_newarray): { 2576 BasicType atype = (BasicType) *(pc+1); 2577 jint size = STACK_INT(-1); 2578 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2579 handle_exception); 2580 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2581 THREAD->set_vm_result(NULL); 2582 2583 UPDATE_PC_AND_CONTINUE(2); 2584 } 2585 2586 /* Throw an exception. */ 2587 2588 CASE(_athrow): { 2589 oop except_oop = STACK_OBJECT(-1); 2590 CHECK_NULL(except_oop); 2591 // set pending_exception so we use common code 2592 THREAD->set_pending_exception(except_oop, NULL, 0); 2593 goto handle_exception; 2594 } 2595 2596 /* goto and jsr. They are exactly the same except jsr pushes 2597 * the address of the next instruction first. 
2598 */ 2599 2600 CASE(_jsr): { 2601 /* push bytecode index on stack */ 2602 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2603 MORE_STACK(1); 2604 /* FALL THROUGH */ 2605 } 2606 2607 CASE(_goto): 2608 { 2609 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2610 address branch_pc = pc; 2611 UPDATE_PC(offset); 2612 DO_BACKEDGE_CHECKS(offset, branch_pc); 2613 CONTINUE; 2614 } 2615 2616 CASE(_jsr_w): { 2617 /* push return address on the stack */ 2618 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2619 MORE_STACK(1); 2620 /* FALL THROUGH */ 2621 } 2622 2623 CASE(_goto_w): 2624 { 2625 int32_t offset = Bytes::get_Java_u4(pc + 1); 2626 address branch_pc = pc; 2627 UPDATE_PC(offset); 2628 DO_BACKEDGE_CHECKS(offset, branch_pc); 2629 CONTINUE; 2630 } 2631 2632 /* return from a jsr or jsr_w */ 2633 2634 CASE(_ret): { 2635 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2636 UPDATE_PC_AND_CONTINUE(0); 2637 } 2638 2639 /* debugger breakpoint */ 2640 2641 CASE(_breakpoint): { 2642 Bytecodes::Code original_bytecode; 2643 DECACHE_STATE(); 2644 SET_LAST_JAVA_FRAME(); 2645 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2646 METHOD, pc); 2647 RESET_LAST_JAVA_FRAME(); 2648 CACHE_STATE(); 2649 if (THREAD->has_pending_exception()) goto handle_exception; 2650 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2651 handle_exception); 2652 2653 opcode = (jubyte)original_bytecode; 2654 goto opcode_switch; 2655 } 2656 2657 DEFAULT: 2658 fatal(err_msg("Unimplemented opcode %d = %s", opcode, 2659 Bytecodes::name((Bytecodes::Code)opcode))); 2660 goto finish; 2661 2662 } /* switch(opc) */ 2663 2664 2665 #ifdef USELABELS 2666 check_for_exception: 2667 #endif 2668 { 2669 if (!THREAD->has_pending_exception()) { 2670 CONTINUE; 2671 } 2672 /* We will be gcsafe soon, so flush our state. 
*/ 2673 DECACHE_PC(); 2674 goto handle_exception; 2675 } 2676 do_continue: ; 2677 2678 } /* while (1) interpreter loop */ 2679 2680 2681 // An exception exists in the thread state see whether this activation can handle it 2682 handle_exception: { 2683 2684 HandleMarkCleaner __hmc(THREAD); 2685 Handle except_oop(THREAD, THREAD->pending_exception()); 2686 // Prevent any subsequent HandleMarkCleaner in the VM 2687 // from freeing the except_oop handle. 2688 HandleMark __hm(THREAD); 2689 2690 THREAD->clear_pending_exception(); 2691 assert(except_oop(), "No exception to process"); 2692 intptr_t continuation_bci; 2693 // expression stack is emptied 2694 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2695 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2696 handle_exception); 2697 2698 except_oop = THREAD->vm_result(); 2699 THREAD->set_vm_result(NULL); 2700 if (continuation_bci >= 0) { 2701 // Place exception on top of stack 2702 SET_STACK_OBJECT(except_oop(), 0); 2703 MORE_STACK(1); 2704 pc = METHOD->code_base() + continuation_bci; 2705 if (TraceExceptions) { 2706 ttyLocker ttyl; 2707 ResourceMark rm; 2708 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); 2709 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2710 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2711 istate->bcp() - (intptr_t)METHOD->code_base(), 2712 continuation_bci, THREAD); 2713 } 2714 // for AbortVMOnException flag 2715 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 2716 goto run; 2717 } 2718 if (TraceExceptions) { 2719 ttyLocker ttyl; 2720 ResourceMark rm; 2721 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); 2722 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2723 tty->print_cr(" at bci %d, unwinding for thread " 
INTPTR_FORMAT, 2724 istate->bcp() - (intptr_t)METHOD->code_base(), 2725 THREAD); 2726 } 2727 // for AbortVMOnException flag 2728 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 2729 // No handler in this activation, unwind and try again 2730 THREAD->set_pending_exception(except_oop(), NULL, 0); 2731 goto handle_return; 2732 } // handle_exception: 2733 2734 // Return from an interpreter invocation with the result of the interpretation 2735 // on the top of the Java Stack (or a pending exception) 2736 2737 handle_Pop_Frame: { 2738 2739 // We don't really do anything special here except we must be aware 2740 // that we can get here without ever locking the method (if sync). 2741 // Also we skip the notification of the exit. 2742 2743 istate->set_msg(popping_frame); 2744 // Clear pending so while the pop is in process 2745 // we don't start another one if a call_vm is done. 2746 THREAD->clr_pop_frame_pending(); 2747 // Let interpreter (only) see the we're in the process of popping a frame 2748 THREAD->set_pop_frame_in_process(); 2749 2750 goto handle_return; 2751 2752 } // handle_Pop_Frame 2753 2754 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2755 // given by the invoker of the early return. 2756 handle_Early_Return: { 2757 2758 istate->set_msg(early_return); 2759 2760 // Clear expression stack. 2761 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2762 2763 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2764 2765 // Push the value to be returned. 
2766 switch (istate->method()->result_type()) { 2767 case T_BOOLEAN: 2768 case T_SHORT: 2769 case T_BYTE: 2770 case T_CHAR: 2771 case T_INT: 2772 SET_STACK_INT(ts->earlyret_value().i, 0); 2773 MORE_STACK(1); 2774 break; 2775 case T_LONG: 2776 SET_STACK_LONG(ts->earlyret_value().j, 1); 2777 MORE_STACK(2); 2778 break; 2779 case T_FLOAT: 2780 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2781 MORE_STACK(1); 2782 break; 2783 case T_DOUBLE: 2784 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2785 MORE_STACK(2); 2786 break; 2787 case T_ARRAY: 2788 case T_OBJECT: 2789 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2790 MORE_STACK(1); 2791 break; 2792 } 2793 2794 ts->clr_earlyret_value(); 2795 ts->set_earlyret_oop(NULL); 2796 ts->clr_earlyret_pending(); 2797 2798 // Fall through to handle_return. 2799 2800 } // handle_Early_Return 2801 2802 handle_return: { 2803 DECACHE_STATE(); 2804 2805 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2806 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2807 Handle original_exception(THREAD, THREAD->pending_exception()); 2808 Handle illegal_state_oop(THREAD, NULL); 2809 2810 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2811 // in any following VM entries from freeing our live handles, but illegal_state_oop 2812 // isn't really allocated yet and so doesn't become live until later and 2813 // in unpredicatable places. Instead we must protect the places where we enter the 2814 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2815 // a NULL oop in it and then overwrite the oop later as needed. This isn't 2816 // unfortunately isn't possible. 2817 2818 THREAD->clear_pending_exception(); 2819 2820 // 2821 // As far as we are concerned we have returned. If we have a pending exception 2822 // that will be returned as this invocation's result. 
However if we get any 2823 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 2824 // will be our final result (i.e. monitor exception trumps a pending exception). 2825 // 2826 2827 // If we never locked the method (or really passed the point where we would have), 2828 // there is no need to unlock it (or look for other monitors), since that 2829 // could not have happened. 2830 2831 if (THREAD->do_not_unlock()) { 2832 2833 // Never locked, reset the flag now because obviously any caller must 2834 // have passed their point of locking for us to have gotten here. 2835 2836 THREAD->clr_do_not_unlock(); 2837 } else { 2838 // At this point we consider that we have returned. We now check that the 2839 // locks were properly block structured. If we find that they were not 2840 // used properly we will return with an illegal monitor exception. 2841 // The exception is checked by the caller not the callee since this 2842 // checking is considered to be part of the invocation and therefore 2843 // in the callers scope (JVM spec 8.13). 2844 // 2845 // Another weird thing to watch for is if the method was locked 2846 // recursively and then not exited properly. This means we must 2847 // examine all the entries in reverse time(and stack) order and 2848 // unlock as we find them. If we find the method monitor before 2849 // we are at the initial entry then we should throw an exception. 2850 // It is not clear the template based interpreter does this 2851 // correctly 2852 2853 BasicObjectLock* base = istate->monitor_base(); 2854 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 2855 bool method_unlock_needed = METHOD->is_synchronized(); 2856 // We know the initial monitor was used for the method don't check that 2857 // slot in the loop 2858 if (method_unlock_needed) base--; 2859 2860 // Check all the monitors to see they are unlocked. Install exception if found to be locked. 
2861 while (end < base) { 2862 oop lockee = end->obj(); 2863 if (lockee != NULL) { 2864 BasicLock* lock = end->lock(); 2865 markOop header = lock->displaced_header(); 2866 end->set_obj(NULL); 2867 2868 if (!lockee->mark()->has_bias_pattern()) { 2869 // If it isn't recursive we either must swap old header or call the runtime 2870 if (header != NULL) { 2871 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 2872 // restore object for the slow case 2873 end->set_obj(lockee); 2874 { 2875 // Prevent any HandleMarkCleaner from freeing our live handles 2876 HandleMark __hm(THREAD); 2877 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 2878 } 2879 } 2880 } 2881 } 2882 // One error is plenty 2883 if (illegal_state_oop() == NULL && !suppress_error) { 2884 { 2885 // Prevent any HandleMarkCleaner from freeing our live handles 2886 HandleMark __hm(THREAD); 2887 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2888 } 2889 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2890 illegal_state_oop = THREAD->pending_exception(); 2891 THREAD->clear_pending_exception(); 2892 } 2893 } 2894 end++; 2895 } 2896 // Unlock the method if needed 2897 if (method_unlock_needed) { 2898 if (base->obj() == NULL) { 2899 // The method is already unlocked this is not good. 2900 if (illegal_state_oop() == NULL && !suppress_error) { 2901 { 2902 // Prevent any HandleMarkCleaner from freeing our live handles 2903 HandleMark __hm(THREAD); 2904 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2905 } 2906 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2907 illegal_state_oop = THREAD->pending_exception(); 2908 THREAD->clear_pending_exception(); 2909 } 2910 } else { 2911 // 2912 // The initial monitor is always used for the method 2913 // However if that slot is no longer the oop for the method it was unlocked 2914 // and reused by something that wasn't unlocked! 
2915 // 2916 // deopt can come in with rcvr dead because c2 knows 2917 // its value is preserved in the monitor. So we can't use locals[0] at all 2918 // and must use first monitor slot. 2919 // 2920 oop rcvr = base->obj(); 2921 if (rcvr == NULL) { 2922 if (!suppress_error) { 2923 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), ""); 2924 illegal_state_oop = THREAD->pending_exception(); 2925 THREAD->clear_pending_exception(); 2926 } 2927 } else if (UseHeavyMonitors) { 2928 { 2929 // Prevent any HandleMarkCleaner from freeing our live handles. 2930 HandleMark __hm(THREAD); 2931 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 2932 } 2933 if (THREAD->has_pending_exception()) { 2934 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 2935 THREAD->clear_pending_exception(); 2936 } 2937 } else { 2938 BasicLock* lock = base->lock(); 2939 markOop header = lock->displaced_header(); 2940 base->set_obj(NULL); 2941 2942 if (!rcvr->mark()->has_bias_pattern()) { 2943 base->set_obj(NULL); 2944 // If it isn't recursive we either must swap old header or call the runtime 2945 if (header != NULL) { 2946 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { 2947 // restore object for the slow case 2948 base->set_obj(rcvr); 2949 { 2950 // Prevent any HandleMarkCleaner from freeing our live handles 2951 HandleMark __hm(THREAD); 2952 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 2953 } 2954 if (THREAD->has_pending_exception()) { 2955 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 2956 THREAD->clear_pending_exception(); 2957 } 2958 } 2959 } 2960 } 2961 } 2962 } 2963 } 2964 } 2965 // Clear the do_not_unlock flag now. 2966 THREAD->clr_do_not_unlock(); 2967 2968 // 2969 // Notify jvmti/jvmdi 2970 // 2971 // NOTE: we do not notify a method_exit if we have a pending exception, 2972 // including an exception we generate for unlocking checks. 
In the former 2973 // case, JVMDI has already been notified by our call for the exception handler 2974 // and in both cases as far as JVMDI is concerned we have already returned. 2975 // If we notify it again JVMDI will be all confused about how many frames 2976 // are still on the stack (4340444). 2977 // 2978 // NOTE Further! It turns out the the JVMTI spec in fact expects to see 2979 // method_exit events whenever we leave an activation unless it was done 2980 // for popframe. This is nothing like jvmdi. However we are passing the 2981 // tests at the moment (apparently because they are jvmdi based) so rather 2982 // than change this code and possibly fail tests we will leave it alone 2983 // (with this note) in anticipation of changing the vm and the tests 2984 // simultaneously. 2985 2986 2987 // 2988 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 2989 2990 2991 2992 #ifdef VM_JVMTI 2993 if (_jvmti_interp_events) { 2994 // Whenever JVMTI puts a thread in interp_only_mode, method 2995 // entry/exit events are sent for that thread to track stack depth. 2996 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 2997 { 2998 // Prevent any HandleMarkCleaner from freeing our live handles 2999 HandleMark __hm(THREAD); 3000 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3001 } 3002 } 3003 } 3004 #endif /* VM_JVMTI */ 3005 3006 // 3007 // See if we are returning any exception 3008 // A pending exception that was pending prior to a possible popping frame 3009 // overrides the popping frame. 
3010 // 3011 assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed"); 3012 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3013 // inform the frame manager we have no result 3014 istate->set_msg(throwing_exception); 3015 if (illegal_state_oop() != NULL) 3016 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3017 else 3018 THREAD->set_pending_exception(original_exception(), NULL, 0); 3019 UPDATE_PC_AND_RETURN(0); 3020 } 3021 3022 if (istate->msg() == popping_frame) { 3023 // Make it simpler on the assembly code and set the message for the frame pop. 3024 // returns 3025 if (istate->prev() == NULL) { 3026 // We must be returning to a deoptimized frame (because popframe only happens between 3027 // two interpreted frames). We need to save the current arguments in C heap so that 3028 // the deoptimized frame when it restarts can copy the arguments to its expression 3029 // stack and re-execute the call. We also have to notify deoptimization that this 3030 // has occurred and to pick the preserved args copy them to the deoptimized frame's 3031 // java expression stack. Yuck. 
3032 // 3033 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3034 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3035 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3036 } 3037 } else { 3038 istate->set_msg(return_from_method); 3039 } 3040 3041 // Normal return 3042 // Advance the pc and return to frame manager 3043 UPDATE_PC_AND_RETURN(1); 3044 } /* handle_return: */ 3045 3046 // This is really a fatal error return 3047 3048 finish: 3049 DECACHE_TOS(); 3050 DECACHE_PC(); 3051 3052 return; 3053 } 3054 3055 /* 3056 * All the code following this point is only produced once and is not present 3057 * in the JVMTI version of the interpreter 3058 */ 3059 3060 #ifndef VM_JVMTI 3061 3062 // This constructor should only be used to contruct the object to signal 3063 // interpreter initialization. All other instances should be created by 3064 // the frame manager. 3065 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3066 if (msg != initialize) ShouldNotReachHere(); 3067 _msg = msg; 3068 _self_link = this; 3069 _prev_link = NULL; 3070 } 3071 3072 // Inline static functions for Java Stack and Local manipulation 3073 3074 // The implementations are platform dependent. We have to worry about alignment 3075 // issues on some machines which can change on the same platform depending on 3076 // whether it is an LP64 machine also. 
// ---------------------------------------------------------------------------
// Inline accessors for the Java expression stack.
//
// 'tos' is the current top-of-stack pointer and 'offset' selects a slot
// relative to it; Interpreter::expr_index_at() translates the (negated)
// logical offset into a physical slot index, keeping the platform's stack
// layout out of this code.  The narrow-typed readers (jint/jfloat)
// reinterpret the slot's storage in place; the 64-bit readers view a slot
// pair through the VMJavaVal64 overlay (per the platform-dependence note
// below, alignment handling may differ per platform -- see the
// platform-specific interpreter headers).

// Raw slot read, returned as an address (used for returnAddress values).
address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
  return (address) tos[Interpreter::expr_index_at(-offset)];
}

// Read a jint stored in a single stack slot (bitwise reinterpretation).
jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
  return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
}

// Read a jfloat stored in a single stack slot (bitwise reinterpretation).
jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
  return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
}

// Read an oop (object reference) stored in a single stack slot.
oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
  return (oop)tos [Interpreter::expr_index_at(-offset)];
}

// Read a jdouble; 64-bit values occupy a two-slot pair viewed through
// the VMJavaVal64 overlay.
jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
  return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
}

// Read a jlong; 64-bit values occupy a two-slot pair viewed through
// the VMJavaVal64 overlay.
jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
  return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
}

// only used for value types
// Store a raw address value into a stack slot.
void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
                                                        int offset) {
  *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

// Store a jint into a stack slot (bitwise, mirrors stack_int).
void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
                                                       int offset) {
  *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

// Store a jfloat into a stack slot (bitwise, mirrors stack_float).
void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
                                                         int offset) {
  *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

// Store an oop into a stack slot.
void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
                                                          int offset) {
  *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
}

// needs to be platform dep for the 32 bit platforms.
3123 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3124 int offset) { 3125 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3126 } 3127 3128 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3129 address addr, int offset) { 3130 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3131 ((VMJavaVal64*)addr)->d); 3132 } 3133 3134 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3135 int offset) { 3136 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3137 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3138 } 3139 3140 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3141 address addr, int offset) { 3142 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3143 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3144 ((VMJavaVal64*)addr)->l; 3145 } 3146 3147 // Locals 3148 3149 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3150 return (address)locals[Interpreter::local_index_at(-offset)]; 3151 } 3152 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3153 return (jint)locals[Interpreter::local_index_at(-offset)]; 3154 } 3155 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3156 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3157 } 3158 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3159 return (oop)locals[Interpreter::local_index_at(-offset)]; 3160 } 3161 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3162 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3163 } 3164 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3165 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l; 3166 } 3167 3168 // Returns the address of locals value. 
3169 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) { 3170 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); 3171 } 3172 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) { 3173 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); 3174 } 3175 3176 // Used for local value or returnAddress 3177 void BytecodeInterpreter::set_locals_slot(intptr_t *locals, 3178 address value, int offset) { 3179 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value; 3180 } 3181 void BytecodeInterpreter::set_locals_int(intptr_t *locals, 3182 jint value, int offset) { 3183 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value; 3184 } 3185 void BytecodeInterpreter::set_locals_float(intptr_t *locals, 3186 jfloat value, int offset) { 3187 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value; 3188 } 3189 void BytecodeInterpreter::set_locals_object(intptr_t *locals, 3190 oop value, int offset) { 3191 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value; 3192 } 3193 void BytecodeInterpreter::set_locals_double(intptr_t *locals, 3194 jdouble value, int offset) { 3195 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value; 3196 } 3197 void BytecodeInterpreter::set_locals_long(intptr_t *locals, 3198 jlong value, int offset) { 3199 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value; 3200 } 3201 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals, 3202 address addr, int offset) { 3203 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d; 3204 } 3205 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals, 3206 address addr, int offset) { 3207 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l; 3208 } 3209 3210 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset, 3211 intptr_t* locals, int 
locals_offset) { 3212 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)]; 3213 locals[Interpreter::local_index_at(-locals_offset)] = value; 3214 } 3215 3216 3217 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset, 3218 int to_offset) { 3219 tos[Interpreter::expr_index_at(-to_offset)] = 3220 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)]; 3221 } 3222 3223 void BytecodeInterpreter::dup(intptr_t *tos) { 3224 copy_stack_slot(tos, -1, 0); 3225 } 3226 void BytecodeInterpreter::dup2(intptr_t *tos) { 3227 copy_stack_slot(tos, -2, 0); 3228 copy_stack_slot(tos, -1, 1); 3229 } 3230 3231 void BytecodeInterpreter::dup_x1(intptr_t *tos) { 3232 /* insert top word two down */ 3233 copy_stack_slot(tos, -1, 0); 3234 copy_stack_slot(tos, -2, -1); 3235 copy_stack_slot(tos, 0, -2); 3236 } 3237 3238 void BytecodeInterpreter::dup_x2(intptr_t *tos) { 3239 /* insert top word three down */ 3240 copy_stack_slot(tos, -1, 0); 3241 copy_stack_slot(tos, -2, -1); 3242 copy_stack_slot(tos, -3, -2); 3243 copy_stack_slot(tos, 0, -3); 3244 } 3245 void BytecodeInterpreter::dup2_x1(intptr_t *tos) { 3246 /* insert top 2 slots three down */ 3247 copy_stack_slot(tos, -1, 1); 3248 copy_stack_slot(tos, -2, 0); 3249 copy_stack_slot(tos, -3, -1); 3250 copy_stack_slot(tos, 1, -2); 3251 copy_stack_slot(tos, 0, -3); 3252 } 3253 void BytecodeInterpreter::dup2_x2(intptr_t *tos) { 3254 /* insert top 2 slots four down */ 3255 copy_stack_slot(tos, -1, 1); 3256 copy_stack_slot(tos, -2, 0); 3257 copy_stack_slot(tos, -3, -1); 3258 copy_stack_slot(tos, -4, -2); 3259 copy_stack_slot(tos, 1, -3); 3260 copy_stack_slot(tos, 0, -4); 3261 } 3262 3263 3264 void BytecodeInterpreter::swap(intptr_t *tos) { 3265 // swap top two elements 3266 intptr_t val = tos[Interpreter::expr_index_at(1)]; 3267 // Copy -2 entry to -1 3268 copy_stack_slot(tos, -2, -1); 3269 // Store saved -1 entry into -2 3270 tos[Interpreter::expr_index_at(2)] = val; 3271 } 3272 // 
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

// Map a BytecodeInterpreter message value to a printable name for
// debugging output.  Any value not listed falls through to "BAD MSG".
const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}

// Dump the complete interpreter state -- frame pointers, current message,
// the result union, and platform-specific fields -- to the tty.
// Debug-only (compiled out in PRODUCT builds).
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    // ResourceMark scopes the C-heap-free allocation of the name string.
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  // The _result union: call_method fields, then OSR fields.
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

// Convenience hook with C linkage and a raw uintptr_t argument -- NOTE(review):
// presumably intended to be invoked by hand from a native debugger; verify
// before removing.
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // !VM_JVMTI
#endif // CC_INTERP