/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

// no precompiled headers
#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
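/*
 * Concretely: with PREFETCH_OPCCODE defined, the dispatch macros defined
 * below end each handler with "opcode = *pc", so the top of the while loop
 * branches on an already-loaded value; without it, the fetch happens at
 * the top of the loop instead (compare the CONTINUE variants further down).
 */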
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT \
    if (SafepointSynchronize::is_synchronizing()) { \
        { \
          /* zap freed handles rather than GC'ing them */ \
          HandleMarkCleaner __hmc(THREAD); \
        } \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
    DECACHE_STATE(); \
    SET_LAST_JAVA_FRAME(); \
    { \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
       ThreadInVMfromJava trans(THREAD); \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
    } \
    RESET_LAST_JAVA_FRAME(); \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap) \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
{ \
    BytecodeCounter::_counter_value++; \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) { \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
                                                  topOfStack[Interpreter::expr_index_at(1)], \
                                                  topOfStack[Interpreter::expr_index_at(2)]), \
              handle_exception); \
    } \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result, if we call at_single_stepping_point() we refetch the opcode
   afterward to get the current opcode. This will override any other
   prefetching that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY() \
{ \
    if (_jvmti_interp_events) { \
      if (JvmtiExport::should_post_single_step()) { \
        DECACHE_STATE(); \
        SET_LAST_JAVA_FRAME(); \
        ThreadInVMfromJava trans(THREAD); \
        JvmtiExport::at_single_stepping_point(THREAD, \
                                              istate->method(), \
                                              pc); \
        RESET_LAST_JAVA_FRAME(); \
        CACHE_STATE(); \
        if (THREAD->pop_frame_pending() && \
            !THREAD->pop_frame_in_process()) { \
          goto handle_Pop_Frame; \
        } \
        if (THREAD->jvmti_thread_state() && \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return; \
        } \
        opcode = *pc; \
      } \
    } \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif
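/*
 * Taken together, USELABELS and PREFETCH_OPCCODE select one of three shapes
 * for the CONTINUE/UPDATE_PC_* macros below:
 *   - USELABELS:               fetch the next opcode and jump straight to
 *                              its handler with a computed goto;
 *   - !USELABELS, PREFETCH:    fetch the next opcode, then "continue" (or
 *                              "goto do_continue") back to the switch;
 *   - !USELABELS, no PREFETCH: just loop back; the top of the while loop
 *                              does the fetch.
 */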
/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE { \
        opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE { \
        opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        continue; \
    }
#else
#define CONTINUE { \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        continue; \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; opcode = *pc; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; opcode = *pc; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager.
#define UPDATE_PC_AND_RETURN(opsize) \
   DECACHE_TOS(); \
   istate->set_bcp(pc+opsize); \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res) \
  res = METHOD->method_counters(); \
  if (res == NULL) { \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */
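/*
 * Every branching opcode below follows the same shape: compute the taken
 * branch offset ("skip", negative for a loop backedge), remember the
 * pre-branch bcp, update pc/topOfStack, and only then run the backedge
 * checks, e.g.:
 *
 *   address branch_pc = pc;
 *   UPDATE_PC_AND_TOS(skip, -1);
 *   DO_BACKEDGE_CHECKS(skip, branch_pc);
 *   CONTINUE;
 *
 * so the counter/OSR/safepoint work below is only done for backwards
 * branches (skip <= 0).
 */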
// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc) \
    if ((skip) <= 0) { \
      MethodCounters* mcs; \
      GET_METHOD_COUNTERS(mcs); \
      if (UseLoopCounter) { \
        bool do_OSR = UseOnStackReplacement; \
        mcs->backedge_counter()->increment(); \
        if (ProfileInterpreter) { \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
          /* Check for overflow against MDO count. */ \
          do_OSR = do_OSR \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit) \
            /* When ProfileInterpreter is on, the backedge_count comes */ \
            /* from the methodDataOop, which value does not get reset on */ \
            /* the call to frequency_counter_overflow(). To avoid */ \
            /* excessive calls to the overflow routine while the method is */ \
            /* being compiled, add a second test to make sure the overflow */ \
            /* function is called only once every overflow_frequency. */ \
            && (!(mdo_last_branch_taken_count & 1023)); \
        } else { \
          /* check for overflow of backedge counter */ \
          do_OSR = do_OSR \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        } \
        if (do_OSR) { \
          nmethod* osr_nmethod; \
          OSR_REQUEST(osr_nmethod, branch_pc); \
          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
            intptr_t* buf; \
            /* Call OSR migration with last java frame only, no checks. */ \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
            istate->set_msg(do_osr); \
            istate->set_osr_buf((address)buf); \
            istate->set_osr_entry(osr_nmethod->osr_entry()); \
            return; \
          } \
        } \
      }  /* UseCompiler ... */ \
      SAFEPOINT; \
    }

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
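/*
 * The cached pc and topOfStack must be flushed back into the frame before
 * anything that can trigger a GC or a stack walk. The CALL_VM* macros
 * defined below therefore bracket every VM call as
 *
 *   DECACHE_STATE(); SET_LAST_JAVA_FRAME();
 *   <call>;
 *   RESET_LAST_JAVA_FRAME(); CACHE_STATE();
 *
 * and handlers avoid holding raw oops in locals across a CALL_VM, since a
 * GC during the call can move the referenced object.
 */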
/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly.
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_) \
        if ((obj_) == NULL) { \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        } \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE() \
        CACHE_TOS(); \
        CACHE_PC(); \
        CACHE_CP(); \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func) \
        DECACHE_STATE(); \
        SET_LAST_JAVA_FRAME(); \
        func; \
        RESET_LAST_JAVA_FRAME(); \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func) \
        CALL_VM_NAKED_LJF(func) \
        if (THREAD->pop_frame_pending() && \
            !THREAD->pop_frame_in_process()) { \
          goto handle_Pop_Frame; \
        } \
        if (THREAD->jvmti_thread_state() && \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return; \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) { \
          CALL_VM_NOCHECK(func); \
          if (THREAD->has_pending_exception()) goto label; \
        }

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks variant is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)
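  // _compiling and _jvmti_interp_events are written once, by the initialize
  // message handled below, before any method is interpreted; the hot loop
  // then only reads them -- these are the "simpler-to-test variables" the
  // comment above refers to.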
#ifdef ASSERT
  if (istate->_msg != initialize) {
    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
#ifndef SHARK
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate,
  // so save a copy so we can verify it.
  interpreterState orig = istate;
#endif

  register intptr_t*          topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address            pc = istate->bcp();
  register jubyte             opcode;
  register intptr_t*          locals = istate->locals();
  register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*        THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif
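  // Under USELABELS the opcode dispatch below is a 256-entry table of GCC
  // label addresses ("labels as values"); DISPATCH(opcode) is then a single
  // indirect goto. A minimal standalone sketch of the same technique:
  //
  //   static void* tbl[] = { &&op_a, &&op_b };
  //   goto *tbl[next];
  //   op_a: ... goto *tbl[next];
  //   op_b: ... goto *tbl[next];
  //
  // Unused opcodes map to opc_default, which reaches the "default" handler.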
#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,

/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,

/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,

/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,

/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,

/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,

/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,

/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,

/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge,  &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne,  &&opc_goto,
/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xD0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xE4 */ &&opc_default,     &&opc_fast_aldc,      &&opc_fast_aldc_w,  &&opc_return_register_finalizer,
/* 0xE8 */ &&opc_invokehandle,&&opc_default,        &&opc_default,      &&opc_default,
/* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xF4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xF8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xFC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */

#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif
// #define HACK
#ifdef HACK
  bool interesting = false;
#endif // HACK

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
            topOfStack >= istate->stack_limit() &&
            topOfStack < istate->stack_base(),
            "Stack top out of range");

#ifdef CC_INTERP_PROFILE
  // MethodData's last branch taken count.
  uint mdo_last_branch_taken_count = 0;
#else
  const uint mdo_last_branch_taken_count = 0;
#endif

  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
          // We no longer retry on a counter overflow.
        }
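        // Hitting the invocation limit does not hand this activation over
        // to compiled code; frequency_counter_overflow() just files a
        // compile request and interpretation continues (hence "no longer
        // retry" above).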
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("entering: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack),
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
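      // The synchronized-method prologue below mirrors the monitorenter
      // opcode: if the mark word carries the bias pattern, try in order to
      // (a) recognize an existing bias to this thread, (b) revoke a stale
      // bias, (c) rebias after an epoch change, or (d) claim an anonymous
      // bias; only if all of that fails (success == false) does it fall
      // back to the displaced-header CAS of stock lightweight locking.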
      // Lock method if synchronized.
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);

          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }

        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      if (_compiling) {
        // Set MDX back to the ProfileData of the invoke bytecode that will be
        // restarted.
        SET_MDX(NULL);
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack),
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }
      if (THREAD->jvmti_thread_state() &&
          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
        goto handle_Early_Return;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
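    // Unlike deopt_resume2 above, the case below resumes after a deopted
    // bytecode that already completed, so execution must step over it
    // rather than re-execute it.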
    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result; all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending).
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes,
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here.
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
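    // got_monitors is the second half of a monitorenter that found no free
    // BasicObjectLock slot: the frame manager has grown the monitor area
    // and re-entered here, presumably with the freshly allocated slot at
    // the top of the monitor stack (see the assert below).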
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // derefing's lockee ought to provoke implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);
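      /* A note on the stack macros used from here on: offsets are relative
       * to topOfStack, so STACK_INT(-1) is the value on top and
       * SET_STACK_INT(v, 0) writes one slot past it; the second argument of
       * UPDATE_PC_AND_TOS_AND_CONTINUE is the net change in stack depth.
       */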
      /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value) \
      CASE(opcode): \
          SET_STACK_ ## const_type(value, 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1, INT,   -1);
          OPC_CONST_n(_iconst_0,  INT,    0);
          OPC_CONST_n(_iconst_1,  INT,    1);
          OPC_CONST_n(_iconst_2,  INT,    2);
          OPC_CONST_n(_iconst_3,  INT,    3);
          OPC_CONST_n(_iconst_4,  INT,    4);
          OPC_CONST_n(_iconst_5,  INT,    5);
          OPC_CONST_n(_fconst_0,  FLOAT,  0.0);
          OPC_CONST_n(_fconst_1,  FLOAT,  1.0);
          OPC_CONST_n(_fconst_2,  FLOAT,  2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind) \
      CASE(_##opcname): \
      { \
          SET_STACK_ ## kind(VM##key##Const##value(), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
      }
          OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
          OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
          OPC_CONST2_n(lconst_0, Zero, long, LONG);
          OPC_CONST2_n(lconst_1, One,  long, LONG);

      /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num) \
      CASE(_aload_##num): \
          VERIFY_OOP(LOCALS_OBJECT(num)); \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
 \
      CASE(_iload_##num): \
      CASE(_fload_##num): \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
 \
      CASE(_lload_##num): \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
      CASE(_dload_##num): \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
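      /* wide prefixes the next load/store/ret/iinc so that its local-variable
       * index is read as a 16-bit quantity (at pc + 2), giving the 4- and
       * 6-byte instruction lengths used below; wide iinc additionally takes
       * a 16-bit signed increment at pc + 4.
       */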
      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];

          // Wide and its sub-bytecode are counted as separate instructions. If we
          // don't account for this here, the bytecode trace skips the next bytecode.
          DO_UPDATE_INSTRUCTION_COUNT(opcode);

          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // Profile ret.
                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
                  // Now, update the pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num) \
      CASE(_astore_##num): \
          astore(topOfStack, -1, locals, num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_istore_##num): \
      CASE(_fstore_##num): \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num) \
      CASE(_dstore_##num): \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      CASE(_lstore_##num): \
          SET_LOCALS_LONG(STACK_LONG(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):             /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):             /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):            /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):            /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {             /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test) \
      CASE(_i##opcname): \
          if (test && (STACK_INT(-1) == 0)) { \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero", note_div0Check_trap); \
          } \
          SET_STACK_INT(VMint##opname(STACK_INT(-2), \
                                      STACK_INT(-1)), \
                                      -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_l##opcname): \
      { \
          if (test) { \
            jlong l1 = STACK_LONG(-1); \
            if (VMlongEqz(l1)) { \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero", note_div0Check_trap); \
            } \
          } \
          /* First long at (-1,-2) next long at (-3,-4) */ \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
                                        STACK_LONG(-1)), \
                                        -3); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname) \
      CASE(_d##opcname): { \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
                                            STACK_DOUBLE(-1)), \
                                            -3); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      } \
      CASE(_f##opcname): \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
                                          STACK_FLOAT(-1)), \
                                          -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname) \
      CASE(_i##opcname): \
          SET_STACK_INT(VMint##opname(STACK_INT(-2), \
                                      STACK_INT(-1)), \
                                      -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_l##opcname): \
      { \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
                                        STACK_INT(-1)), \
                                        -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

      /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

      /* negate the value on the top of the stack */

      CASE(_ineg):
          SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
          SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
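      /* As with the binary long/double operators above, category-2 values
       * occupy two stack slots: the operand is addressed as STACK_LONG(-1)
       * or STACK_DOUBLE(-1), but pushing or popping one moves topOfStack
       * by 2.
       */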
      CASE(_lneg):
      {
          SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
          SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
          SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):       /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):       /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):       /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i):       /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f):       /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l):       /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      /* comparison operators */


#define COMPARISON_OP(name, comparison) \
      CASE(_if_icmp##name): { \
          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -2); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      } \
      CASE(_if##name): { \
          const bool cmp = (STACK_INT(-1) comparison 0); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define COMPARISON_OP2(name, comparison) \
      COMPARISON_OP(name, comparison) \
      CASE(_if_acmp##name): { \
          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -2); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define NULL_COMPARISON_NOT_OP(name) \
      CASE(_if##name): { \
          const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define NULL_COMPARISON_OP(name) \
      CASE(_if##name): { \
          const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);

      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          if (((uint32_t) key > (uint32_t)(high - low))) {
            key = -1;
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
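      /* Both switch bytecodes start with a 4-byte-aligned payload (hence
       * VMalignWordUp): tableswitch stores default/low/high followed by a
       * dense jump table indexed by (key - low), while lookupswitch (next)
       * stores default/npairs followed by match:offset pairs that are
       * searched linearly here.
       */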
      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          // Remember index.
          int      index = -1;
          int      newindex = 0;
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              index = newindex;
              break;
            }
            newindex += 1;
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff) \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
      jint     index  = STACK_INT(arrayOff + 1); \
      char message[jintAsStringSize]; \
      CHECK_NULL(arrObj); \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
          sprintf(message, "%d", index); \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message, note_rangeCheck_trap); \
      }
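      /* Note the unsigned compare in ARRAY_INTRO above: casting index to
       * uint32_t folds the index < 0 and index >= length tests into one
       * branch, since a negative index becomes a huge unsigned value.
       */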
These handle conversion from < 32-bit types */ 1675 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1676 { \ 1677 ARRAY_INTRO(-2); \ 1678 (void)extra; \ 1679 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1680 -2); \ 1681 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1682 } 1683 1684 /* 64-bit loads */ 1685 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1686 { \ 1687 ARRAY_INTRO(-2); \ 1688 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1689 (void)extra; \ 1690 UPDATE_PC_AND_CONTINUE(1); \ 1691 } 1692 1693 CASE(_iaload): 1694 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1695 CASE(_faload): 1696 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1697 CASE(_aaload): { 1698 ARRAY_INTRO(-2); 1699 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1700 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1701 } 1702 CASE(_baload): 1703 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1704 CASE(_caload): 1705 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1706 CASE(_saload): 1707 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1708 CASE(_laload): 1709 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1710 CASE(_daload): 1711 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1712 1713 /* 32-bit stores. These handle conversion to < 32-bit types */ 1714 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1715 { \ 1716 ARRAY_INTRO(-3); \ 1717 (void)extra; \ 1718 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1719 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1720 } 1721 1722 /* 64-bit stores */ 1723 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1724 { \ 1725 ARRAY_INTRO(-4); \ 1726 (void)extra; \ 1727 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1728 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1729 } 1730 1731 CASE(_iastore): 1732 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1733 CASE(_fastore): 1734 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1735 /* 1736 * This one looks different because of the assignability check 1737 */ 1738 CASE(_aastore): { 1739 oop rhsObject = STACK_OBJECT(-1); 1740 VERIFY_OOP(rhsObject); 1741 ARRAY_INTRO( -3); 1742 // arrObj, index are set 1743 if (rhsObject != NULL) { 1744 /* Check assignability of rhsObject into arrObj */ 1745 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1746 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1747 // 1748 // Check for compatibility. This check must not GC!! 1749 // Seems way more expensive now that we must dispatch 1750 // 1751 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1752 // Decrement counter if subtype check failed. 1753 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1754 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1755 } 1756 // Profile checkcast with null_seen and receiver. 1757 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1758 } else { 1759 // Profile checkcast with null_seen and receiver.
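// (Per the JVMS, storing null into a reference array always succeeds;
// no subtype check is needed, so only the profile is updated.)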
1760 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1761 } 1762 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1763 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1764 } 1765 CASE(_bastore): 1766 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1767 CASE(_castore): 1768 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1769 CASE(_sastore): 1770 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1771 CASE(_lastore): 1772 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1773 CASE(_dastore): 1774 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1775 1776 CASE(_arraylength): 1777 { 1778 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1779 CHECK_NULL(ary); 1780 SET_STACK_INT(ary->length(), -1); 1781 UPDATE_PC_AND_CONTINUE(1); 1782 } 1783 1784 /* monitorenter and monitorexit for locking/unlocking an object */ 1785 1786 CASE(_monitorenter): { 1787 oop lockee = STACK_OBJECT(-1); 1788 // dereferencing lockee ought to provoke an implicit null check 1789 CHECK_NULL(lockee); 1790 // find a free monitor slot or one already allocated for this object; 1791 // if we find a matching object then we need a new monitor slot 1792 // since this is a recursive enter 1793 BasicObjectLock* limit = istate->monitor_base(); 1794 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1795 BasicObjectLock* entry = NULL; 1796 while (most_recent != limit ) { 1797 if (most_recent->obj() == NULL) entry = most_recent; 1798 else if (most_recent->obj() == lockee) break; 1799 most_recent++; 1800 } 1801 if (entry != NULL) { 1802 entry->set_obj(lockee); 1803 bool success = false; 1804 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1805 1806 markOop mark = lockee->mark(); 1807 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1808 // implies UseBiasedLocking 1809 if (mark->has_bias_pattern()) { 1810 uintptr_t thread_ident; 1811 uintptr_t anticipated_bias_locking_value; 1812 thread_ident = (uintptr_t)istate->thread(); 1813 anticipated_bias_locking_value = 1814 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1815 ~((uintptr_t) markOopDesc::age_mask_in_place); 1816 1817 if (anticipated_bias_locking_value == 0) { 1818 // already biased towards this thread, nothing to do 1819 if (PrintBiasedLockingStatistics) { 1820 (* BiasedLocking::biased_lock_entry_count_addr())++; 1821 } 1822 success = true; 1823 } 1824 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1825 // try to revoke the bias 1826 markOop header = lockee->klass()->prototype_header(); 1827 if (hash != markOopDesc::no_hash) { 1828 header = header->copy_set_hash(hash); 1829 } 1830 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1831 if (PrintBiasedLockingStatistics) 1832 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1833 } 1834 } 1835 else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { 1836 // try to rebias 1837 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1838 if (hash != markOopDesc::no_hash) { 1839 new_header = new_header->copy_set_hash(hash); 1840 } 1841 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 1842 if (PrintBiasedLockingStatistics) 1843 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1844 } 1845 else { 1846 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1847 } 1848 success = true; 1849 } 1850 else { 1851 // try to bias towards thread in case object is anonymously biased 1852 markOop
header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1853 (uintptr_t)markOopDesc::age_mask_in_place | 1854 epoch_mask_in_place)); 1855 if (hash != markOopDesc::no_hash) { 1856 header = header->copy_set_hash(hash); 1857 } 1858 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1859 // debugging hint 1860 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1861 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1862 if (PrintBiasedLockingStatistics) 1863 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1864 } 1865 else { 1866 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1867 } 1868 success = true; 1869 } 1870 } 1871 1872 // traditional lightweight locking 1873 if (!success) { 1874 markOop displaced = lockee->mark()->set_unlocked(); 1875 entry->lock()->set_displaced_header(displaced); 1876 bool call_vm = UseHeavyMonitors; 1877 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1878 // Is it simple recursive case? 1879 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1880 entry->lock()->set_displaced_header(NULL); 1881 } else { 1882 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1883 } 1884 } 1885 } 1886 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1887 } else { 1888 istate->set_msg(more_monitors); 1889 UPDATE_PC_AND_RETURN(0); // Re-execute 1890 } 1891 } 1892 1893 CASE(_monitorexit): { 1894 oop lockee = STACK_OBJECT(-1); 1895 CHECK_NULL(lockee); 1896 // dereferencing lockee ought to provoke an implicit null check 1897 // find our monitor slot 1898 BasicObjectLock* limit = istate->monitor_base(); 1899 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1900 while (most_recent != limit ) { 1901 if ((most_recent)->obj() == lockee) { 1902 BasicLock* lock = most_recent->lock(); 1903 markOop header = lock->displaced_header(); 1904 most_recent->set_obj(NULL); 1905 if (!lockee->mark()->has_bias_pattern()) { 1906 bool call_vm = UseHeavyMonitors; 1907 // If it isn't recursive, we must either swap the old header or call the runtime 1908 if (header != NULL || call_vm) { 1909 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1910 // restore object for the slow case 1911 most_recent->set_obj(lockee); 1912 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1913 } 1914 } 1915 } 1916 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1917 } 1918 most_recent++; 1919 } 1920 // Need to throw illegal monitor state exception 1921 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1922 ShouldNotReachHere(); 1923 } 1924 1925 /* All of the non-quick opcodes. */ 1926 1927 /* Set clobbersCpIndex true if the quickened opcode clobbers the 1928 * constant pool index in the instruction. 1929 */ 1930 CASE(_getfield): 1931 CASE(_getstatic): 1932 { 1933 u2 index; 1934 ConstantPoolCacheEntry* cache; 1935 index = Bytes::get_native_u2(pc+1); 1936 1937 // QQQ Need to make this as inlined as possible. Probably need to 1938 // split all the bytecode cases out so c++ compiler has a chance 1939 // for constant prop to fold everything possible away.
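// The cache entry is resolved lazily: the first execution of this
// bytecode traps into InterpreterRuntime::resolve_get_put() below, which
// fills in the field offset, TosState, and flags; later executions take
// the resolved fast path directly.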
1940 1941 cache = cp->entry_at(index); 1942 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1943 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 1944 handle_exception); 1945 cache = cp->entry_at(index); 1946 } 1947 1948 #ifdef VM_JVMTI 1949 if (_jvmti_interp_events) { 1950 int *count_addr; 1951 oop obj; 1952 // Check to see if a field access watch has been set 1953 // before we take the time to call into the VM. 1954 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1955 if ( *count_addr > 0 ) { 1956 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1957 obj = (oop)NULL; 1958 } else { 1959 obj = (oop) STACK_OBJECT(-1); 1960 VERIFY_OOP(obj); 1961 } 1962 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1963 obj, 1964 cache), 1965 handle_exception); 1966 } 1967 } 1968 #endif /* VM_JVMTI */ 1969 1970 oop obj; 1971 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1972 Klass* k = cache->f1_as_klass(); 1973 obj = k->java_mirror(); 1974 MORE_STACK(1); // Assume single slot push 1975 } else { 1976 obj = (oop) STACK_OBJECT(-1); 1977 CHECK_NULL(obj); 1978 } 1979 1980 // 1981 // Now store the result on the stack 1982 // 1983 TosState tos_type = cache->flag_state(); 1984 int field_offset = cache->f2_as_index(); 1985 if (cache->is_volatile()) { 1986 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 1987 OrderAccess::fence(); 1988 } 1989 if (tos_type == atos) { 1990 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 1991 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 1992 } else if (tos_type == itos) { 1993 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 1994 } else if (tos_type == ltos) { 1995 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 1996 MORE_STACK(1); 1997 } else if (tos_type == btos) { 1998 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 1999 } else if (tos_type == ctos) { 2000 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 2001 } else if (tos_type == stos) { 2002 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 2003 } else if (tos_type == ftos) { 2004 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 2005 } else { 2006 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 2007 MORE_STACK(1); 2008 } 2009 } else { 2010 if (tos_type == atos) { 2011 VERIFY_OOP(obj->obj_field(field_offset)); 2012 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2013 } else if (tos_type == itos) { 2014 SET_STACK_INT(obj->int_field(field_offset), -1); 2015 } else if (tos_type == ltos) { 2016 SET_STACK_LONG(obj->long_field(field_offset), 0); 2017 MORE_STACK(1); 2018 } else if (tos_type == btos) { 2019 SET_STACK_INT(obj->byte_field(field_offset), -1); 2020 } else if (tos_type == ctos) { 2021 SET_STACK_INT(obj->char_field(field_offset), -1); 2022 } else if (tos_type == stos) { 2023 SET_STACK_INT(obj->short_field(field_offset), -1); 2024 } else if (tos_type == ftos) { 2025 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2026 } else { 2027 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2028 MORE_STACK(1); 2029 } 2030 } 2031 2032 UPDATE_PC_AND_CONTINUE(3); 2033 } 2034 2035 CASE(_putfield): 2036 CASE(_putstatic): 2037 { 2038 u2 index = Bytes::get_native_u2(pc+1); 2039 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2040 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2041 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), 2042 handle_exception); 2043 cache = cp->entry_at(index); 2044 } 2045 2046 #ifdef VM_JVMTI 2047 if
(_jvmti_interp_events) { 2048 int *count_addr; 2049 oop obj; 2050 // Check to see if a field modification watch has been set 2051 // before we take the time to call into the VM. 2052 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2053 if ( *count_addr > 0 ) { 2054 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2055 obj = (oop)NULL; 2056 } 2057 else { 2058 if (cache->is_long() || cache->is_double()) { 2059 obj = (oop) STACK_OBJECT(-3); 2060 } else { 2061 obj = (oop) STACK_OBJECT(-2); 2062 } 2063 VERIFY_OOP(obj); 2064 } 2065 2066 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2067 obj, 2068 cache, 2069 (jvalue *)STACK_SLOT(-1)), 2070 handle_exception); 2071 } 2072 } 2073 #endif /* VM_JVMTI */ 2074 2075 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2076 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2077 2078 oop obj; 2079 int count; 2080 TosState tos_type = cache->flag_state(); 2081 2082 count = -1; 2083 if (tos_type == ltos || tos_type == dtos) { 2084 --count; 2085 } 2086 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2087 Klass* k = cache->f1_as_klass(); 2088 obj = k->java_mirror(); 2089 } else { 2090 --count; 2091 obj = (oop) STACK_OBJECT(count); 2092 CHECK_NULL(obj); 2093 } 2094 2095 // 2096 // Now store the result 2097 // 2098 int field_offset = cache->f2_as_index(); 2099 if (cache->is_volatile()) { 2100 if (tos_type == itos) { 2101 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2102 } else if (tos_type == atos) { 2103 VERIFY_OOP(STACK_OBJECT(-1)); 2104 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2105 } else if (tos_type == btos) { 2106 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2107 } else if (tos_type == ltos) { 2108 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2109 } else if (tos_type == ctos) { 2110 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2111 } else if (tos_type == stos) { 2112 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2113 } else if (tos_type == ftos) { 2114 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2115 } else { 2116 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2117 } 2118 OrderAccess::storeload(); 2119 } else { 2120 if (tos_type == itos) { 2121 obj->int_field_put(field_offset, STACK_INT(-1)); 2122 } else if (tos_type == atos) { 2123 VERIFY_OOP(STACK_OBJECT(-1)); 2124 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2125 } else if (tos_type == btos) { 2126 obj->byte_field_put(field_offset, STACK_INT(-1)); 2127 } else if (tos_type == ltos) { 2128 obj->long_field_put(field_offset, STACK_LONG(-1)); 2129 } else if (tos_type == ctos) { 2130 obj->char_field_put(field_offset, STACK_INT(-1)); 2131 } else if (tos_type == stos) { 2132 obj->short_field_put(field_offset, STACK_INT(-1)); 2133 } else if (tos_type == ftos) { 2134 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2135 } else { 2136 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2137 } 2138 } 2139 2140 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2141 } 2142 2143 CASE(_new): { 2144 u2 index = Bytes::get_Java_u2(pc+1); 2145 ConstantPool* constants = istate->method()->constants(); 2146 if (!constants->tag_at(index).is_unresolved_klass()) { 2147 // Make sure klass is initialized and doesn't have a finalizer 2148 Klass* entry = constants->slot_at(index).get_klass(); 2149 assert(entry->is_klass(), "Should be resolved klass"); 2150 Klass* k_entry = (Klass*) 
entry; 2151 assert(k_entry->oop_is_instance(), "Should be InstanceKlass"); 2152 InstanceKlass* ik = (InstanceKlass*) k_entry; 2153 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2154 size_t obj_size = ik->size_helper(); 2155 oop result = NULL; 2156 // If the TLAB isn't pre-zeroed then we'll have to do it 2157 bool need_zero = !ZeroTLAB; 2158 if (UseTLAB) { 2159 result = (oop) THREAD->tlab().allocate(obj_size); 2160 } 2161 // Disable non-TLAB-based fast-path, because profiling requires that all 2162 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2163 // returns NULL. 2164 #ifndef CC_INTERP_PROFILE 2165 if (result == NULL) { 2166 need_zero = true; 2167 // Try allocate in shared eden 2168 retry: 2169 HeapWord* compare_to = *Universe::heap()->top_addr(); 2170 HeapWord* new_top = compare_to + obj_size; 2171 if (new_top <= *Universe::heap()->end_addr()) { 2172 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2173 goto retry; 2174 } 2175 result = (oop) compare_to; 2176 } 2177 } 2178 #endif 2179 if (result != NULL) { 2180 // Initialize object (if nonzero size and need) and then the header 2181 if (need_zero ) { 2182 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2183 obj_size -= sizeof(oopDesc) / oopSize; 2184 if (obj_size > 0 ) { 2185 memset(to_zero, 0, obj_size * HeapWordSize); 2186 } 2187 } 2188 if (UseBiasedLocking) { 2189 result->set_mark(ik->prototype_header()); 2190 } else { 2191 result->set_mark(markOopDesc::prototype()); 2192 } 2193 result->set_klass_gap(0); 2194 result->set_klass(k_entry); 2195 // Must prevent reordering of stores for object initialization 2196 // with stores that publish the new object. 2197 OrderAccess::storestore(); 2198 SET_STACK_OBJECT(result, 0); 2199 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2200 } 2201 } 2202 } 2203 // Slow case allocation 2204 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2205 handle_exception); 2206 // Must prevent reordering of stores for object initialization 2207 // with stores that publish the new object. 2208 OrderAccess::storestore(); 2209 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2210 THREAD->set_vm_result(NULL); 2211 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2212 } 2213 CASE(_anewarray): { 2214 u2 index = Bytes::get_Java_u2(pc+1); 2215 jint size = STACK_INT(-1); 2216 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2217 handle_exception); 2218 // Must prevent reordering of stores for object initialization 2219 // with stores that publish the new object. 2220 OrderAccess::storestore(); 2221 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2222 THREAD->set_vm_result(NULL); 2223 UPDATE_PC_AND_CONTINUE(3); 2224 } 2225 CASE(_multianewarray): { 2226 jint dims = *(pc+3); 2227 jint size = STACK_INT(-1); 2228 // stack grows down, dimensions are up! 2229 jint *dimarray = 2230 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2231 Interpreter::stackElementWords-1]; 2232 //adjust pointer to start of stack element 2233 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2234 handle_exception); 2235 // Must prevent reordering of stores for object initialization 2236 // with stores that publish the new object. 
2237 OrderAccess::storestore(); 2238 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2239 THREAD->set_vm_result(NULL); 2240 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2241 } 2242 CASE(_checkcast): 2243 if (STACK_OBJECT(-1) != NULL) { 2244 VERIFY_OOP(STACK_OBJECT(-1)); 2245 u2 index = Bytes::get_Java_u2(pc+1); 2246 // Constant pool may have actual klass or unresolved klass. If it is 2247 // unresolved we must resolve it. 2248 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2249 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2250 } 2251 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2252 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2253 // 2254 // Check for compatibility. This check must not GC!! 2255 // Seems way more expensive now that we must dispatch. 2256 // 2257 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2258 // Decrement counter at checkcast. 2259 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2260 ResourceMark rm(THREAD); 2261 const char* objName = objKlass->external_name(); 2262 const char* klassName = klassOf->external_name(); 2263 char* message = SharedRuntime::generate_class_cast_message( 2264 objName, klassName); 2265 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2266 } 2267 // Profile checkcast with null_seen and receiver. 2268 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2269 } else { 2270 // Profile checkcast with null_seen and receiver. 2271 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2272 } 2273 UPDATE_PC_AND_CONTINUE(3); 2274 2275 CASE(_instanceof): 2276 if (STACK_OBJECT(-1) == NULL) { 2277 SET_STACK_INT(0, -1); 2278 // Profile instanceof with null_seen and receiver. 2279 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2280 } else { 2281 VERIFY_OOP(STACK_OBJECT(-1)); 2282 u2 index = Bytes::get_Java_u2(pc+1); 2283 // Constant pool may have actual klass or unresolved klass. If it is 2284 // unresolved we must resolve it. 2285 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2286 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2287 } 2288 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2289 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2290 // 2291 // Check for compatibility. This check must not GC!! 2292 // Seems way more expensive now that we must dispatch. 2293 // 2294 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2295 SET_STACK_INT(1, -1); 2296 } else { 2297 SET_STACK_INT(0, -1); 2298 // Decrement counter at checkcast. 2299 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2300 } 2301 // Profile instanceof with null_seen and receiver.
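// (Unlike _checkcast above, a failed subtype check here just pushes 0
// instead of raising a ClassCastException.)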
2302 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2303 } 2304 UPDATE_PC_AND_CONTINUE(3); 2305 2306 CASE(_ldc_w): 2307 CASE(_ldc): 2308 { 2309 u2 index; 2310 bool wide = false; 2311 int incr = 2; // frequent case 2312 if (opcode == Bytecodes::_ldc) { 2313 index = pc[1]; 2314 } else { 2315 index = Bytes::get_Java_u2(pc+1); 2316 incr = 3; 2317 wide = true; 2318 } 2319 2320 ConstantPool* constants = METHOD->constants(); 2321 switch (constants->tag_at(index).value()) { 2322 case JVM_CONSTANT_Integer: 2323 SET_STACK_INT(constants->int_at(index), 0); 2324 break; 2325 2326 case JVM_CONSTANT_Float: 2327 SET_STACK_FLOAT(constants->float_at(index), 0); 2328 break; 2329 2330 case JVM_CONSTANT_String: 2331 { 2332 oop result = constants->resolved_references()->obj_at(index); 2333 if (result == NULL) { 2334 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2335 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2336 THREAD->set_vm_result(NULL); 2337 } else { 2338 VERIFY_OOP(result); 2339 SET_STACK_OBJECT(result, 0); 2340 } 2341 break; 2342 } 2343 2344 case JVM_CONSTANT_Class: 2345 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2346 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2347 break; 2348 2349 case JVM_CONSTANT_UnresolvedClass: 2350 case JVM_CONSTANT_UnresolvedClassInError: 2351 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2352 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2353 THREAD->set_vm_result(NULL); 2354 break; 2355 2356 default: ShouldNotReachHere(); 2357 } 2358 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2359 } 2360 2361 CASE(_ldc2_w): 2362 { 2363 u2 index = Bytes::get_Java_u2(pc+1); 2364 2365 ConstantPool* constants = METHOD->constants(); 2366 switch (constants->tag_at(index).value()) { 2367 2368 case JVM_CONSTANT_Long: 2369 SET_STACK_LONG(constants->long_at(index), 1); 2370 break; 2371 2372 case JVM_CONSTANT_Double: 2373 SET_STACK_DOUBLE(constants->double_at(index), 1); 2374 break; 2375 default: ShouldNotReachHere(); 2376 } 2377 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2378 } 2379 2380 CASE(_fast_aldc_w): 2381 CASE(_fast_aldc): { 2382 u2 index; 2383 int incr; 2384 if (opcode == Bytecodes::_fast_aldc) { 2385 index = pc[1]; 2386 incr = 2; 2387 } else { 2388 index = Bytes::get_native_u2(pc+1); 2389 incr = 3; 2390 } 2391 2392 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2393 // This kind of CP cache entry does not need to match the flags byte, because 2394 // there is a 1-1 relation between bytecode type and CP entry type. 2395 ConstantPool* constants = METHOD->constants(); 2396 oop result = constants->resolved_references()->obj_at(index); 2397 if (result == NULL) { 2398 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2399 handle_exception); 2400 result = THREAD->vm_result(); 2401 } 2402 2403 VERIFY_OOP(result); 2404 SET_STACK_OBJECT(result, 0); 2405 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2406 } 2407 2408 CASE(_invokedynamic): { 2409 2410 u4 index = Bytes::get_native_u4(pc+1); 2411 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2412 2413 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2414 // This kind of CP cache entry does not need to match the flags byte, because 2415 // there is a 1-1 relation between bytecode type and CP entry type. 2416 if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { 2417 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), 2418 handle_exception); 2419 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2420 } 2421 2422 Method* method = cache->f1_as_method(); 2423 if (VerifyOops) method->verify(); 2424 2425 if (cache->has_appendix()) { 2426 ConstantPool* constants = METHOD->constants(); 2427 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2428 MORE_STACK(1); 2429 } 2430 2431 istate->set_msg(call_method); 2432 istate->set_callee(method); 2433 istate->set_callee_entry_point(method->from_interpreted_entry()); 2434 istate->set_bcp_advance(5); 2435 2436 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2437 BI_PROFILE_UPDATE_CALL(); 2438 2439 UPDATE_PC_AND_RETURN(0); // I'll be back... 2440 } 2441 2442 CASE(_invokehandle): { 2443 2444 u2 index = Bytes::get_native_u2(pc+1); 2445 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2446 2447 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2448 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), 2449 handle_exception); 2450 cache = cp->entry_at(index); 2451 } 2452 2453 Method* method = cache->f1_as_method(); 2454 if (VerifyOops) method->verify(); 2455 2456 if (cache->has_appendix()) { 2457 ConstantPool* constants = METHOD->constants(); 2458 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2459 MORE_STACK(1); 2460 } 2461 2462 istate->set_msg(call_method); 2463 istate->set_callee(method); 2464 istate->set_callee_entry_point(method->from_interpreted_entry()); 2465 istate->set_bcp_advance(3); 2466 2467 // Invokehandle has got a call counter, just like a final call -> increment! 2468 BI_PROFILE_UPDATE_FINALCALL(); 2469 2470 UPDATE_PC_AND_RETURN(0); // I'll be back... 2471 } 2472 2473 CASE(_invokeinterface): { 2474 u2 index = Bytes::get_native_u2(pc+1); 2475 2476 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2477 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2478 2479 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2480 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2481 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2482 handle_exception); 2483 cache = cp->entry_at(index); 2484 } 2485 2486 istate->set_msg(call_method); 2487 2488 // Special case of invokeinterface called for virtual method of 2489 // java.lang.Object. See cpCacheOop.cpp for details. 2490 // This code isn't produced by javac, but could be produced by 2491 // another compliant java compiler. 2492 if (cache->is_forced_virtual()) { 2493 Method* callee; 2494 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2495 if (cache->is_vfinal()) { 2496 callee = cache->f2_as_vfinal_method(); 2497 // Profile 'special case of invokeinterface' final call. 2498 BI_PROFILE_UPDATE_FINALCALL(); 2499 } else { 2500 // Get receiver. 2501 int parms = cache->parameter_size(); 2502 // Same comments as invokevirtual apply here. 2503 oop rcvr = STACK_OBJECT(-parms); 2504 VERIFY_OOP(rcvr); 2505 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2506 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2507 // Profile 'special case of invokeinterface' virtual call. 
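// (These forced-virtual calls select one of java.lang.Object's methods,
// so they dispatch through the vtable exactly like _invokevirtual.)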
2508 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2509 } 2510 istate->set_callee(callee); 2511 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2512 #ifdef VM_JVMTI 2513 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2514 istate->set_callee_entry_point(callee->interpreter_entry()); 2515 } 2516 #endif /* VM_JVMTI */ 2517 istate->set_bcp_advance(5); 2518 UPDATE_PC_AND_RETURN(0); // I'll be back... 2519 } 2520 2521 // this could definitely be cleaned up QQQ 2522 Method* callee; 2523 Klass* iclass = cache->f1_as_klass(); 2524 // InstanceKlass* interface = (InstanceKlass*) iclass; 2525 // get receiver 2526 int parms = cache->parameter_size(); 2527 oop rcvr = STACK_OBJECT(-parms); 2528 CHECK_NULL(rcvr); 2529 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2530 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2531 int i; 2532 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2533 if (ki->interface_klass() == iclass) break; 2534 } 2535 // If the interface isn't found, this class doesn't implement this 2536 // interface. The link resolver checks this but only for the first 2537 // time this interface is called. 2538 if (i == int2->itable_length()) { 2539 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2540 } 2541 int mindex = cache->f2_as_index(); 2542 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2543 callee = im[mindex].method(); 2544 if (callee == NULL) { 2545 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2546 } 2547 2548 // Profile virtual call. 2549 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2550 2551 istate->set_callee(callee); 2552 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2553 #ifdef VM_JVMTI 2554 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2555 istate->set_callee_entry_point(callee->interpreter_entry()); 2556 } 2557 #endif /* VM_JVMTI */ 2558 istate->set_bcp_advance(5); 2559 UPDATE_PC_AND_RETURN(0); // I'll be back... 2560 } 2561 2562 CASE(_invokevirtual): 2563 CASE(_invokespecial): 2564 CASE(_invokestatic): { 2565 u2 index = Bytes::get_native_u2(pc+1); 2566 2567 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2568 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2569 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2570 2571 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2572 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), 2573 handle_exception); 2574 cache = cp->entry_at(index); 2575 } 2576 2577 istate->set_msg(call_method); 2578 { 2579 Method* callee; 2580 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2581 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2582 if (cache->is_vfinal()) { 2583 callee = cache->f2_as_vfinal_method(); 2584 // Profile final call. 
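// (A vfinal entry names a statically known target, so it is profiled as
// a final call rather than through the receiver's klass.)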
2585 BI_PROFILE_UPDATE_FINALCALL(); 2586 } else { 2587 // get receiver 2588 int parms = cache->parameter_size(); 2589 // this works but needs a resourcemark and seems to create a vtable on every call: 2590 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2591 // 2592 // this fails with an assert 2593 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2594 // but this works 2595 oop rcvr = STACK_OBJECT(-parms); 2596 VERIFY_OOP(rcvr); 2597 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass(); 2598 /* 2599 Executing this code in java.lang.String: 2600 public String(char value[]) { 2601 this.count = value.length; 2602 this.value = (char[])value.clone(); 2603 } 2604 2605 a find on rcvr->klass() reports: 2606 {type array char}{type array class} 2607 - klass: {other class} 2608 2609 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure 2610 because rcvr->klass()->oop_is_instance() == 0 2611 However it seems to have a vtable in the right location. Huh? 2612 2613 */ 2614 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; 2615 // Profile virtual call. 2616 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2617 } 2618 } else { 2619 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2620 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2621 } 2622 callee = cache->f1_as_method(); 2623 2624 // Profile call. 2625 BI_PROFILE_UPDATE_CALL(); 2626 } 2627 2628 istate->set_callee(callee); 2629 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2630 #ifdef VM_JVMTI 2631 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2632 istate->set_callee_entry_point(callee->interpreter_entry()); 2633 } 2634 #endif /* VM_JVMTI */ 2635 istate->set_bcp_advance(3); 2636 UPDATE_PC_AND_RETURN(0); // I'll be back... 2637 } 2638 } 2639 2640 /* Allocate memory for a new java array. */ 2641 2642 CASE(_newarray): { 2643 BasicType atype = (BasicType) *(pc+1); 2644 jint size = STACK_INT(-1); 2645 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2646 handle_exception); 2647 // Must prevent reordering of stores for object initialization 2648 // with stores that publish the new object. 2649 OrderAccess::storestore(); 2650 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2651 THREAD->set_vm_result(NULL); 2652 2653 UPDATE_PC_AND_CONTINUE(2); 2654 } 2655 2656 /* Throw an exception. */ 2657 2658 CASE(_athrow): { 2659 oop except_oop = STACK_OBJECT(-1); 2660 CHECK_NULL(except_oop); 2661 // set pending_exception so we use common code 2662 THREAD->set_pending_exception(except_oop, NULL, 0); 2663 goto handle_exception; 2664 } 2665 2666 /* goto and jsr. They are exactly the same except jsr pushes 2667 * the address of the next instruction first. 2668 */ 2669 2670 CASE(_jsr): { 2671 /* push bytecode index on stack */ 2672 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2673 MORE_STACK(1); 2674 /* FALL THROUGH */ 2675 } 2676 2677 CASE(_goto): 2678 { 2679 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2680 // Profile jump.
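// (goto is unconditional, so only a jump count is recorded; there is no
// taken/not-taken branch profile to update.)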
2681 BI_PROFILE_UPDATE_JUMP(); 2682 address branch_pc = pc; 2683 UPDATE_PC(offset); 2684 DO_BACKEDGE_CHECKS(offset, branch_pc); 2685 CONTINUE; 2686 } 2687 2688 CASE(_jsr_w): { 2689 /* push return address on the stack */ 2690 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2691 MORE_STACK(1); 2692 /* FALL THROUGH */ 2693 } 2694 2695 CASE(_goto_w): 2696 { 2697 int32_t offset = Bytes::get_Java_u4(pc + 1); 2698 // Profile jump. 2699 BI_PROFILE_UPDATE_JUMP(); 2700 address branch_pc = pc; 2701 UPDATE_PC(offset); 2702 DO_BACKEDGE_CHECKS(offset, branch_pc); 2703 CONTINUE; 2704 } 2705 2706 /* return from a jsr or jsr_w */ 2707 2708 CASE(_ret): { 2709 // Profile ret. 2710 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2711 // Now, update the pc. 2712 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2713 UPDATE_PC_AND_CONTINUE(0); 2714 } 2715 2716 /* debugger breakpoint */ 2717 2718 CASE(_breakpoint): { 2719 Bytecodes::Code original_bytecode; 2720 DECACHE_STATE(); 2721 SET_LAST_JAVA_FRAME(); 2722 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2723 METHOD, pc); 2724 RESET_LAST_JAVA_FRAME(); 2725 CACHE_STATE(); 2726 if (THREAD->has_pending_exception()) goto handle_exception; 2727 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2728 handle_exception); 2729 2730 opcode = (jubyte)original_bytecode; 2731 goto opcode_switch; 2732 } 2733 2734 DEFAULT: 2735 fatal(err_msg("Unimplemented opcode %d = %s", opcode, 2736 Bytecodes::name((Bytecodes::Code)opcode))); 2737 goto finish; 2738 2739 } /* switch(opc) */ 2740 2741 2742 #ifdef USELABELS 2743 check_for_exception: 2744 #endif 2745 { 2746 if (!THREAD->has_pending_exception()) { 2747 CONTINUE; 2748 } 2749 /* We will be gcsafe soon, so flush our state. */ 2750 DECACHE_PC(); 2751 goto handle_exception; 2752 } 2753 do_continue: ; 2754 2755 } /* while (1) interpreter loop */ 2756 2757 2758 // An exception exists in the thread state; see whether this activation can handle it 2759 handle_exception: { 2760 2761 HandleMarkCleaner __hmc(THREAD); 2762 Handle except_oop(THREAD, THREAD->pending_exception()); 2763 // Prevent any subsequent HandleMarkCleaner in the VM 2764 // from freeing the except_oop handle. 2765 HandleMark __hm(THREAD); 2766 2767 THREAD->clear_pending_exception(); 2768 assert(except_oop(), "No exception to process"); 2769 intptr_t continuation_bci; 2770 // expression stack is emptied 2771 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2772 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2773 handle_exception); 2774 2775 except_oop = THREAD->vm_result(); 2776 THREAD->set_vm_result(NULL); 2777 if (continuation_bci >= 0) { 2778 // Place exception on top of stack 2779 SET_STACK_OBJECT(except_oop(), 0); 2780 MORE_STACK(1); 2781 pc = METHOD->code_base() + continuation_bci; 2782 if (TraceExceptions) { 2783 ttyLocker ttyl; 2784 ResourceMark rm; 2785 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); 2786 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2787 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2788 istate->bcp() - (intptr_t)METHOD->code_base(), 2789 continuation_bci, THREAD); 2790 } 2791 // for AbortVMOnException flag 2792 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 2793 2794 // Update profiling data.
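// (The profile cursor still refers to the bci that threw; realign it to
// the handler's bci before the dispatch loop resumes.)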
2795 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2796 goto run; 2797 } 2798 if (TraceExceptions) { 2799 ttyLocker ttyl; 2800 ResourceMark rm; 2801 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); 2802 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); 2803 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, 2804 istate->bcp() - (intptr_t)METHOD->code_base(), 2805 THREAD); 2806 } 2807 // for AbortVMOnException flag 2808 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); 2809 // No handler in this activation, unwind and try again 2810 THREAD->set_pending_exception(except_oop(), NULL, 0); 2811 goto handle_return; 2812 } // handle_exception: 2813 2814 // Return from an interpreter invocation with the result of the interpretation 2815 // on the top of the Java Stack (or a pending exception) 2816 2817 handle_Pop_Frame: { 2818 2819 // We don't really do anything special here except we must be aware 2820 // that we can get here without ever locking the method (if sync). 2821 // Also we skip the notification of the exit. 2822 2823 istate->set_msg(popping_frame); 2824 // Clear pending so that while the pop is in process 2825 // we don't start another one if a call_vm is done. 2826 THREAD->clr_pop_frame_pending(); 2827 // Let the interpreter (only) see that we're in the process of popping a frame 2828 THREAD->set_pop_frame_in_process(); 2829 2830 goto handle_return; 2831 2832 } // handle_Pop_Frame 2833 2834 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2835 // given by the invoker of the early return. 2836 handle_Early_Return: { 2837 2838 istate->set_msg(early_return); 2839 2840 // Clear expression stack. 2841 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2842 2843 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2844 2845 // Push the value to be returned. 2846 switch (istate->method()->result_type()) { 2847 case T_BOOLEAN: 2848 case T_SHORT: 2849 case T_BYTE: 2850 case T_CHAR: 2851 case T_INT: 2852 SET_STACK_INT(ts->earlyret_value().i, 0); 2853 MORE_STACK(1); 2854 break; 2855 case T_LONG: 2856 SET_STACK_LONG(ts->earlyret_value().j, 1); 2857 MORE_STACK(2); 2858 break; 2859 case T_FLOAT: 2860 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2861 MORE_STACK(1); 2862 break; 2863 case T_DOUBLE: 2864 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2865 MORE_STACK(2); 2866 break; 2867 case T_ARRAY: 2868 case T_OBJECT: 2869 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2870 MORE_STACK(1); 2871 break; 2872 } 2873 2874 ts->clr_earlyret_value(); 2875 ts->set_earlyret_oop(NULL); 2876 ts->clr_earlyret_pending(); 2877 2878 // Fall through to handle_return. 2879 2880 } // handle_Early_Return 2881 2882 handle_return: { 2883 // A storestore barrier is required to order initialization of 2884 // final fields with publishing the reference to the object that 2885 // holds the field. Without the barrier the value of final fields 2886 // can be observed to change.
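// This is the interpreter's analogue of the barrier that compiled code
// emits when returning from a constructor that wrote final fields.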
2887 OrderAccess::storestore(); 2888 2889 DECACHE_STATE(); 2890 2891 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2892 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2893 Handle original_exception(THREAD, THREAD->pending_exception()); 2894 Handle illegal_state_oop(THREAD, NULL); 2895 2896 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2897 // in any following VM entries from freeing our live handles, but illegal_state_oop 2898 // isn't really allocated yet and so doesn't become live until later and 2899 // in unpredictable places. Instead we must protect the places where we enter the 2900 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2901 // a NULL oop in it and then overwrite the oop later as needed. Unfortunately 2902 // this isn't possible. 2903 2904 THREAD->clear_pending_exception(); 2905 2906 // 2907 // As far as we are concerned we have returned. If we have a pending exception 2908 // that will be returned as this invocation's result. However if we get any 2909 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 2910 // will be our final result (i.e. monitor exception trumps a pending exception). 2911 // 2912 2913 // If we never locked the method (or really passed the point where we would have), 2914 // there is no need to unlock it (or look for other monitors), since that 2915 // could not have happened. 2916 2917 if (THREAD->do_not_unlock()) { 2918 2919 // Never locked, reset the flag now because obviously any caller must 2920 // have passed their point of locking for us to have gotten here. 2921 2922 THREAD->clr_do_not_unlock(); 2923 } else { 2924 // At this point we consider that we have returned. We now check that the 2925 // locks were properly block structured. If we find that they were not 2926 // used properly we will return with an illegal monitor exception. 2927 // The exception is checked by the caller, not the callee, since this 2928 // checking is considered to be part of the invocation and therefore 2929 // in the caller's scope (JVM spec 8.13). 2930 // 2931 // Another weird thing to watch for is if the method was locked 2932 // recursively and then not exited properly. This means we must 2933 // examine all the entries in reverse time (and stack) order and 2934 // unlock as we find them. If we find the method monitor before 2935 // we are at the initial entry then we should throw an exception. 2936 // It is not clear that the template-based interpreter does this 2937 // correctly. 2938 2939 BasicObjectLock* base = istate->monitor_base(); 2940 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 2941 bool method_unlock_needed = METHOD->is_synchronized(); 2942 // We know the initial monitor was used for the method; don't check that 2943 // slot in the loop 2944 if (method_unlock_needed) base--; 2945 2946 // Check all the monitors to see that they are unlocked. Install exception if found to be locked.
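// (The scan runs from the innermost monitor up to, but not including,
// the method's own monitor slot when the method is synchronized.)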
2947 while (end < base) { 2948 oop lockee = end->obj(); 2949 if (lockee != NULL) { 2950 BasicLock* lock = end->lock(); 2951 markOop header = lock->displaced_header(); 2952 end->set_obj(NULL); 2953 2954 if (!lockee->mark()->has_bias_pattern()) { 2955 // If it isn't recursive, we must either swap the old header or call the runtime 2956 if (header != NULL) { 2957 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 2958 // restore object for the slow case 2959 end->set_obj(lockee); 2960 { 2961 // Prevent any HandleMarkCleaner from freeing our live handles 2962 HandleMark __hm(THREAD); 2963 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 2964 } 2965 } 2966 } 2967 } 2968 // One error is plenty 2969 if (illegal_state_oop() == NULL && !suppress_error) { 2970 { 2971 // Prevent any HandleMarkCleaner from freeing our live handles 2972 HandleMark __hm(THREAD); 2973 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2974 } 2975 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2976 illegal_state_oop = THREAD->pending_exception(); 2977 THREAD->clear_pending_exception(); 2978 } 2979 } 2980 end++; 2981 } 2982 // Unlock the method if needed 2983 if (method_unlock_needed) { 2984 if (base->obj() == NULL) { 2985 // The method is already unlocked; this is not good. 2986 if (illegal_state_oop() == NULL && !suppress_error) { 2987 { 2988 // Prevent any HandleMarkCleaner from freeing our live handles 2989 HandleMark __hm(THREAD); 2990 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2991 } 2992 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2993 illegal_state_oop = THREAD->pending_exception(); 2994 THREAD->clear_pending_exception(); 2995 } 2996 } else { 2997 // 2998 // The initial monitor is always used for the method. 2999 // However, if that slot no longer holds the oop for the method, it was unlocked 3000 // and reused by something that wasn't unlocked! 3001 // 3002 // deopt can come in with rcvr dead because c2 knows 3003 // its value is preserved in the monitor. So we can't use locals[0] at all 3004 // and must use the first monitor slot. 3005 // 3006 oop rcvr = base->obj(); 3007 if (rcvr == NULL) { 3008 if (!suppress_error) { 3009 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3010 illegal_state_oop = THREAD->pending_exception(); 3011 THREAD->clear_pending_exception(); 3012 } 3013 } else if (UseHeavyMonitors) { 3014 { 3015 // Prevent any HandleMarkCleaner from freeing our live handles.
3016 HandleMark __hm(THREAD); 3017 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3018 } 3019 if (THREAD->has_pending_exception()) { 3020 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3021 THREAD->clear_pending_exception(); 3022 } 3023 } else { 3024 BasicLock* lock = base->lock(); 3025 markOop header = lock->displaced_header(); 3026 base->set_obj(NULL); 3027 3028 if (!rcvr->mark()->has_bias_pattern()) { 3029 base->set_obj(NULL); 3030 // If it isn't recursive, we must either swap the old header or call the runtime 3031 if (header != NULL) { 3032 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { 3033 // restore object for the slow case 3034 base->set_obj(rcvr); 3035 { 3036 // Prevent any HandleMarkCleaner from freeing our live handles 3037 HandleMark __hm(THREAD); 3038 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3039 } 3040 if (THREAD->has_pending_exception()) { 3041 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3042 THREAD->clear_pending_exception(); 3043 } 3044 } 3045 } 3046 } 3047 } 3048 } 3049 } 3050 } 3051 // Clear the do_not_unlock flag now. 3052 THREAD->clr_do_not_unlock(); 3053 3054 // 3055 // Notify jvmti/jvmdi 3056 // 3057 // NOTE: we do not notify a method_exit if we have a pending exception, 3058 // including an exception we generate for unlocking checks. In the former 3059 // case, JVMDI has already been notified by our call for the exception handler 3060 // and in both cases as far as JVMDI is concerned we have already returned. 3061 // If we notify it again JVMDI will be all confused about how many frames 3062 // are still on the stack (4340444). 3063 // 3064 // NOTE Further! It turns out that the JVMTI spec in fact expects to see 3065 // method_exit events whenever we leave an activation unless it was done 3066 // for popframe. This is nothing like jvmdi. However we are passing the 3067 // tests at the moment (apparently because they are jvmdi based) so rather 3068 // than change this code and possibly fail tests we will leave it alone 3069 // (with this note) in anticipation of changing the vm and the tests 3070 // simultaneously. 3071 3072 3073 // 3074 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3075 3076 3077 3078 #ifdef VM_JVMTI 3079 if (_jvmti_interp_events) { 3080 // Whenever JVMTI puts a thread in interp_only_mode, method 3081 // entry/exit events are sent for that thread to track stack depth. 3082 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3083 { 3084 // Prevent any HandleMarkCleaner from freeing our live handles 3085 HandleMark __hm(THREAD); 3086 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3087 } 3088 } 3089 } 3090 #endif /* VM_JVMTI */ 3091 3092 // 3093 // See if we are returning any exception 3094 // A pending exception that was pending prior to a possible popping frame 3095 // overrides the popping frame. 3096 // 3097 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3098 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3099 // Inform the frame manager we have no result.
3100 istate->set_msg(throwing_exception); 3101 if (illegal_state_oop() != NULL) 3102 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3103 else 3104 THREAD->set_pending_exception(original_exception(), NULL, 0); 3105 UPDATE_PC_AND_RETURN(0); 3106 } 3107 3108 if (istate->msg() == popping_frame) { 3109 // Make it simpler on the assembly code and set the message for the frame pop. 3110 // returns 3111 if (istate->prev() == NULL) { 3112 // We must be returning to a deoptimized frame (because popframe only happens between 3113 // two interpreted frames). We need to save the current arguments in C heap so that 3114 // the deoptimized frame when it restarts can copy the arguments to its expression 3115 // stack and re-execute the call. We also have to notify deoptimization that this 3116 // has occurred and to pick up the preserved args and copy them to the deoptimized frame's 3117 // java expression stack. Yuck. 3118 // 3119 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3120 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3121 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3122 } 3123 } else { 3124 istate->set_msg(return_from_method); 3125 } 3126 3127 // Normal return 3128 // Advance the pc and return to frame manager 3129 UPDATE_PC_AND_RETURN(1); 3130 } /* handle_return: */ 3131 3132 // This is really a fatal error return 3133 3134 finish: 3135 DECACHE_TOS(); 3136 DECACHE_PC(); 3137 3138 return; 3139 } 3140 3141 /* 3142 * All the code following this point is only produced once and is not present 3143 * in the JVMTI version of the interpreter 3144 */ 3145 3146 #ifndef VM_JVMTI 3147 3148 // This constructor should only be used to construct the object to signal 3149 // interpreter initialization. All other instances should be created by 3150 // the frame manager. 3151 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3152 if (msg != initialize) ShouldNotReachHere(); 3153 _msg = msg; 3154 _self_link = this; 3155 _prev_link = NULL; 3156 } 3157 3158 // Inline static functions for Java Stack and Local manipulation 3159 3160 // The implementations are platform dependent. We have to worry about alignment 3161 // issues on some machines, which can change on the same platform depending on 3162 // whether it is also an LP64 machine.
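// A minimal sketch of the indexing convention used below (assuming the
// usual downward-growing expression stack, with tos pointing just past
// the live top-of-stack word):
//
//   stack_int(tos, -1)        // reads the top int slot
//   stack_int(tos, -2)        // reads the slot beneath it
//   set_stack_int(tos, v, 0)  // writes the slot about to be pushed
//
// Interpreter::expr_index_at() hides whether a logical slot occupies one
// or two machine words on the platform.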
3163 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3164 return (address) tos[Interpreter::expr_index_at(-offset)]; 3165 } 3166 3167 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3168 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3169 } 3170 3171 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3172 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3173 } 3174 3175 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3176 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3177 } 3178 3179 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3180 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3181 } 3182 3183 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3184 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3185 } 3186 3187 // only used for value types 3188 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3189 int offset) { 3190 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3191 } 3192 3193 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3194 int offset) { 3195 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3196 } 3197 3198 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3199 int offset) { 3200 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3201 } 3202 3203 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3204 int offset) { 3205 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3206 } 3207 3208 // needs to be platform dep for the 32 bit platforms. 3209 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3210 int offset) { 3211 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3212 } 3213 3214 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3215 address addr, int offset) { 3216 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3217 ((VMJavaVal64*)addr)->d); 3218 } 3219 3220 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3221 int offset) { 3222 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3223 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3224 } 3225 3226 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3227 address addr, int offset) { 3228 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3229 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3230 ((VMJavaVal64*)addr)->l; 3231 } 3232 3233 // Locals 3234 3235 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3236 return (address)locals[Interpreter::local_index_at(-offset)]; 3237 } 3238 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3239 return (jint)locals[Interpreter::local_index_at(-offset)]; 3240 } 3241 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3242 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3243 } 3244 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3245 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3246 } 3247 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3248 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3249 } 3250 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3251 return 
// Locals

address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
  return (address)locals[Interpreter::local_index_at(-offset)];
}
jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
  return (jint)locals[Interpreter::local_index_at(-offset)];
}
jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
  return (jfloat)locals[Interpreter::local_index_at(-offset)];
}
oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
}
jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
}
jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}

void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}

void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}
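//
// Editor's worked example (comment only): the dup family above is expressed
// purely in terms of copy_stack_slot(), where offset -1 is the top slot and
// offset 0 is the would-be new slot above it. For dup_x1, with v2,v1 on the
// stack (v1 on top), the three copies produce the JVMS result v1,v2,v1:
//
//   copy_stack_slot(tos, -1,  0);  // slots: ..., v2, v1, v1  (replicate top)
//   copy_stack_slot(tos, -2, -1);  // slots: ..., v2, v2, v1
//   copy_stack_slot(tos,  0, -2);  // slots: ..., v1, v2, v1
//
// The caller (the dispatch loop) then advances tos by one slot; these
// helpers never move tos themselves.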
void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}
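//
// Editor's usage note (debugging aid; this whole block is non-product code):
// the C_msg() strings above and the print() routine below exist so that the
// frame-manager state can be inspected from a native debugger. The extern
// "C" PI() hook at the end of this file takes the interpreter state pointer
// as a uintptr_t, so one might do, e.g. (hypothetical gdb session):
//
//   (gdb) call PI((uintptr_t)istate)
//
// which routes through BytecodeInterpreter::print() and dumps the thread,
// bcp, locals, current message and the rest of the state.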
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP