1 /* 2 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/vmSymbols.hpp" 27 #include "gc/shared/collectedHeap.hpp" 28 #include "interpreter/bytecodeHistogram.hpp" 29 #include "interpreter/bytecodeInterpreter.hpp" 30 #include "interpreter/bytecodeInterpreter.inline.hpp" 31 #include "interpreter/bytecodeInterpreterProfiling.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "interpreter/interpreterRuntime.hpp" 34 #include "logging/log.hpp" 35 #include "memory/resourceArea.hpp" 36 #include "oops/methodCounters.hpp" 37 #include "oops/objArrayKlass.hpp" 38 #include "oops/objArrayOop.inline.hpp" 39 #include "oops/oop.inline.hpp" 40 #include "prims/jvmtiExport.hpp" 41 #include "prims/jvmtiThreadState.hpp" 42 #include "runtime/atomic.inline.hpp" 43 #include "runtime/biasedLocking.hpp" 44 #include "runtime/frame.inline.hpp" 45 #include "runtime/handles.inline.hpp" 46 #include "runtime/interfaceSupport.hpp" 47 #include "runtime/orderAccess.inline.hpp" 48 #include "runtime/sharedRuntime.hpp" 49 #include "runtime/threadCritical.hpp" 50 #include "utilities/exceptions.hpp" 51 52 // no precompiled headers 53 #ifdef CC_INTERP 54 55 /* 56 * USELABELS - If using GCC, then use labels for the opcode dispatching 57 * rather -then a switch statement. This improves performance because it 58 * gives us the opportunity to have the instructions that calculate the 59 * next opcode to jump to be intermixed with the rest of the instructions 60 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro). 61 */ 62 #undef USELABELS 63 #ifdef __GNUC__ 64 /* 65 ASSERT signifies debugging. It is much easier to step thru bytecodes if we 66 don't use the computed goto approach. 67 */ 68 #ifndef ASSERT 69 #define USELABELS 70 #endif 71 #endif 72 73 #undef CASE 74 #ifdef USELABELS 75 #define CASE(opcode) opc ## opcode 76 #define DEFAULT opc_default 77 #else 78 #define CASE(opcode) case Bytecodes:: opcode 79 #define DEFAULT default 80 #endif 81 82 /* 83 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next 84 * opcode before going back to the top of the while loop, rather then having 85 * the top of the while loop handle it. This provides a better opportunity 86 * for instruction scheduling. Some compilers just do this prefetch 87 * automatically. Some actually end up with worse performance if you 88 * force the prefetch. Solaris gcc seems to do better, but cc does worse. 
89 */ 90 #undef PREFETCH_OPCCODE 91 #define PREFETCH_OPCCODE 92 93 /* 94 Interpreter safepoint: it is expected that the interpreter will have no live 95 handles of its own creation live at an interpreter safepoint. Therefore we 96 run a HandleMarkCleaner and trash all handles allocated in the call chain 97 since the JavaCalls::call_helper invocation that initiated the chain. 98 There really shouldn't be any handles remaining to trash but this is cheap 99 in relation to a safepoint. 100 */ 101 #define SAFEPOINT \ 102 if ( SafepointSynchronize::is_synchronizing()) { \ 103 { \ 104 /* zap freed handles rather than GC'ing them */ \ 105 HandleMarkCleaner __hmc(THREAD); \ 106 } \ 107 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \ 108 } 109 110 /* 111 * VM_JAVA_ERROR - Macro for throwing a java exception from 112 * the interpreter loop. Should really be a CALL_VM but there 113 * is no entry point to do the transition to vm so we just 114 * do it by hand here. 115 */ 116 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \ 117 DECACHE_STATE(); \ 118 SET_LAST_JAVA_FRAME(); \ 119 { \ 120 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \ 121 ThreadInVMfromJava trans(THREAD); \ 122 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \ 123 } \ 124 RESET_LAST_JAVA_FRAME(); \ 125 CACHE_STATE(); 126 127 // Normal throw of a java error. 128 #define VM_JAVA_ERROR(name, msg, note_a_trap) \ 129 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \ 130 goto handle_exception; 131 132 #ifdef PRODUCT 133 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) 134 #else 135 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \ 136 { \ 137 BytecodeCounter::_counter_value++; \ 138 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \ 139 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \ 140 if (TraceBytecodes) { \ 141 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \ 142 topOfStack[Interpreter::expr_index_at(1)], \ 143 topOfStack[Interpreter::expr_index_at(2)]), \ 144 handle_exception); \ 145 } \ 146 } 147 #endif 148 149 #undef DEBUGGER_SINGLE_STEP_NOTIFY 150 #ifdef VM_JVMTI 151 /* NOTE: (kbr) This macro must be called AFTER the PC has been 152 incremented. JvmtiExport::at_single_stepping_point() may cause a 153 breakpoint opcode to get inserted at the current PC to allow the 154 debugger to coalesce single-step events. 155 156 As a result if we call at_single_stepping_point() we refetch opcode 157 to get the current opcode. This will override any other prefetching 158 that might have occurred. 159 */ 160 #define DEBUGGER_SINGLE_STEP_NOTIFY() \ 161 { \ 162 if (_jvmti_interp_events) { \ 163 if (JvmtiExport::should_post_single_step()) { \ 164 DECACHE_STATE(); \ 165 SET_LAST_JAVA_FRAME(); \ 166 ThreadInVMfromJava trans(THREAD); \ 167 JvmtiExport::at_single_stepping_point(THREAD, \ 168 istate->method(), \ 169 pc); \ 170 RESET_LAST_JAVA_FRAME(); \ 171 CACHE_STATE(); \ 172 if (THREAD->pop_frame_pending() && \ 173 !THREAD->pop_frame_in_process()) { \ 174 goto handle_Pop_Frame; \ 175 } \ 176 if (THREAD->jvmti_thread_state() && \ 177 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ 178 goto handle_Early_Return; \ 179 } \ 180 opcode = *pc; \ 181 } \ 182 } \ 183 } 184 #else 185 #define DEBUGGER_SINGLE_STEP_NOTIFY() 186 #endif 187 188 /* 189 * CONTINUE - Macro for executing the next opcode. 
190 */ 191 #undef CONTINUE 192 #ifdef USELABELS 193 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an 194 // initialization (which is is the initialization of the table pointer...) 195 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode] 196 #define CONTINUE { \ 197 opcode = *pc; \ 198 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 199 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 200 DISPATCH(opcode); \ 201 } 202 #else 203 #ifdef PREFETCH_OPCCODE 204 #define CONTINUE { \ 205 opcode = *pc; \ 206 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 207 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 208 continue; \ 209 } 210 #else 211 #define CONTINUE { \ 212 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 213 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 214 continue; \ 215 } 216 #endif 217 #endif 218 219 220 #define UPDATE_PC(opsize) {pc += opsize; } 221 /* 222 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack. 223 */ 224 #undef UPDATE_PC_AND_TOS 225 #define UPDATE_PC_AND_TOS(opsize, stack) \ 226 {pc += opsize; MORE_STACK(stack); } 227 228 /* 229 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack, 230 * and executing the next opcode. It's somewhat similar to the combination 231 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations. 232 */ 233 #undef UPDATE_PC_AND_TOS_AND_CONTINUE 234 #ifdef USELABELS 235 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \ 236 pc += opsize; opcode = *pc; MORE_STACK(stack); \ 237 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 238 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 239 DISPATCH(opcode); \ 240 } 241 242 #define UPDATE_PC_AND_CONTINUE(opsize) { \ 243 pc += opsize; opcode = *pc; \ 244 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 245 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 246 DISPATCH(opcode); \ 247 } 248 #else 249 #ifdef PREFETCH_OPCCODE 250 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \ 251 pc += opsize; opcode = *pc; MORE_STACK(stack); \ 252 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 253 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 254 goto do_continue; \ 255 } 256 257 #define UPDATE_PC_AND_CONTINUE(opsize) { \ 258 pc += opsize; opcode = *pc; \ 259 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 260 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 261 goto do_continue; \ 262 } 263 #else 264 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \ 265 pc += opsize; MORE_STACK(stack); \ 266 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 267 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 268 goto do_continue; \ 269 } 270 271 #define UPDATE_PC_AND_CONTINUE(opsize) { \ 272 pc += opsize; \ 273 DO_UPDATE_INSTRUCTION_COUNT(opcode); \ 274 DEBUGGER_SINGLE_STEP_NOTIFY(); \ 275 goto do_continue; \ 276 } 277 #endif /* PREFETCH_OPCCODE */ 278 #endif /* USELABELS */ 279 280 // About to call a new method, update the save the adjusted pc and return to frame manager 281 #define UPDATE_PC_AND_RETURN(opsize) \ 282 DECACHE_TOS(); \ 283 istate->set_bcp(pc+opsize); \ 284 return; 285 286 287 #define METHOD istate->method() 288 #define GET_METHOD_COUNTERS(res) \ 289 res = METHOD->method_counters(); \ 290 if (res == NULL) { \ 291 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \ 292 } 293 294 #define OSR_REQUEST(res, branch_pc) \ 295 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception); 296 /* 297 * For those opcodes that need to have a GC point on a backwards branch 298 */ 299 300 // Backedge counting is kind of strange. 
The asm interpreter will increment 301 // the backedge counter as a separate counter but it does its comparisons 302 // to the sum (scaled) of invocation counter and backedge count to make 303 // a decision. It seems odd to sum them together like that. 304 305 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp 306 307 308 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \ 309 if ((skip) <= 0) { \ 310 MethodCounters* mcs; \ 311 GET_METHOD_COUNTERS(mcs); \ 312 if (UseLoopCounter) { \ 313 bool do_OSR = UseOnStackReplacement; \ 314 mcs->backedge_counter()->increment(); \ 315 if (ProfileInterpreter) { \ 316 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \ 317 /* Check for overflow against MDO count. */ \ 318 do_OSR = do_OSR \ 319 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\ 320 /* When ProfileInterpreter is on, the backedge_count comes */ \ 321 /* from the methodDataOop, which value does not get reset on */ \ 322 /* the call to frequency_counter_overflow(). To avoid */ \ 323 /* excessive calls to the overflow routine while the method is */ \ 324 /* being compiled, add a second test to make sure the overflow */ \ 325 /* function is called only once every overflow_frequency. */ \ 326 && (!(mdo_last_branch_taken_count & 1023)); \ 327 } else { \ 328 /* check for overflow of backedge counter */ \ 329 do_OSR = do_OSR \ 330 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \ 331 } \ 332 if (do_OSR) { \ 333 nmethod* osr_nmethod; \ 334 OSR_REQUEST(osr_nmethod, branch_pc); \ 335 if (osr_nmethod != NULL && osr_nmethod->is_in_use()) { \ 336 intptr_t* buf; \ 337 /* Call OSR migration with last java frame only, no checks. */ \ 338 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \ 339 istate->set_msg(do_osr); \ 340 istate->set_osr_buf((address)buf); \ 341 istate->set_osr_entry(osr_nmethod->osr_entry()); \ 342 return; \ 343 } \ 344 } \ 345 } /* UseCompiler ... */ \ 346 SAFEPOINT; \ 347 } 348 349 /* 350 * For those opcodes that need to have a GC point on a backwards branch 351 */ 352 353 /* 354 * Macros for caching and flushing the interpreter state. Some local 355 * variables need to be flushed out to the frame before we do certain 356 * things (like pushing frames or becoming gc safe) and some need to 357 * be recached later (like after popping a frame). We could use one 358 * macro to cache or decache everything, but this would be less than 359 * optimal because we don't always need to cache or decache everything; 360 * some things we know are already cached or decached. 361 */ 362 #undef DECACHE_TOS 363 #undef CACHE_TOS 364 #undef CACHE_PREV_TOS 365 #define DECACHE_TOS() istate->set_stack(topOfStack); 366 367 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack(); 368 369 #undef DECACHE_PC 370 #undef CACHE_PC 371 #define DECACHE_PC() istate->set_bcp(pc); 372 #define CACHE_PC() pc = istate->bcp(); 373 #define CACHE_CP() cp = istate->constants(); 374 #define CACHE_LOCALS() locals = istate->locals(); 375 #undef CACHE_FRAME 376 #define CACHE_FRAME() 377 378 // BCI() returns the current bytecode-index. 379 #undef BCI 380 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base())) 381 382 /* 383 * CHECK_NULL - Macro for throwing a NullPointerException if the object 384 * passed is a null ref.
385 * On some architectures/platforms it should be possible to do this implicitly 386 */ 387 #undef CHECK_NULL 388 #define CHECK_NULL(obj_) \ 389 if ((obj_) == NULL) { \ 390 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \ 391 } \ 392 VERIFY_OOP(obj_) 393 394 #define VMdoubleConstZero() 0.0 395 #define VMdoubleConstOne() 1.0 396 #define VMlongConstZero() (max_jlong-max_jlong) 397 #define VMlongConstOne() ((max_jlong-max_jlong)+1) 398 399 /* 400 * Alignment 401 */ 402 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3) 403 404 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod) 405 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS(); 406 407 // Reload interpreter state after calling the VM or a possible GC 408 #define CACHE_STATE() \ 409 CACHE_TOS(); \ 410 CACHE_PC(); \ 411 CACHE_CP(); \ 412 CACHE_LOCALS(); 413 414 // Call the VM with last java frame only. 415 #define CALL_VM_NAKED_LJF(func) \ 416 DECACHE_STATE(); \ 417 SET_LAST_JAVA_FRAME(); \ 418 func; \ 419 RESET_LAST_JAVA_FRAME(); \ 420 CACHE_STATE(); 421 422 // Call the VM. Don't check for pending exceptions. 423 #define CALL_VM_NOCHECK(func) \ 424 CALL_VM_NAKED_LJF(func) \ 425 if (THREAD->pop_frame_pending() && \ 426 !THREAD->pop_frame_in_process()) { \ 427 goto handle_Pop_Frame; \ 428 } \ 429 if (THREAD->jvmti_thread_state() && \ 430 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ 431 goto handle_Early_Return; \ 432 } 433 434 // Call the VM and check for pending exceptions 435 #define CALL_VM(func, label) { \ 436 CALL_VM_NOCHECK(func); \ 437 if (THREAD->has_pending_exception()) goto label; \ 438 } 439 440 /* 441 * BytecodeInterpreter::run(interpreterState istate) 442 * BytecodeInterpreter::runWithChecks(interpreterState istate) 443 * 444 * The real deal. This is where byte codes actually get interpreted. 445 * Basically it's a big while loop that iterates until we return from 446 * the method passed in. 447 * 448 * The runWithChecks is used if JVMTI is enabled. 449 * 450 */ 451 #if defined(VM_JVMTI) 452 void 453 BytecodeInterpreter::runWithChecks(interpreterState istate) { 454 #else 455 void 456 BytecodeInterpreter::run(interpreterState istate) { 457 #endif 458 459 // In order to simplify some tests based on switches set at runtime 460 // we invoke the interpreter a single time after switches are enabled 461 // and set simpler to to test variables rather than method calls or complex 462 // boolean expressions. 463 464 static int initialized = 0; 465 static int checkit = 0; 466 static intptr_t* c_addr = NULL; 467 static intptr_t c_value; 468 469 if (checkit && *c_addr != c_value) { 470 os::breakpoint(); 471 } 472 #ifdef VM_JVMTI 473 static bool _jvmti_interp_events = 0; 474 #endif 475 476 static int _compiling; // (UseCompiler || CountCompiledCalls) 477 478 #ifdef ASSERT 479 if (istate->_msg != initialize) { 480 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); 481 #ifndef SHARK 482 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong")); 483 #endif // !SHARK 484 } 485 // Verify linkages. 486 interpreterState l = istate; 487 do { 488 assert(l == l->_self_link, "bad link"); 489 l = l->_prev_link; 490 } while (l != NULL); 491 // Screwups with stack management usually cause us to overwrite istate 492 // save a copy so we can verify it. 
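// --- Illustrative sketch, not part of this file: a miniature stack machine showing the
// --- two ideas the loop below combines -- keeping pc/topOfStack cached in locals the way
// --- CACHE_STATE()/DECACHE_STATE() manage them, and dispatching opcodes with the GCC
// --- address-of-label extension the way USELABELS/DISPATCH/CONTINUE do. All names here
// --- (mini_run, OP_*, MINI_DISPATCH) are invented for the example; it is kept out of
// --- compilation with #if 0 and needs GCC/Clang if extracted and built on its own.
#if 0
#include <cstdint>
#include <cstdio>

enum Op : uint8_t { OP_PUSH = 0, OP_ADD, OP_PRINT, OP_HALT };

static void mini_run(const uint8_t* code) {
  const uint8_t* pc = code;          // cached bytecode pointer (cf. CACHE_PC/DECACHE_PC)
  int64_t  stack[16];
  int64_t* tos = stack;              // cached top of stack; tos[-1] is the last value pushed

  // One label per opcode: dispatch is a single indirect jump, so the code that fetches
  // the next opcode sits next to each handler instead of at the top of a while loop.
  static const void* table[] = { &&op_push, &&op_add, &&op_print, &&op_halt };
#define MINI_DISPATCH() goto *table[*pc]

  MINI_DISPATCH();
op_push:  *tos++ = (int64_t)pc[1]; pc += 2; MINI_DISPATCH();
op_add:   tos[-2] += tos[-1]; --tos; ++pc;  MINI_DISPATCH();
op_print: std::printf("%lld\n", (long long)tos[-1]); ++pc; MINI_DISPATCH();
op_halt:  return;
#undef MINI_DISPATCH
}

int main() {
  const uint8_t prog[] = { OP_PUSH, 2, OP_PUSH, 3, OP_ADD, OP_PRINT, OP_HALT };
  mini_run(prog);                    // prints 5
  return 0;
}
#endif // illustrative sketch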
493 interpreterState orig = istate; 494 #endif 495 496 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ 497 register address pc = istate->bcp(); 498 register jubyte opcode; 499 register intptr_t* locals = istate->locals(); 500 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() 501 #ifdef LOTS_OF_REGS 502 register JavaThread* THREAD = istate->thread(); 503 #else 504 #undef THREAD 505 #define THREAD istate->thread() 506 #endif 507 508 #ifdef USELABELS 509 const static void* const opclabels_data[256] = { 510 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, 511 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, 512 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, 513 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1, 514 515 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w, 516 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload, 517 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1, 518 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1, 519 520 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1, 521 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1, 522 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1, 523 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload, 524 525 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload, 526 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore, 527 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0, 528 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0, 529 530 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0, 531 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0, 532 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0, 533 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore, 534 535 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore, 536 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop, 537 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2, 538 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap, 539 540 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd, 541 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub, 542 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul, 543 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv, 544 545 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem, 546 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg, 547 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr, 548 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land, 549 550 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor, 551 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d, 552 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i, 553 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l, 554 555 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s, 556 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl, 557 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt, 558 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq, 559 560 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt, 561 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto, 562 /* 0xA8 */ &&opc_jsr, &&opc_ret, 
&&opc_tableswitch,&&opc_lookupswitch, 563 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn, 564 565 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic, 566 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, 567 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, 568 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, 569 570 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, 571 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, 572 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default, 573 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 574 575 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 576 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 577 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 578 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 579 580 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 581 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, 582 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, 583 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 584 585 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 586 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 587 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 588 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default 589 }; 590 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0]; 591 #endif /* USELABELS */ 592 593 #ifdef ASSERT 594 // this will trigger a VERIFY_OOP on entry 595 if (istate->msg() != initialize && ! METHOD->is_static()) { 596 oop rcvr = LOCALS_OBJECT(0); 597 VERIFY_OOP(rcvr); 598 } 599 #endif 600 // #define HACK 601 #ifdef HACK 602 bool interesting = false; 603 #endif // HACK 604 605 /* QQQ this should be a stack method so we don't know actual direction */ 606 guarantee(istate->msg() == initialize || 607 topOfStack >= istate->stack_limit() && 608 topOfStack < istate->stack_base(), 609 "Stack top out of range"); 610 611 #ifdef CC_INTERP_PROFILE 612 // MethodData's last branch taken count. 613 uint mdo_last_branch_taken_count = 0; 614 #else 615 const uint mdo_last_branch_taken_count = 0; 616 #endif 617 618 switch (istate->msg()) { 619 case initialize: { 620 if (initialized++) ShouldNotReachHere(); // Only one initialize call. 621 _compiling = (UseCompiler || CountCompiledCalls); 622 #ifdef VM_JVMTI 623 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); 624 #endif 625 return; 626 } 627 break; 628 case method_entry: { 629 THREAD->set_do_not_unlock(); 630 // count invocations 631 assert(initialized, "Interpreter not initialized"); 632 if (_compiling) { 633 MethodCounters* mcs; 634 GET_METHOD_COUNTERS(mcs); 635 #if defined(COMPILER2) || INCLUDE_JVMCI 636 if (ProfileInterpreter) { 637 METHOD->increment_interpreter_invocation_count(THREAD); 638 } 639 #endif 640 mcs->invocation_counter()->increment(); 641 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) { 642 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 643 // We no longer retry on a counter overflow. 644 } 645 // Get or create profile data. 
Check for pending (async) exceptions. 646 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 647 SAFEPOINT; 648 } 649 650 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 651 // initialize 652 os::breakpoint(); 653 } 654 655 #ifdef HACK 656 { 657 ResourceMark rm; 658 char *method_name = istate->method()->name_and_sig_as_C_string(); 659 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 660 tty->print_cr("entering: depth %d bci: %d", 661 (istate->_stack_base - istate->_stack), 662 istate->_bcp - istate->_method->code_base()); 663 interesting = true; 664 } 665 } 666 #endif // HACK 667 668 // Lock method if synchronized. 669 if (METHOD->is_synchronized()) { 670 // oop rcvr = locals[0].j.r; 671 oop rcvr; 672 if (METHOD->is_static()) { 673 rcvr = METHOD->constants()->pool_holder()->java_mirror(); 674 } else { 675 rcvr = LOCALS_OBJECT(0); 676 VERIFY_OOP(rcvr); 677 } 678 // The initial monitor is ours for the taking. 679 // Monitor not filled in frame manager any longer as this caused race condition with biased locking. 680 BasicObjectLock* mon = &istate->monitor_base()[-1]; 681 mon->set_obj(rcvr); 682 bool success = false; 683 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 684 markOop mark = rcvr->mark(); 685 intptr_t hash = (intptr_t) markOopDesc::no_hash; 686 // Implies UseBiasedLocking. 687 if (mark->has_bias_pattern()) { 688 uintptr_t thread_ident; 689 uintptr_t anticipated_bias_locking_value; 690 thread_ident = (uintptr_t)istate->thread(); 691 anticipated_bias_locking_value = 692 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 693 ~((uintptr_t) markOopDesc::age_mask_in_place); 694 695 if (anticipated_bias_locking_value == 0) { 696 // Already biased towards this thread, nothing to do. 697 if (PrintBiasedLockingStatistics) { 698 (* BiasedLocking::biased_lock_entry_count_addr())++; 699 } 700 success = true; 701 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 702 // Try to revoke bias. 703 markOop header = rcvr->klass()->prototype_header(); 704 if (hash != markOopDesc::no_hash) { 705 header = header->copy_set_hash(hash); 706 } 707 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) { 708 if (PrintBiasedLockingStatistics) 709 (*BiasedLocking::revoked_lock_entry_count_addr())++; 710 } 711 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { 712 // Try to rebias. 713 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident); 714 if (hash != markOopDesc::no_hash) { 715 new_header = new_header->copy_set_hash(hash); 716 } 717 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) { 718 if (PrintBiasedLockingStatistics) { 719 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 720 } 721 } else { 722 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 723 } 724 success = true; 725 } else { 726 // Try to bias towards thread in case object is anonymously biased. 727 markOop header = (markOop) ((uintptr_t) mark & 728 ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 729 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 730 if (hash != markOopDesc::no_hash) { 731 header = header->copy_set_hash(hash); 732 } 733 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 734 // Debugging hint. 
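// --- Illustrative sketch, not HotSpot code: a simplified model of the single xor-and-mask
// --- test computed above as anticipated_bias_locking_value, which classifies a biased
// --- mark word in one comparison chain. The bit layout and constants below are local to
// --- the example (they mimic, but are not, markOopDesc's values); kept out of compilation
// --- with #if 0.
#if 0
#include <cstdint>
#include <cstdio>

namespace bias_model {
  // Simplified mark word: [ thread | epoch:2 | age:4 | biased:1 | lock:2 ]
  const uintptr_t biased_lock_mask_in_place = 0x7;       // low three bits
  const uintptr_t biased_lock_pattern       = 0x5;       // 101 = biased, unlocked
  const uintptr_t age_mask_in_place         = 0xf << 3;
  const uintptr_t epoch_mask_in_place       = 0x3 << 7;

  enum Action { owned, revoke_bias, rebias, claim_anonymous_bias };

  // prototype: the klass's prototype header (bias pattern + current epoch);
  // mark: the object's mark word; thread: this thread's identity, assumed to
  // occupy only bits above the epoch field.
  Action classify(uintptr_t prototype, uintptr_t mark, uintptr_t thread) {
    uintptr_t anticipated = ((prototype | thread) ^ mark) & ~age_mask_in_place;
    if (anticipated == 0)                        return owned;                // biased to us, epoch current
    if (anticipated & biased_lock_mask_in_place) return revoke_bias;          // klass no longer biasable
    if (anticipated & epoch_mask_in_place)       return rebias;               // stale epoch: rebias to us
    return claim_anonymous_bias;                                              // biased to no thread yet
  }
}

int main() {
  using namespace bias_model;
  uintptr_t thread    = 0x1000;                              // fake thread identity (bit 12 and up)
  uintptr_t prototype = biased_lock_pattern | (0x1 << 7);    // biasable klass, epoch 1
  std::printf("%d\n", classify(prototype, prototype | thread, thread)); // 0: already ours
  std::printf("%d\n", classify(prototype, prototype,          thread)); // 3: anonymously biased
  return 0;
}
#endif // illustrative sketch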
735 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 736 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) { 737 if (PrintBiasedLockingStatistics) { 738 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 739 } 740 } else { 741 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 742 } 743 success = true; 744 } 745 } 746 747 // Traditional lightweight locking. 748 if (!success) { 749 markOop displaced = rcvr->mark()->set_unlocked(); 750 mon->lock()->set_displaced_header(displaced); 751 bool call_vm = UseHeavyMonitors; 752 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { 753 // Is it simple recursive case? 754 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 755 mon->lock()->set_displaced_header(NULL); 756 } else { 757 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 758 } 759 } 760 } 761 } 762 THREAD->clr_do_not_unlock(); 763 764 // Notify jvmti 765 #ifdef VM_JVMTI 766 if (_jvmti_interp_events) { 767 // Whenever JVMTI puts a thread in interp_only_mode, method 768 // entry/exit events are sent for that thread to track stack depth. 769 if (THREAD->is_interp_only_mode()) { 770 CALL_VM(InterpreterRuntime::post_method_entry(THREAD), 771 handle_exception); 772 } 773 } 774 #endif /* VM_JVMTI */ 775 776 goto run; 777 } 778 779 case popping_frame: { 780 // returned from a java call to pop the frame, restart the call 781 // clear the message so we don't confuse ourselves later 782 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); 783 istate->set_msg(no_request); 784 if (_compiling) { 785 // Set MDX back to the ProfileData of the invoke bytecode that will be 786 // restarted. 787 SET_MDX(NULL); 788 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 789 } 790 THREAD->clr_pop_frame_in_process(); 791 goto run; 792 } 793 794 case method_resume: { 795 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 796 // resume 797 os::breakpoint(); 798 } 799 #ifdef HACK 800 { 801 ResourceMark rm; 802 char *method_name = istate->method()->name_and_sig_as_C_string(); 803 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 804 tty->print_cr("resume: depth %d bci: %d", 805 (istate->_stack_base - istate->_stack) , 806 istate->_bcp - istate->_method->code_base()); 807 interesting = true; 808 } 809 } 810 #endif // HACK 811 // returned from a java call, continue executing. 812 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { 813 goto handle_Pop_Frame; 814 } 815 if (THREAD->jvmti_thread_state() && 816 THREAD->jvmti_thread_state()->is_earlyret_pending()) { 817 goto handle_Early_Return; 818 } 819 820 if (THREAD->has_pending_exception()) goto handle_exception; 821 // Update the pc by the saved amount of the invoke bytecode size 822 UPDATE_PC(istate->bcp_advance()); 823 824 if (_compiling) { 825 // Get or create profile data. Check for pending (async) exceptions. 826 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 827 } 828 goto run; 829 } 830 831 case deopt_resume2: { 832 // Returned from an opcode that will reexecute. Deopt was 833 // a result of a PopFrame request. 834 // 835 836 if (_compiling) { 837 // Get or create profile data. Check for pending (async) exceptions. 838 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 839 } 840 goto run; 841 } 842 843 case deopt_resume: { 844 // Returned from an opcode that has completed. 
The stack has 845 // the result all we need to do is skip across the bytecode 846 // and continue (assuming there is no exception pending) 847 // 848 // compute continuation length 849 // 850 // Note: it is possible to deopt at a return_register_finalizer opcode 851 // because this requires entering the vm to do the registering. While the 852 // opcode is complete we can't advance because there are no more opcodes 853 // much like trying to deopt at a poll return. In that has we simply 854 // get out of here 855 // 856 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) { 857 // this will do the right thing even if an exception is pending. 858 goto handle_return; 859 } 860 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); 861 if (THREAD->has_pending_exception()) goto handle_exception; 862 863 if (_compiling) { 864 // Get or create profile data. Check for pending (async) exceptions. 865 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 866 } 867 goto run; 868 } 869 case got_monitors: { 870 // continue locking now that we have a monitor to use 871 // we expect to find newly allocated monitor at the "top" of the monitor stack. 872 oop lockee = STACK_OBJECT(-1); 873 VERIFY_OOP(lockee); 874 // derefing's lockee ought to provoke implicit null check 875 // find a free monitor 876 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); 877 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); 878 entry->set_obj(lockee); 879 bool success = false; 880 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 881 882 markOop mark = lockee->mark(); 883 intptr_t hash = (intptr_t) markOopDesc::no_hash; 884 // implies UseBiasedLocking 885 if (mark->has_bias_pattern()) { 886 uintptr_t thread_ident; 887 uintptr_t anticipated_bias_locking_value; 888 thread_ident = (uintptr_t)istate->thread(); 889 anticipated_bias_locking_value = 890 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 891 ~((uintptr_t) markOopDesc::age_mask_in_place); 892 893 if (anticipated_bias_locking_value == 0) { 894 // already biased towards this thread, nothing to do 895 if (PrintBiasedLockingStatistics) { 896 (* BiasedLocking::biased_lock_entry_count_addr())++; 897 } 898 success = true; 899 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 900 // try revoke bias 901 markOop header = lockee->klass()->prototype_header(); 902 if (hash != markOopDesc::no_hash) { 903 header = header->copy_set_hash(hash); 904 } 905 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 906 if (PrintBiasedLockingStatistics) { 907 (*BiasedLocking::revoked_lock_entry_count_addr())++; 908 } 909 } 910 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 911 // try rebias 912 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 913 if (hash != markOopDesc::no_hash) { 914 new_header = new_header->copy_set_hash(hash); 915 } 916 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 917 if (PrintBiasedLockingStatistics) { 918 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 919 } 920 } else { 921 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 922 } 923 success = true; 924 } else { 925 // try to bias towards thread in case object is anonymously biased 926 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 927 
(uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 928 if (hash != markOopDesc::no_hash) { 929 header = header->copy_set_hash(hash); 930 } 931 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 932 // debugging hint 933 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 934 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 935 if (PrintBiasedLockingStatistics) { 936 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 937 } 938 } else { 939 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 940 } 941 success = true; 942 } 943 } 944 945 // traditional lightweight locking 946 if (!success) { 947 markOop displaced = lockee->mark()->set_unlocked(); 948 entry->lock()->set_displaced_header(displaced); 949 bool call_vm = UseHeavyMonitors; 950 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 951 // Is it simple recursive case? 952 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 953 entry->lock()->set_displaced_header(NULL); 954 } else { 955 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 956 } 957 } 958 } 959 UPDATE_PC_AND_TOS(1, -1); 960 goto run; 961 } 962 default: { 963 fatal("Unexpected message from frame manager"); 964 } 965 } 966 967 run: 968 969 DO_UPDATE_INSTRUCTION_COUNT(*pc) 970 DEBUGGER_SINGLE_STEP_NOTIFY(); 971 #ifdef PREFETCH_OPCCODE 972 opcode = *pc; /* prefetch first opcode */ 973 #endif 974 975 #ifndef USELABELS 976 while (1) 977 #endif 978 { 979 #ifndef PREFETCH_OPCCODE 980 opcode = *pc; 981 #endif 982 // Seems like this happens twice per opcode. At worst this is only 983 // need at entry to the loop. 984 // DEBUGGER_SINGLE_STEP_NOTIFY(); 985 /* Using this labels avoids double breakpoints when quickening and 986 * when returing from transition frames. 987 */ 988 opcode_switch: 989 assert(istate == orig, "Corrupted istate"); 990 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */ 991 assert(topOfStack >= istate->stack_limit(), "Stack overrun"); 992 assert(topOfStack < istate->stack_base(), "Stack underrun"); 993 994 #ifdef USELABELS 995 DISPATCH(opcode); 996 #else 997 switch (opcode) 998 #endif 999 { 1000 CASE(_nop): 1001 UPDATE_PC_AND_CONTINUE(1); 1002 1003 /* Push miscellaneous constants onto the stack. 
*/ 1004 1005 CASE(_aconst_null): 1006 SET_STACK_OBJECT(NULL, 0); 1007 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1008 1009 #undef OPC_CONST_n 1010 #define OPC_CONST_n(opcode, const_type, value) \ 1011 CASE(opcode): \ 1012 SET_STACK_ ## const_type(value, 0); \ 1013 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1014 1015 OPC_CONST_n(_iconst_m1, INT, -1); 1016 OPC_CONST_n(_iconst_0, INT, 0); 1017 OPC_CONST_n(_iconst_1, INT, 1); 1018 OPC_CONST_n(_iconst_2, INT, 2); 1019 OPC_CONST_n(_iconst_3, INT, 3); 1020 OPC_CONST_n(_iconst_4, INT, 4); 1021 OPC_CONST_n(_iconst_5, INT, 5); 1022 OPC_CONST_n(_fconst_0, FLOAT, 0.0); 1023 OPC_CONST_n(_fconst_1, FLOAT, 1.0); 1024 OPC_CONST_n(_fconst_2, FLOAT, 2.0); 1025 1026 #undef OPC_CONST2_n 1027 #define OPC_CONST2_n(opcname, value, key, kind) \ 1028 CASE(_##opcname): \ 1029 { \ 1030 SET_STACK_ ## kind(VM##key##Const##value(), 1); \ 1031 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1032 } 1033 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE); 1034 OPC_CONST2_n(dconst_1, One, double, DOUBLE); 1035 OPC_CONST2_n(lconst_0, Zero, long, LONG); 1036 OPC_CONST2_n(lconst_1, One, long, LONG); 1037 1038 /* Load constant from constant pool: */ 1039 1040 /* Push a 1-byte signed integer value onto the stack. */ 1041 CASE(_bipush): 1042 SET_STACK_INT((jbyte)(pc[1]), 0); 1043 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1044 1045 /* Push a 2-byte signed integer constant onto the stack. */ 1046 CASE(_sipush): 1047 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0); 1048 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 1049 1050 /* load from local variable */ 1051 1052 CASE(_aload): 1053 VERIFY_OOP(LOCALS_OBJECT(pc[1])); 1054 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0); 1055 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1056 1057 CASE(_iload): 1058 CASE(_fload): 1059 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0); 1060 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1061 1062 CASE(_lload): 1063 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1); 1064 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1065 1066 CASE(_dload): 1067 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1); 1068 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1069 1070 #undef OPC_LOAD_n 1071 #define OPC_LOAD_n(num) \ 1072 CASE(_aload_##num): \ 1073 VERIFY_OOP(LOCALS_OBJECT(num)); \ 1074 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \ 1075 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1076 \ 1077 CASE(_iload_##num): \ 1078 CASE(_fload_##num): \ 1079 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \ 1080 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1081 \ 1082 CASE(_lload_##num): \ 1083 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \ 1084 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1085 CASE(_dload_##num): \ 1086 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \ 1087 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1088 1089 OPC_LOAD_n(0); 1090 OPC_LOAD_n(1); 1091 OPC_LOAD_n(2); 1092 OPC_LOAD_n(3); 1093 1094 /* store to a local variable */ 1095 1096 CASE(_astore): 1097 astore(topOfStack, -1, locals, pc[1]); 1098 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1099 1100 CASE(_istore): 1101 CASE(_fstore): 1102 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]); 1103 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1104 1105 CASE(_lstore): 1106 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]); 1107 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1108 1109 CASE(_dstore): 1110 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]); 1111 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1112 1113 CASE(_wide): { 1114 uint16_t reg = Bytes::get_Java_u2(pc + 2); 1115 1116 opcode = pc[1]; 1117 1118 // Wide and it's sub-bytecode are counted as separate instructions. 
If we 1119 // don't account for this here, the bytecode trace skips the next bytecode. 1120 DO_UPDATE_INSTRUCTION_COUNT(opcode); 1121 1122 switch(opcode) { 1123 case Bytecodes::_aload: 1124 VERIFY_OOP(LOCALS_OBJECT(reg)); 1125 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); 1126 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1127 1128 case Bytecodes::_iload: 1129 case Bytecodes::_fload: 1130 SET_STACK_SLOT(LOCALS_SLOT(reg), 0); 1131 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1132 1133 case Bytecodes::_lload: 1134 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1135 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1136 1137 case Bytecodes::_dload: 1138 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1139 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1140 1141 case Bytecodes::_astore: 1142 astore(topOfStack, -1, locals, reg); 1143 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1144 1145 case Bytecodes::_istore: 1146 case Bytecodes::_fstore: 1147 SET_LOCALS_SLOT(STACK_SLOT(-1), reg); 1148 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1149 1150 case Bytecodes::_lstore: 1151 SET_LOCALS_LONG(STACK_LONG(-1), reg); 1152 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1153 1154 case Bytecodes::_dstore: 1155 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg); 1156 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1157 1158 case Bytecodes::_iinc: { 1159 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); 1160 // Be nice to see what this generates.... QQQ 1161 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); 1162 UPDATE_PC_AND_CONTINUE(6); 1163 } 1164 case Bytecodes::_ret: 1165 // Profile ret. 1166 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg)))); 1167 // Now, update the pc. 1168 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); 1169 UPDATE_PC_AND_CONTINUE(0); 1170 default: 1171 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap); 1172 } 1173 } 1174 1175 1176 #undef OPC_STORE_n 1177 #define OPC_STORE_n(num) \ 1178 CASE(_astore_##num): \ 1179 astore(topOfStack, -1, locals, num); \ 1180 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1181 CASE(_istore_##num): \ 1182 CASE(_fstore_##num): \ 1183 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \ 1184 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1185 1186 OPC_STORE_n(0); 1187 OPC_STORE_n(1); 1188 OPC_STORE_n(2); 1189 OPC_STORE_n(3); 1190 1191 #undef OPC_DSTORE_n 1192 #define OPC_DSTORE_n(num) \ 1193 CASE(_dstore_##num): \ 1194 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \ 1195 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1196 CASE(_lstore_##num): \ 1197 SET_LOCALS_LONG(STACK_LONG(-1), num); \ 1198 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1199 1200 OPC_DSTORE_n(0); 1201 OPC_DSTORE_n(1); 1202 OPC_DSTORE_n(2); 1203 OPC_DSTORE_n(3); 1204 1205 /* stack pop, dup, and insert opcodes */ 1206 1207 1208 CASE(_pop): /* Discard the top item on the stack */ 1209 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1210 1211 1212 CASE(_pop2): /* Discard the top 2 items on the stack */ 1213 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1214 1215 1216 CASE(_dup): /* Duplicate the top item on the stack */ 1217 dup(topOfStack); 1218 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1219 1220 CASE(_dup2): /* Duplicate the top 2 items on the stack */ 1221 dup2(topOfStack); 1222 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1223 1224 CASE(_dup_x1): /* insert top word two down */ 1225 dup_x1(topOfStack); 1226 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1227 1228 CASE(_dup_x2): /* insert top word three down */ 1229 dup_x2(topOfStack); 1230 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1231 1232 CASE(_dup2_x1): /* insert top 2 slots three down */ 1233 dup2_x1(topOfStack); 1234 
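// --- Illustrative sketch, not HotSpot code: what the dup*/swap cases around here do to raw
// --- operand-stack slots, modelled on a plain array with tos pointing one past the top.
// --- The real expression stack may grow in the opposite direction and uses the dup*/swap
// --- helpers on topOfStack; only the relative slot shuffle is shown. Kept out of
// --- compilation with #if 0.
#if 0
#include <cassert>
#include <cstdint>

typedef intptr_t slot_t;

// tos[-1] is the stack top.
static void do_dup    (slot_t* tos) { tos[0] = tos[-1]; }                       // ..,a     -> ..,a,a
static void do_swap   (slot_t* tos) { slot_t t = tos[-1]; tos[-1] = tos[-2]; tos[-2] = t; }
static void do_dup_x1 (slot_t* tos) {                                           // ..,b,a   -> ..,a,b,a
  tos[0] = tos[-1]; tos[-1] = tos[-2]; tos[-2] = tos[0];
}
static void do_dup2_x1(slot_t* tos) {                                           // ..,c,b,a -> ..,b,a,c,b,a
  tos[1] = tos[-1]; tos[0] = tos[-2]; tos[-1] = tos[-3];
  tos[-2] = tos[1]; tos[-3] = tos[0];
}

int main() {
  slot_t s[8] = { 10, 20, 30 };          // bottom..top: 10, 20, 30
  do_dup_x1(&s[3]);                      // becomes 10, 30, 20, 30
  assert(s[1] == 30 && s[2] == 20 && s[3] == 30);

  slot_t t[8] = { 1, 2, 3 };
  do_dup2_x1(&t[3]);                     // becomes 2, 3, 1, 2, 3
  assert(t[0] == 2 && t[1] == 3 && t[2] == 1 && t[3] == 2 && t[4] == 3);
  (void)do_dup; (void)do_swap;
  return 0;
}
#endif // illustrative sketch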
UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1235 1236 CASE(_dup2_x2): /* insert top 2 slots four down */ 1237 dup2_x2(topOfStack); 1238 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1239 1240 CASE(_swap): { /* swap top two elements on the stack */ 1241 swap(topOfStack); 1242 UPDATE_PC_AND_CONTINUE(1); 1243 } 1244 1245 /* Perform various binary integer operations */ 1246 1247 #undef OPC_INT_BINARY 1248 #define OPC_INT_BINARY(opcname, opname, test) \ 1249 CASE(_i##opcname): \ 1250 if (test && (STACK_INT(-1) == 0)) { \ 1251 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1252 "/ by zero", note_div0Check_trap); \ 1253 } \ 1254 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1255 STACK_INT(-1)), \ 1256 -2); \ 1257 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1258 CASE(_l##opcname): \ 1259 { \ 1260 if (test) { \ 1261 jlong l1 = STACK_LONG(-1); \ 1262 if (VMlongEqz(l1)) { \ 1263 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1264 "/ by long zero", note_div0Check_trap); \ 1265 } \ 1266 } \ 1267 /* First long at (-1,-2) next long at (-3,-4) */ \ 1268 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ 1269 STACK_LONG(-1)), \ 1270 -3); \ 1271 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1272 } 1273 1274 OPC_INT_BINARY(add, Add, 0); 1275 OPC_INT_BINARY(sub, Sub, 0); 1276 OPC_INT_BINARY(mul, Mul, 0); 1277 OPC_INT_BINARY(and, And, 0); 1278 OPC_INT_BINARY(or, Or, 0); 1279 OPC_INT_BINARY(xor, Xor, 0); 1280 OPC_INT_BINARY(div, Div, 1); 1281 OPC_INT_BINARY(rem, Rem, 1); 1282 1283 1284 /* Perform various binary floating number operations */ 1285 /* On some machine/platforms/compilers div zero check can be implicit */ 1286 1287 #undef OPC_FLOAT_BINARY 1288 #define OPC_FLOAT_BINARY(opcname, opname) \ 1289 CASE(_d##opcname): { \ 1290 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \ 1291 STACK_DOUBLE(-1)), \ 1292 -3); \ 1293 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1294 } \ 1295 CASE(_f##opcname): \ 1296 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \ 1297 STACK_FLOAT(-1)), \ 1298 -2); \ 1299 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1300 1301 1302 OPC_FLOAT_BINARY(add, Add); 1303 OPC_FLOAT_BINARY(sub, Sub); 1304 OPC_FLOAT_BINARY(mul, Mul); 1305 OPC_FLOAT_BINARY(div, Div); 1306 OPC_FLOAT_BINARY(rem, Rem); 1307 1308 /* Shift operations 1309 * Shift left int and long: ishl, lshl 1310 * Logical shift right int and long w/zero extension: iushr, lushr 1311 * Arithmetic shift right int and long w/sign extension: ishr, lshr 1312 */ 1313 1314 #undef OPC_SHIFT_BINARY 1315 #define OPC_SHIFT_BINARY(opcname, opname) \ 1316 CASE(_i##opcname): \ 1317 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1318 STACK_INT(-1)), \ 1319 -2); \ 1320 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1321 CASE(_l##opcname): \ 1322 { \ 1323 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \ 1324 STACK_INT(-1)), \ 1325 -2); \ 1326 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1327 } 1328 1329 OPC_SHIFT_BINARY(shl, Shl); 1330 OPC_SHIFT_BINARY(shr, Shr); 1331 OPC_SHIFT_BINARY(ushr, Ushr); 1332 1333 /* Increment local variable by constant */ 1334 CASE(_iinc): 1335 { 1336 // locals[pc[1]].j.i += (jbyte)(pc[2]); 1337 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]); 1338 UPDATE_PC_AND_CONTINUE(3); 1339 } 1340 1341 /* negate the value on the top of the stack */ 1342 1343 CASE(_ineg): 1344 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); 1345 UPDATE_PC_AND_CONTINUE(1); 1346 1347 CASE(_fneg): 1348 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); 1349 UPDATE_PC_AND_CONTINUE(1); 1350 1351 CASE(_lneg): 1352 { 1353 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); 1354 
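// --- Illustrative sketch, not HotSpot code: why the long/double cases above address their
// --- operands at (-1) and (-3) and finish with a TOS adjustment of -2. Each 64-bit value
// --- occupies two expression-stack slots, and the offsets name the slot where the value's
// --- pair ends. The packing below (value in one slot, the partner slot as padding) is a
// --- stand-in for the platform-specific SET_STACK_LONG/STACK_LONG macros; only the offset
// --- arithmetic is the point. Kept out of compilation with #if 0.
#if 0
#include <cassert>
#include <cstdint>

typedef intptr_t slot_t;                 // one expression-stack slot (assumed 64-bit here)

static int64_t get_long(slot_t* tos, int offset)            { return (int64_t)tos[offset]; }
static void    set_long(slot_t* tos, int offset, int64_t v) { tos[offset] = (slot_t)v; tos[offset - 1] = 0; }

int main() {
  slot_t stack[8] = {0};
  slot_t* tos = stack;                   // next free slot

  set_long(tos, 1, 7);  tos += 2;        // push first operand  (two slots per long)
  set_long(tos, 1, 35); tos += 2;        // push second operand

  // ladd: deeper operand at (-3), top operand at (-1); the result overwrites the deeper
  // one and the top drops by two slots -- cf. SET_STACK_LONG(..., -3) and MORE_STACK(-2).
  set_long(tos, -3, get_long(tos, -3) + get_long(tos, -1));
  tos -= 2;

  assert(get_long(tos, -1) == 42);
  return 0;
}
#endif // illustrative sketch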
UPDATE_PC_AND_CONTINUE(1); 1355 } 1356 1357 CASE(_dneg): 1358 { 1359 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); 1360 UPDATE_PC_AND_CONTINUE(1); 1361 } 1362 1363 /* Conversion operations */ 1364 1365 CASE(_i2f): /* convert top of stack int to float */ 1366 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1); 1367 UPDATE_PC_AND_CONTINUE(1); 1368 1369 CASE(_i2l): /* convert top of stack int to long */ 1370 { 1371 // this is ugly QQQ 1372 jlong r = VMint2Long(STACK_INT(-1)); 1373 MORE_STACK(-1); // Pop 1374 SET_STACK_LONG(r, 1); 1375 1376 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1377 } 1378 1379 CASE(_i2d): /* convert top of stack int to double */ 1380 { 1381 // this is ugly QQQ (why cast to jlong?? ) 1382 jdouble r = (jlong)STACK_INT(-1); 1383 MORE_STACK(-1); // Pop 1384 SET_STACK_DOUBLE(r, 1); 1385 1386 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1387 } 1388 1389 CASE(_l2i): /* convert top of stack long to int */ 1390 { 1391 jint r = VMlong2Int(STACK_LONG(-1)); 1392 MORE_STACK(-2); // Pop 1393 SET_STACK_INT(r, 0); 1394 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1395 } 1396 1397 CASE(_l2f): /* convert top of stack long to float */ 1398 { 1399 jlong r = STACK_LONG(-1); 1400 MORE_STACK(-2); // Pop 1401 SET_STACK_FLOAT(VMlong2Float(r), 0); 1402 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1403 } 1404 1405 CASE(_l2d): /* convert top of stack long to double */ 1406 { 1407 jlong r = STACK_LONG(-1); 1408 MORE_STACK(-2); // Pop 1409 SET_STACK_DOUBLE(VMlong2Double(r), 1); 1410 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1411 } 1412 1413 CASE(_f2i): /* Convert top of stack float to int */ 1414 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); 1415 UPDATE_PC_AND_CONTINUE(1); 1416 1417 CASE(_f2l): /* convert top of stack float to long */ 1418 { 1419 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1)); 1420 MORE_STACK(-1); // POP 1421 SET_STACK_LONG(r, 1); 1422 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1423 } 1424 1425 CASE(_f2d): /* convert top of stack float to double */ 1426 { 1427 jfloat f; 1428 jdouble r; 1429 f = STACK_FLOAT(-1); 1430 r = (jdouble) f; 1431 MORE_STACK(-1); // POP 1432 SET_STACK_DOUBLE(r, 1); 1433 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1434 } 1435 1436 CASE(_d2i): /* convert top of stack double to int */ 1437 { 1438 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1)); 1439 MORE_STACK(-2); 1440 SET_STACK_INT(r1, 0); 1441 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1442 } 1443 1444 CASE(_d2f): /* convert top of stack double to float */ 1445 { 1446 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1)); 1447 MORE_STACK(-2); 1448 SET_STACK_FLOAT(r1, 0); 1449 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1450 } 1451 1452 CASE(_d2l): /* convert top of stack double to long */ 1453 { 1454 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1)); 1455 MORE_STACK(-2); 1456 SET_STACK_LONG(r1, 1); 1457 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1458 } 1459 1460 CASE(_i2b): 1461 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1); 1462 UPDATE_PC_AND_CONTINUE(1); 1463 1464 CASE(_i2c): 1465 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1); 1466 UPDATE_PC_AND_CONTINUE(1); 1467 1468 CASE(_i2s): 1469 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1); 1470 UPDATE_PC_AND_CONTINUE(1); 1471 1472 /* comparison operators */ 1473 1474 1475 #define COMPARISON_OP(name, comparison) \ 1476 CASE(_if_icmp##name): { \ 1477 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \ 1478 int skip = cmp \ 1479 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1480 address branch_pc = pc; \ 1481 /* Profile branch. 
*/ \ 1482 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1483 UPDATE_PC_AND_TOS(skip, -2); \ 1484 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1485 CONTINUE; \ 1486 } \ 1487 CASE(_if##name): { \ 1488 const bool cmp = (STACK_INT(-1) comparison 0); \ 1489 int skip = cmp \ 1490 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1491 address branch_pc = pc; \ 1492 /* Profile branch. */ \ 1493 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1494 UPDATE_PC_AND_TOS(skip, -1); \ 1495 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1496 CONTINUE; \ 1497 } 1498 1499 #define COMPARISON_OP2(name, comparison) \ 1500 COMPARISON_OP(name, comparison) \ 1501 CASE(_if_acmp##name): { \ 1502 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \ 1503 int skip = cmp \ 1504 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1505 address branch_pc = pc; \ 1506 /* Profile branch. */ \ 1507 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1508 UPDATE_PC_AND_TOS(skip, -2); \ 1509 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1510 CONTINUE; \ 1511 } 1512 1513 #define NULL_COMPARISON_NOT_OP(name) \ 1514 CASE(_if##name): { \ 1515 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \ 1516 int skip = cmp \ 1517 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1518 address branch_pc = pc; \ 1519 /* Profile branch. */ \ 1520 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1521 UPDATE_PC_AND_TOS(skip, -1); \ 1522 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1523 CONTINUE; \ 1524 } 1525 1526 #define NULL_COMPARISON_OP(name) \ 1527 CASE(_if##name): { \ 1528 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \ 1529 int skip = cmp \ 1530 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1531 address branch_pc = pc; \ 1532 /* Profile branch. */ \ 1533 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1534 UPDATE_PC_AND_TOS(skip, -1); \ 1535 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1536 CONTINUE; \ 1537 } 1538 COMPARISON_OP(lt, <); 1539 COMPARISON_OP(gt, >); 1540 COMPARISON_OP(le, <=); 1541 COMPARISON_OP(ge, >=); 1542 COMPARISON_OP2(eq, ==); /* include ref comparison */ 1543 COMPARISON_OP2(ne, !=); /* include ref comparison */ 1544 NULL_COMPARISON_OP(null); 1545 NULL_COMPARISON_NOT_OP(nonnull); 1546 1547 /* Goto pc at specified offset in switch table. */ 1548 1549 CASE(_tableswitch): { 1550 jint* lpc = (jint*)VMalignWordUp(pc+1); 1551 int32_t key = STACK_INT(-1); 1552 int32_t low = Bytes::get_Java_u4((address)&lpc[1]); 1553 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); 1554 int32_t skip; 1555 key -= low; 1556 if (((uint32_t) key > (uint32_t)(high - low))) { 1557 key = -1; 1558 skip = Bytes::get_Java_u4((address)&lpc[0]); 1559 } else { 1560 skip = Bytes::get_Java_u4((address)&lpc[key + 3]); 1561 } 1562 // Profile switch. 1563 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key); 1564 // Does this really need a full backedge check (osr)? 1565 address branch_pc = pc; 1566 UPDATE_PC_AND_TOS(skip, -1); 1567 DO_BACKEDGE_CHECKS(skip, branch_pc); 1568 CONTINUE; 1569 } 1570 1571 /* Goto pc whose table entry matches specified key. */ 1572 1573 CASE(_lookupswitch): { 1574 jint* lpc = (jint*)VMalignWordUp(pc+1); 1575 int32_t key = STACK_INT(-1); 1576 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ 1577 // Remember index. 1578 int index = -1; 1579 int newindex = 0; 1580 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); 1581 while (--npairs >= 0) { 1582 lpc += 2; 1583 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { 1584 skip = Bytes::get_Java_u4((address)&lpc[1]); 1585 index = newindex; 1586 break; 1587 } 1588 newindex += 1; 1589 } 1590 // Profile switch. 
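// --- Illustrative sketch, not HotSpot code: the operand layout the tableswitch case above
// --- decodes. After the opcode byte the operands are padded to a 4-byte boundary and then
// --- stored as big-endian 32-bit words (default, low, high, jump offsets). The interpreter
// --- rounds the raw pc with VMalignWordUp; this sketch rounds the bytecode index, which is
// --- equivalent when the code array is 4-byte aligned. Helper names are invented; kept out
// --- of compilation with #if 0.
#if 0
#include <cstdint>
#include <cstdio>

static int32_t read_u4_be(const uint8_t* p) {               // Bytes::get_Java_u4 analogue
  return (int32_t)((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                   (uint32_t)p[2] << 8  | (uint32_t)p[3]);
}

// pc points at the tableswitch opcode inside code_base; returns the branch offset to add to pc.
static int32_t tableswitch_offset(const uint8_t* code_base, const uint8_t* pc, int32_t key) {
  uintptr_t operands = (uintptr_t)(pc + 1 - code_base);
  operands = (operands + 3) & ~(uintptr_t)3;                 // pad to a 4-byte boundary
  const uint8_t* lpc = code_base + operands;
  int32_t def  = read_u4_be(lpc + 0);
  int32_t low  = read_u4_be(lpc + 4);
  int32_t high = read_u4_be(lpc + 8);
  if ((uint32_t)(key - low) > (uint32_t)(high - low))        // one unsigned test covers both ends
    return def;
  return read_u4_be(lpc + 12 + 4 * (key - low));
}

int main() {
  // Hand-built tableswitch at bci 0: default +100, low 1, high 2, offsets +10 and +20.
  uint8_t code[32] = { 0xaa, 0, 0, 0,                        // opcode + 3 pad bytes
                       0,0,0,100,  0,0,0,1,  0,0,0,2,        // default, low, high
                       0,0,0,10,   0,0,0,20 };               // offsets for keys 1 and 2
  std::printf("%d %d %d\n",
              tableswitch_offset(code, code, 1),             // 10
              tableswitch_offset(code, code, 2),             // 20
              tableswitch_offset(code, code, 7));            // 100 (default)
  return 0;
}
#endif // illustrative sketch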
1591 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index); 1592 address branch_pc = pc; 1593 UPDATE_PC_AND_TOS(skip, -1); 1594 DO_BACKEDGE_CHECKS(skip, branch_pc); 1595 CONTINUE; 1596 } 1597 1598 CASE(_fcmpl): 1599 CASE(_fcmpg): 1600 { 1601 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), 1602 STACK_FLOAT(-1), 1603 (opcode == Bytecodes::_fcmpl ? -1 : 1)), 1604 -2); 1605 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1606 } 1607 1608 CASE(_dcmpl): 1609 CASE(_dcmpg): 1610 { 1611 int r = VMdoubleCompare(STACK_DOUBLE(-3), 1612 STACK_DOUBLE(-1), 1613 (opcode == Bytecodes::_dcmpl ? -1 : 1)); 1614 MORE_STACK(-4); // Pop 1615 SET_STACK_INT(r, 0); 1616 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1617 } 1618 1619 CASE(_lcmp): 1620 { 1621 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1)); 1622 MORE_STACK(-4); 1623 SET_STACK_INT(r, 0); 1624 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1625 } 1626 1627 1628 /* Return from a method */ 1629 1630 CASE(_areturn): 1631 CASE(_ireturn): 1632 CASE(_freturn): 1633 { 1634 // Allow a safepoint before returning to frame manager. 1635 SAFEPOINT; 1636 1637 goto handle_return; 1638 } 1639 1640 CASE(_lreturn): 1641 CASE(_dreturn): 1642 { 1643 // Allow a safepoint before returning to frame manager. 1644 SAFEPOINT; 1645 goto handle_return; 1646 } 1647 1648 CASE(_return_register_finalizer): { 1649 1650 oop rcvr = LOCALS_OBJECT(0); 1651 VERIFY_OOP(rcvr); 1652 if (rcvr->klass()->has_finalizer()) { 1653 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception); 1654 } 1655 goto handle_return; 1656 } 1657 CASE(_return): { 1658 1659 // Allow a safepoint before returning to frame manager. 1660 SAFEPOINT; 1661 goto handle_return; 1662 } 1663 1664 /* Array access byte-codes */ 1665 1666 /* Every array access byte-code starts out like this */ 1667 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff); 1668 #define ARRAY_INTRO(arrayOff) \ 1669 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \ 1670 jint index = STACK_INT(arrayOff + 1); \ 1671 char message[jintAsStringSize]; \ 1672 CHECK_NULL(arrObj); \ 1673 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ 1674 sprintf(message, "%d", index); \ 1675 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ 1676 message, note_rangeCheck_trap); \ 1677 } 1678 1679 /* 32-bit loads. 
These handle conversion from < 32-bit types */ 1680 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1681 { \ 1682 ARRAY_INTRO(-2); \ 1683 (void)extra; \ 1684 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1685 -2); \ 1686 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1687 } 1688 1689 /* 64-bit loads */ 1690 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1691 { \ 1692 ARRAY_INTRO(-2); \ 1693 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1694 (void)extra; \ 1695 UPDATE_PC_AND_CONTINUE(1); \ 1696 } 1697 1698 CASE(_iaload): 1699 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1700 CASE(_faload): 1701 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1702 CASE(_aaload): { 1703 ARRAY_INTRO(-2); 1704 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1705 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1706 } 1707 CASE(_baload): 1708 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1709 CASE(_caload): 1710 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1711 CASE(_saload): 1712 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1713 CASE(_laload): 1714 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1715 CASE(_daload): 1716 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1717 1718 /* 32-bit stores. These handle conversion to < 32-bit types */ 1719 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1720 { \ 1721 ARRAY_INTRO(-3); \ 1722 (void)extra; \ 1723 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1724 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1725 } 1726 1727 /* 64-bit stores */ 1728 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1729 { \ 1730 ARRAY_INTRO(-4); \ 1731 (void)extra; \ 1732 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1733 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1734 } 1735 1736 CASE(_iastore): 1737 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1738 CASE(_fastore): 1739 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1740 /* 1741 * This one looks different because of the assignability check 1742 */ 1743 CASE(_aastore): { 1744 oop rhsObject = STACK_OBJECT(-1); 1745 VERIFY_OOP(rhsObject); 1746 ARRAY_INTRO( -3); 1747 // arrObj, index are set 1748 if (rhsObject != NULL) { 1749 /* Check assignability of rhsObject into arrObj */ 1750 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1751 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1752 // 1753 // Check for compatibilty. This check must not GC!! 1754 // Seems way more expensive now that we must dispatch 1755 // 1756 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1757 // Decrement counter if subtype check failed. 1758 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1759 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1760 } 1761 // Profile checkcast with null_seen and receiver. 1762 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1763 } else { 1764 // Profile checkcast with null_seen and receiver. 
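// A null element store needs no array store check (the subtype test above is
// skipped for null), so only the profile is updated before obj_at_put() below
// writes the null element.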
1765 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1766 } 1767 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1768 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1769 } 1770 CASE(_bastore): 1771 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1772 CASE(_castore): 1773 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1774 CASE(_sastore): 1775 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1776 CASE(_lastore): 1777 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1778 CASE(_dastore): 1779 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1780 1781 CASE(_arraylength): 1782 { 1783 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1784 CHECK_NULL(ary); 1785 SET_STACK_INT(ary->length(), -1); 1786 UPDATE_PC_AND_CONTINUE(1); 1787 } 1788 1789 /* monitorenter and monitorexit for locking/unlocking an object */ 1790 1791 CASE(_monitorenter): { 1792 oop lockee = STACK_OBJECT(-1); 1793 // derefing's lockee ought to provoke implicit null check 1794 CHECK_NULL(lockee); 1795 // find a free monitor or one already allocated for this object 1796 // if we find a matching object then we need a new monitor 1797 // since this is recursive enter 1798 BasicObjectLock* limit = istate->monitor_base(); 1799 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1800 BasicObjectLock* entry = NULL; 1801 while (most_recent != limit ) { 1802 if (most_recent->obj() == NULL) entry = most_recent; 1803 else if (most_recent->obj() == lockee) break; 1804 most_recent++; 1805 } 1806 if (entry != NULL) { 1807 entry->set_obj(lockee); 1808 int success = false; 1809 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1810 1811 markOop mark = lockee->mark(); 1812 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1813 // implies UseBiasedLocking 1814 if (mark->has_bias_pattern()) { 1815 uintptr_t thread_ident; 1816 uintptr_t anticipated_bias_locking_value; 1817 thread_ident = (uintptr_t)istate->thread(); 1818 anticipated_bias_locking_value = 1819 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1820 ~((uintptr_t) markOopDesc::age_mask_in_place); 1821 1822 if (anticipated_bias_locking_value == 0) { 1823 // already biased towards this thread, nothing to do 1824 if (PrintBiasedLockingStatistics) { 1825 (* BiasedLocking::biased_lock_entry_count_addr())++; 1826 } 1827 success = true; 1828 } 1829 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1830 // try revoke bias 1831 markOop header = lockee->klass()->prototype_header(); 1832 if (hash != markOopDesc::no_hash) { 1833 header = header->copy_set_hash(hash); 1834 } 1835 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1836 if (PrintBiasedLockingStatistics) 1837 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1838 } 1839 } 1840 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1841 // try rebias 1842 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1843 if (hash != markOopDesc::no_hash) { 1844 new_header = new_header->copy_set_hash(hash); 1845 } 1846 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 1847 if (PrintBiasedLockingStatistics) 1848 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1849 } 1850 else { 1851 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1852 } 1853 success = true; 1854 } 1855 else { 1856 // try to bias towards thread in case object is anonymously biased 1857 markOop 
header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1858 (uintptr_t)markOopDesc::age_mask_in_place | 1859 epoch_mask_in_place)); 1860 if (hash != markOopDesc::no_hash) { 1861 header = header->copy_set_hash(hash); 1862 } 1863 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1864 // debugging hint 1865 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1866 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1867 if (PrintBiasedLockingStatistics) 1868 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1869 } 1870 else { 1871 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1872 } 1873 success = true; 1874 } 1875 } 1876 1877 // traditional lightweight locking 1878 if (!success) { 1879 markOop displaced = lockee->mark()->set_unlocked(); 1880 entry->lock()->set_displaced_header(displaced); 1881 bool call_vm = UseHeavyMonitors; 1882 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1883 // Is it simple recursive case? 1884 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1885 entry->lock()->set_displaced_header(NULL); 1886 } else { 1887 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1888 } 1889 } 1890 } 1891 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1892 } else { 1893 istate->set_msg(more_monitors); 1894 UPDATE_PC_AND_RETURN(0); // Re-execute 1895 } 1896 } 1897 1898 CASE(_monitorexit): { 1899 oop lockee = STACK_OBJECT(-1); 1900 CHECK_NULL(lockee); 1901 // derefing's lockee ought to provoke implicit null check 1902 // find our monitor slot 1903 BasicObjectLock* limit = istate->monitor_base(); 1904 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1905 while (most_recent != limit ) { 1906 if ((most_recent)->obj() == lockee) { 1907 BasicLock* lock = most_recent->lock(); 1908 markOop header = lock->displaced_header(); 1909 most_recent->set_obj(NULL); 1910 if (!lockee->mark()->has_bias_pattern()) { 1911 bool call_vm = UseHeavyMonitors; 1912 // If it isn't recursive we either must swap old header or call the runtime 1913 if (header != NULL || call_vm) { 1914 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1915 // restore object for the slow case 1916 most_recent->set_obj(lockee); 1917 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1918 } 1919 } 1920 } 1921 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1922 } 1923 most_recent++; 1924 } 1925 // Need to throw illegal monitor state exception 1926 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1927 ShouldNotReachHere(); 1928 } 1929 1930 /* All of the non-quick opcodes. */ 1931 1932 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1933 * constant pool index in the instruction. 1934 */ 1935 CASE(_getfield): 1936 CASE(_getstatic): 1937 { 1938 u2 index; 1939 ConstantPoolCacheEntry* cache; 1940 index = Bytes::get_native_u2(pc+1); 1941 1942 // QQQ Need to make this as inlined as possible. Probably need to 1943 // split all the bytecode cases out so c++ compiler has a chance 1944 // for constant prop to fold everything possible away. 
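// The pattern below is shared by the field and invoke bytecodes: look the entry
// up in the constant pool cache and, if this bytecode has not been resolved yet,
// call into the runtime once and then re-read the entry, i.e. roughly
//   cache = cp->entry_at(index);
//   if (!cache->is_resolved(opcode)) { resolve_from_cache(...); cache = cp->entry_at(index); }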
1945 1946 cache = cp->entry_at(index); 1947 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1948 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 1949 handle_exception); 1950 cache = cp->entry_at(index); 1951 } 1952 1953 #ifdef VM_JVMTI 1954 if (_jvmti_interp_events) { 1955 int *count_addr; 1956 oop obj; 1957 // Check to see if a field modification watch has been set 1958 // before we take the time to call into the VM. 1959 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1960 if ( *count_addr > 0 ) { 1961 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1962 obj = (oop)NULL; 1963 } else { 1964 obj = (oop) STACK_OBJECT(-1); 1965 VERIFY_OOP(obj); 1966 } 1967 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1968 obj, 1969 cache), 1970 handle_exception); 1971 } 1972 } 1973 #endif /* VM_JVMTI */ 1974 1975 oop obj; 1976 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1977 Klass* k = cache->f1_as_klass(); 1978 obj = k->java_mirror(); 1979 MORE_STACK(1); // Assume single slot push 1980 } else { 1981 obj = (oop) STACK_OBJECT(-1); 1982 CHECK_NULL(obj); 1983 } 1984 1985 // 1986 // Now store the result on the stack 1987 // 1988 TosState tos_type = cache->flag_state(); 1989 int field_offset = cache->f2_as_index(); 1990 if (cache->is_volatile()) { 1991 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 1992 OrderAccess::fence(); 1993 } 1994 if (tos_type == atos) { 1995 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 1996 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 1997 } else if (tos_type == itos) { 1998 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 1999 } else if (tos_type == ltos) { 2000 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 2001 MORE_STACK(1); 2002 } else if (tos_type == btos) { 2003 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 2004 } else if (tos_type == ctos) { 2005 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 2006 } else if (tos_type == stos) { 2007 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 2008 } else if (tos_type == ftos) { 2009 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 2010 } else { 2011 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 2012 MORE_STACK(1); 2013 } 2014 } else { 2015 if (tos_type == atos) { 2016 VERIFY_OOP(obj->obj_field(field_offset)); 2017 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2018 } else if (tos_type == itos) { 2019 SET_STACK_INT(obj->int_field(field_offset), -1); 2020 } else if (tos_type == ltos) { 2021 SET_STACK_LONG(obj->long_field(field_offset), 0); 2022 MORE_STACK(1); 2023 } else if (tos_type == btos) { 2024 SET_STACK_INT(obj->byte_field(field_offset), -1); 2025 } else if (tos_type == ctos) { 2026 SET_STACK_INT(obj->char_field(field_offset), -1); 2027 } else if (tos_type == stos) { 2028 SET_STACK_INT(obj->short_field(field_offset), -1); 2029 } else if (tos_type == ftos) { 2030 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2031 } else { 2032 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2033 MORE_STACK(1); 2034 } 2035 } 2036 2037 UPDATE_PC_AND_CONTINUE(3); 2038 } 2039 2040 CASE(_putfield): 2041 CASE(_putstatic): 2042 { 2043 u2 index = Bytes::get_native_u2(pc+1); 2044 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2045 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2046 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2047 handle_exception); 2048 cache = cp->entry_at(index); 2049 } 2050 2051 #ifdef VM_JVMTI 
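// Field-modification watch: only call out to the VM when JVMTI has at least one
// watchpoint armed (JvmtiExport keeps the count), mirroring the field-access
// check done for getfield/getstatic above.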
2052 if (_jvmti_interp_events) { 2053 int *count_addr; 2054 oop obj; 2055 // Check to see if a field modification watch has been set 2056 // before we take the time to call into the VM. 2057 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2058 if ( *count_addr > 0 ) { 2059 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2060 obj = (oop)NULL; 2061 } 2062 else { 2063 if (cache->is_long() || cache->is_double()) { 2064 obj = (oop) STACK_OBJECT(-3); 2065 } else { 2066 obj = (oop) STACK_OBJECT(-2); 2067 } 2068 VERIFY_OOP(obj); 2069 } 2070 2071 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2072 obj, 2073 cache, 2074 (jvalue *)STACK_SLOT(-1)), 2075 handle_exception); 2076 } 2077 } 2078 #endif /* VM_JVMTI */ 2079 2080 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2081 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2082 2083 oop obj; 2084 int count; 2085 TosState tos_type = cache->flag_state(); 2086 2087 count = -1; 2088 if (tos_type == ltos || tos_type == dtos) { 2089 --count; 2090 } 2091 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2092 Klass* k = cache->f1_as_klass(); 2093 obj = k->java_mirror(); 2094 } else { 2095 --count; 2096 obj = (oop) STACK_OBJECT(count); 2097 CHECK_NULL(obj); 2098 } 2099 2100 // 2101 // Now store the result 2102 // 2103 int field_offset = cache->f2_as_index(); 2104 if (cache->is_volatile()) { 2105 if (tos_type == itos) { 2106 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2107 } else if (tos_type == atos) { 2108 VERIFY_OOP(STACK_OBJECT(-1)); 2109 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2110 } else if (tos_type == btos) { 2111 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2112 } else if (tos_type == ltos) { 2113 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2114 } else if (tos_type == ctos) { 2115 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2116 } else if (tos_type == stos) { 2117 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2118 } else if (tos_type == ftos) { 2119 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2120 } else { 2121 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2122 } 2123 OrderAccess::storeload(); 2124 } else { 2125 if (tos_type == itos) { 2126 obj->int_field_put(field_offset, STACK_INT(-1)); 2127 } else if (tos_type == atos) { 2128 VERIFY_OOP(STACK_OBJECT(-1)); 2129 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2130 } else if (tos_type == btos) { 2131 obj->byte_field_put(field_offset, STACK_INT(-1)); 2132 } else if (tos_type == ltos) { 2133 obj->long_field_put(field_offset, STACK_LONG(-1)); 2134 } else if (tos_type == ctos) { 2135 obj->char_field_put(field_offset, STACK_INT(-1)); 2136 } else if (tos_type == stos) { 2137 obj->short_field_put(field_offset, STACK_INT(-1)); 2138 } else if (tos_type == ftos) { 2139 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2140 } else { 2141 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2142 } 2143 } 2144 2145 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2146 } 2147 2148 CASE(_new): { 2149 u2 index = Bytes::get_Java_u2(pc+1); 2150 ConstantPool* constants = istate->method()->constants(); 2151 if (!constants->tag_at(index).is_unresolved_klass()) { 2152 // Make sure klass is initialized and doesn't have a finalizer 2153 Klass* entry = constants->slot_at(index).get_klass(); 2154 InstanceKlass* ik = InstanceKlass::cast(entry); 2155 if (ik->is_initialized() 
&& ik->can_be_fastpath_allocated() ) { 2156 size_t obj_size = ik->size_helper(); 2157 oop result = NULL; 2158 // If the TLAB isn't pre-zeroed then we'll have to do it 2159 bool need_zero = !ZeroTLAB; 2160 if (UseTLAB) { 2161 result = (oop) THREAD->tlab().allocate(obj_size); 2162 } 2163 // Disable non-TLAB-based fast-path, because profiling requires that all 2164 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2165 // returns NULL. 2166 #ifndef CC_INTERP_PROFILE 2167 if (result == NULL) { 2168 need_zero = true; 2169 // Try allocate in shared eden 2170 retry: 2171 HeapWord* compare_to = *Universe::heap()->top_addr(); 2172 HeapWord* new_top = compare_to + obj_size; 2173 if (new_top <= *Universe::heap()->end_addr()) { 2174 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2175 goto retry; 2176 } 2177 result = (oop) compare_to; 2178 } 2179 } 2180 #endif 2181 if (result != NULL) { 2182 // Initialize object (if nonzero size and need) and then the header 2183 if (need_zero ) { 2184 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2185 obj_size -= sizeof(oopDesc) / oopSize; 2186 if (obj_size > 0 ) { 2187 memset(to_zero, 0, obj_size * HeapWordSize); 2188 } 2189 } 2190 if (UseBiasedLocking) { 2191 result->set_mark(ik->prototype_header()); 2192 } else { 2193 result->set_mark(markOopDesc::prototype()); 2194 } 2195 result->set_klass_gap(0); 2196 result->set_klass(ik); 2197 // Must prevent reordering of stores for object initialization 2198 // with stores that publish the new object. 2199 OrderAccess::storestore(); 2200 SET_STACK_OBJECT(result, 0); 2201 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2202 } 2203 } 2204 } 2205 // Slow case allocation 2206 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2207 handle_exception); 2208 // Must prevent reordering of stores for object initialization 2209 // with stores that publish the new object. 2210 OrderAccess::storestore(); 2211 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2212 THREAD->set_vm_result(NULL); 2213 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2214 } 2215 CASE(_anewarray): { 2216 u2 index = Bytes::get_Java_u2(pc+1); 2217 jint size = STACK_INT(-1); 2218 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2219 handle_exception); 2220 // Must prevent reordering of stores for object initialization 2221 // with stores that publish the new object. 2222 OrderAccess::storestore(); 2223 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2224 THREAD->set_vm_result(NULL); 2225 UPDATE_PC_AND_CONTINUE(3); 2226 } 2227 CASE(_multianewarray): { 2228 jint dims = *(pc+3); 2229 jint size = STACK_INT(-1); 2230 // stack grows down, dimensions are up! 2231 jint *dimarray = 2232 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2233 Interpreter::stackElementWords-1]; 2234 //adjust pointer to start of stack element 2235 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2236 handle_exception); 2237 // Must prevent reordering of stores for object initialization 2238 // with stores that publish the new object. 2239 OrderAccess::storestore(); 2240 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2241 THREAD->set_vm_result(NULL); 2242 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2243 } 2244 CASE(_checkcast): 2245 if (STACK_OBJECT(-1) != NULL) { 2246 VERIFY_OOP(STACK_OBJECT(-1)); 2247 u2 index = Bytes::get_Java_u2(pc+1); 2248 // Constant pool may have actual klass or unresolved klass. If it is 2249 // unresolved we must resolve it. 
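// quicken_io_cc() resolves the klass named by the current checkcast/instanceof
// bytecode, so the slot_at().get_klass() lookup below yields a real Klass*.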
2250 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2251 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2252 } 2253 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2254 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2255 // 2256 // Check for compatibility. This check must not GC!! 2257 // Seems way more expensive now that we must dispatch. 2258 // 2259 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2260 // Decrement counter at checkcast. 2261 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2262 ResourceMark rm(THREAD); 2263 const char* objName = objKlass->external_name(); 2264 const char* klassName = klassOf->external_name(); 2265 char* message = SharedRuntime::generate_class_cast_message( 2266 objName, klassName); 2267 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2268 } 2269 // Profile checkcast with null_seen and receiver. 2270 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2271 } else { 2272 // Profile checkcast with null_seen and receiver. 2273 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2274 } 2275 UPDATE_PC_AND_CONTINUE(3); 2276 2277 CASE(_instanceof): 2278 if (STACK_OBJECT(-1) == NULL) { 2279 SET_STACK_INT(0, -1); 2280 // Profile instanceof with null_seen and receiver. 2281 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2282 } else { 2283 VERIFY_OOP(STACK_OBJECT(-1)); 2284 u2 index = Bytes::get_Java_u2(pc+1); 2285 // Constant pool may have actual klass or unresolved klass. If it is 2286 // unresolved we must resolve it. 2287 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2288 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2289 } 2290 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2291 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2292 // 2293 // Check for compatibility. This check must not GC!! 2294 // Seems way more expensive now that we must dispatch. 2295 // 2296 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2297 SET_STACK_INT(1, -1); 2298 } else { 2299 SET_STACK_INT(0, -1); 2300 // Decrement counter at instanceof. 2301 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2302 } 2303 // Profile instanceof with null_seen and receiver.
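// Unlike checkcast, a failed instanceof does not throw: 0 was left on the stack
// above and only the profile records the failed subtype check.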
2304 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2305 } 2306 UPDATE_PC_AND_CONTINUE(3); 2307 2308 CASE(_ldc_w): 2309 CASE(_ldc): 2310 { 2311 u2 index; 2312 bool wide = false; 2313 int incr = 2; // frequent case 2314 if (opcode == Bytecodes::_ldc) { 2315 index = pc[1]; 2316 } else { 2317 index = Bytes::get_Java_u2(pc+1); 2318 incr = 3; 2319 wide = true; 2320 } 2321 2322 ConstantPool* constants = METHOD->constants(); 2323 switch (constants->tag_at(index).value()) { 2324 case JVM_CONSTANT_Integer: 2325 SET_STACK_INT(constants->int_at(index), 0); 2326 break; 2327 2328 case JVM_CONSTANT_Float: 2329 SET_STACK_FLOAT(constants->float_at(index), 0); 2330 break; 2331 2332 case JVM_CONSTANT_String: 2333 { 2334 oop result = constants->resolved_references()->obj_at(index); 2335 if (result == NULL) { 2336 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2337 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2338 THREAD->set_vm_result(NULL); 2339 } else { 2340 VERIFY_OOP(result); 2341 SET_STACK_OBJECT(result, 0); 2342 } 2343 break; 2344 } 2345 2346 case JVM_CONSTANT_Class: 2347 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2348 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2349 break; 2350 2351 case JVM_CONSTANT_UnresolvedClass: 2352 case JVM_CONSTANT_UnresolvedClassInError: 2353 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2354 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2355 THREAD->set_vm_result(NULL); 2356 break; 2357 2358 default: ShouldNotReachHere(); 2359 } 2360 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2361 } 2362 2363 CASE(_ldc2_w): 2364 { 2365 u2 index = Bytes::get_Java_u2(pc+1); 2366 2367 ConstantPool* constants = METHOD->constants(); 2368 switch (constants->tag_at(index).value()) { 2369 2370 case JVM_CONSTANT_Long: 2371 SET_STACK_LONG(constants->long_at(index), 1); 2372 break; 2373 2374 case JVM_CONSTANT_Double: 2375 SET_STACK_DOUBLE(constants->double_at(index), 1); 2376 break; 2377 default: ShouldNotReachHere(); 2378 } 2379 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2380 } 2381 2382 CASE(_fast_aldc_w): 2383 CASE(_fast_aldc): { 2384 u2 index; 2385 int incr; 2386 if (opcode == Bytecodes::_fast_aldc) { 2387 index = pc[1]; 2388 incr = 2; 2389 } else { 2390 index = Bytes::get_native_u2(pc+1); 2391 incr = 3; 2392 } 2393 2394 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2395 // This kind of CP cache entry does not need to match the flags byte, because 2396 // there is a 1-1 relation between bytecode type and CP entry type. 2397 ConstantPool* constants = METHOD->constants(); 2398 oop result = constants->resolved_references()->obj_at(index); 2399 if (result == NULL) { 2400 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2401 handle_exception); 2402 result = THREAD->vm_result(); 2403 } 2404 2405 VERIFY_OOP(result); 2406 SET_STACK_OBJECT(result, 0); 2407 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2408 } 2409 2410 CASE(_invokedynamic): { 2411 2412 u4 index = Bytes::get_native_u4(pc+1); 2413 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2414 2415 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2416 // This kind of CP cache entry does not need to match the flags byte, because 2417 // there is a 1-1 relation between bytecode type and CP entry type. 2418 if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { 2419 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2420 handle_exception); 2421 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2422 } 2423 2424 Method* method = cache->f1_as_method(); 2425 if (VerifyOops) method->verify(); 2426 2427 if (cache->has_appendix()) { 2428 ConstantPool* constants = METHOD->constants(); 2429 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2430 MORE_STACK(1); 2431 } 2432 2433 istate->set_msg(call_method); 2434 istate->set_callee(method); 2435 istate->set_callee_entry_point(method->from_interpreted_entry()); 2436 istate->set_bcp_advance(5); 2437 2438 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2439 BI_PROFILE_UPDATE_CALL(); 2440 2441 UPDATE_PC_AND_RETURN(0); // I'll be back... 2442 } 2443 2444 CASE(_invokehandle): { 2445 2446 u2 index = Bytes::get_native_u2(pc+1); 2447 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2448 2449 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2450 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2451 handle_exception); 2452 cache = cp->entry_at(index); 2453 } 2454 2455 Method* method = cache->f1_as_method(); 2456 if (VerifyOops) method->verify(); 2457 2458 if (cache->has_appendix()) { 2459 ConstantPool* constants = METHOD->constants(); 2460 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2461 MORE_STACK(1); 2462 } 2463 2464 istate->set_msg(call_method); 2465 istate->set_callee(method); 2466 istate->set_callee_entry_point(method->from_interpreted_entry()); 2467 istate->set_bcp_advance(3); 2468 2469 // Invokehandle has got a call counter, just like a final call -> increment! 2470 BI_PROFILE_UPDATE_FINALCALL(); 2471 2472 UPDATE_PC_AND_RETURN(0); // I'll be back... 2473 } 2474 2475 CASE(_invokeinterface): { 2476 u2 index = Bytes::get_native_u2(pc+1); 2477 2478 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2479 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2480 2481 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2482 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2483 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2484 handle_exception); 2485 cache = cp->entry_at(index); 2486 } 2487 2488 istate->set_msg(call_method); 2489 2490 // Special case of invokeinterface called for virtual method of 2491 // java.lang.Object. See cpCacheOop.cpp for details. 2492 // This code isn't produced by javac, but could be produced by 2493 // another compliant java compiler. 2494 if (cache->is_forced_virtual()) { 2495 Method* callee; 2496 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2497 if (cache->is_vfinal()) { 2498 callee = cache->f2_as_vfinal_method(); 2499 // Profile 'special case of invokeinterface' final call. 2500 BI_PROFILE_UPDATE_FINALCALL(); 2501 } else { 2502 // Get receiver. 2503 int parms = cache->parameter_size(); 2504 // Same comments as invokevirtual apply here. 2505 oop rcvr = STACK_OBJECT(-parms); 2506 VERIFY_OOP(rcvr); 2507 Klass* rcvrKlass = rcvr->klass(); 2508 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2509 // Profile 'special case of invokeinterface' virtual call. 
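// On this forced-virtual path f2 holds a vtable index rather than an itable
// index, so the dispatch above went through the receiver's vtable exactly as
// invokevirtual does below.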
2510 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2511 } 2512 istate->set_callee(callee); 2513 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2514 #ifdef VM_JVMTI 2515 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2516 istate->set_callee_entry_point(callee->interpreter_entry()); 2517 } 2518 #endif /* VM_JVMTI */ 2519 istate->set_bcp_advance(5); 2520 UPDATE_PC_AND_RETURN(0); // I'll be back... 2521 } 2522 2523 // this could definitely be cleaned up QQQ 2524 Method* callee; 2525 Klass* iclass = cache->f1_as_klass(); 2526 // InstanceKlass* interface = (InstanceKlass*) iclass; 2527 // get receiver 2528 int parms = cache->parameter_size(); 2529 oop rcvr = STACK_OBJECT(-parms); 2530 CHECK_NULL(rcvr); 2531 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2532 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2533 int i; 2534 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2535 if (ki->interface_klass() == iclass) break; 2536 } 2537 // If the interface isn't found, this class doesn't implement this 2538 // interface. The link resolver checks this but only for the first 2539 // time this interface is called. 2540 if (i == int2->itable_length()) { 2541 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2542 } 2543 int mindex = cache->f2_as_index(); 2544 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2545 callee = im[mindex].method(); 2546 if (callee == NULL) { 2547 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2548 } 2549 2550 // Profile virtual call. 2551 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2552 2553 istate->set_callee(callee); 2554 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2555 #ifdef VM_JVMTI 2556 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2557 istate->set_callee_entry_point(callee->interpreter_entry()); 2558 } 2559 #endif /* VM_JVMTI */ 2560 istate->set_bcp_advance(5); 2561 UPDATE_PC_AND_RETURN(0); // I'll be back... 2562 } 2563 2564 CASE(_invokevirtual): 2565 CASE(_invokespecial): 2566 CASE(_invokestatic): { 2567 u2 index = Bytes::get_native_u2(pc+1); 2568 2569 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2570 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2571 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2572 2573 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2574 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2575 handle_exception); 2576 cache = cp->entry_at(index); 2577 } 2578 2579 istate->set_msg(call_method); 2580 { 2581 Method* callee; 2582 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2583 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2584 if (cache->is_vfinal()) { 2585 callee = cache->f2_as_vfinal_method(); 2586 // Profile final call. 
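// vfinal: the resolved Method* is cached directly in f2, so no receiver class
// lookup is needed for a final (or effectively final) target.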
2587 BI_PROFILE_UPDATE_FINALCALL(); 2588 } else { 2589 // get receiver 2590 int parms = cache->parameter_size(); 2591 // this works but needs a resourcemark and seems to create a vtable on every call: 2592 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2593 // 2594 // this fails with an assert 2595 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2596 // but this works 2597 oop rcvr = STACK_OBJECT(-parms); 2598 VERIFY_OOP(rcvr); 2599 Klass* rcvrKlass = rcvr->klass(); 2600 /* 2601 Executing this code in java.lang.String: 2602 public String(char value[]) { 2603 this.count = value.length; 2604 this.value = (char[])value.clone(); 2605 } 2606 2607 a find on rcvr->klass() reports: 2608 {type array char}{type array class} 2609 - klass: {other class} 2610 2611 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2612 because rcvr->klass()->is_instance_klass() == 0 2613 However it seems to have a vtable in the right location. Huh? 2614 Because vtables have the same offset for ArrayKlass and InstanceKlass. 2615 */ 2616 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2617 // Profile virtual call. 2618 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2619 } 2620 } else { 2621 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2622 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2623 } 2624 callee = cache->f1_as_method(); 2625 2626 // Profile call. 2627 BI_PROFILE_UPDATE_CALL(); 2628 } 2629 2630 istate->set_callee(callee); 2631 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2632 #ifdef VM_JVMTI 2633 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2634 istate->set_callee_entry_point(callee->interpreter_entry()); 2635 } 2636 #endif /* VM_JVMTI */ 2637 istate->set_bcp_advance(3); 2638 UPDATE_PC_AND_RETURN(0); // I'll be back... 2639 } 2640 } 2641 2642 /* Allocate memory for a new java object. */ 2643 2644 CASE(_newarray): { 2645 BasicType atype = (BasicType) *(pc+1); 2646 jint size = STACK_INT(-1); 2647 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2648 handle_exception); 2649 // Must prevent reordering of stores for object initialization 2650 // with stores that publish the new object. 2651 OrderAccess::storestore(); 2652 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2653 THREAD->set_vm_result(NULL); 2654 2655 UPDATE_PC_AND_CONTINUE(2); 2656 } 2657 2658 /* Throw an exception. */ 2659 2660 CASE(_athrow): { 2661 oop except_oop = STACK_OBJECT(-1); 2662 CHECK_NULL(except_oop); 2663 // set pending_exception so we use common code 2664 THREAD->set_pending_exception(except_oop, NULL, 0); 2665 goto handle_exception; 2666 } 2667 2668 /* goto and jsr. They are exactly the same except jsr pushes 2669 * the address of the next instruction first. 2670 */ 2671 2672 CASE(_jsr): { 2673 /* push bytecode index on stack */ 2674 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2675 MORE_STACK(1); 2676 /* FALL THROUGH */ 2677 } 2678 2679 CASE(_goto): 2680 { 2681 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2682 // Profile jump. 
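// The signed 16-bit offset is relative to this goto/jsr opcode, which is why
// branch_pc is captured before UPDATE_PC so the backedge check still sees the
// branch origin.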
2683 BI_PROFILE_UPDATE_JUMP(); 2684 address branch_pc = pc; 2685 UPDATE_PC(offset); 2686 DO_BACKEDGE_CHECKS(offset, branch_pc); 2687 CONTINUE; 2688 } 2689 2690 CASE(_jsr_w): { 2691 /* push return address on the stack */ 2692 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2693 MORE_STACK(1); 2694 /* FALL THROUGH */ 2695 } 2696 2697 CASE(_goto_w): 2698 { 2699 int32_t offset = Bytes::get_Java_u4(pc + 1); 2700 // Profile jump. 2701 BI_PROFILE_UPDATE_JUMP(); 2702 address branch_pc = pc; 2703 UPDATE_PC(offset); 2704 DO_BACKEDGE_CHECKS(offset, branch_pc); 2705 CONTINUE; 2706 } 2707 2708 /* return from a jsr or jsr_w */ 2709 2710 CASE(_ret): { 2711 // Profile ret. 2712 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2713 // Now, update the pc. 2714 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2715 UPDATE_PC_AND_CONTINUE(0); 2716 } 2717 2718 /* debugger breakpoint */ 2719 2720 CASE(_breakpoint): { 2721 Bytecodes::Code original_bytecode; 2722 DECACHE_STATE(); 2723 SET_LAST_JAVA_FRAME(); 2724 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2725 METHOD, pc); 2726 RESET_LAST_JAVA_FRAME(); 2727 CACHE_STATE(); 2728 if (THREAD->has_pending_exception()) goto handle_exception; 2729 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2730 handle_exception); 2731 2732 opcode = (jubyte)original_bytecode; 2733 goto opcode_switch; 2734 } 2735 2736 DEFAULT: 2737 fatal("Unimplemented opcode %d = %s", opcode, 2738 Bytecodes::name((Bytecodes::Code)opcode)); 2739 goto finish; 2740 2741 } /* switch(opc) */ 2742 2743 2744 #ifdef USELABELS 2745 check_for_exception: 2746 #endif 2747 { 2748 if (!THREAD->has_pending_exception()) { 2749 CONTINUE; 2750 } 2751 /* We will be gcsafe soon, so flush our state. */ 2752 DECACHE_PC(); 2753 goto handle_exception; 2754 } 2755 do_continue: ; 2756 2757 } /* while (1) interpreter loop */ 2758 2759 2760 // An exception exists in the thread state see whether this activation can handle it 2761 handle_exception: { 2762 2763 HandleMarkCleaner __hmc(THREAD); 2764 Handle except_oop(THREAD, THREAD->pending_exception()); 2765 // Prevent any subsequent HandleMarkCleaner in the VM 2766 // from freeing the except_oop handle. 2767 HandleMark __hm(THREAD); 2768 2769 THREAD->clear_pending_exception(); 2770 assert(except_oop(), "No exception to process"); 2771 intptr_t continuation_bci; 2772 // expression stack is emptied 2773 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2774 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2775 handle_exception); 2776 2777 except_oop = THREAD->vm_result(); 2778 THREAD->set_vm_result(NULL); 2779 if (continuation_bci >= 0) { 2780 // Place exception on top of stack 2781 SET_STACK_OBJECT(except_oop(), 0); 2782 MORE_STACK(1); 2783 pc = METHOD->code_base() + continuation_bci; 2784 if (log_is_enabled(Info, exceptions)) { 2785 ResourceMark rm(THREAD); 2786 stringStream tempst; 2787 tempst.print("interpreter method <%s>\n" 2788 " at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2789 METHOD->print_value_string(), 2790 (int)(istate->bcp() - METHOD->code_base()), 2791 (int)continuation_bci, p2i(THREAD)); 2792 Exceptions::log_exception(except_oop, tempst); 2793 } 2794 // for AbortVMOnException flag 2795 Exceptions::debug_check_abort(except_oop); 2796 2797 // Update profiling data. 
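// A handler exists in this activation: the exception oop is now on top of the
// cleared expression stack and pc points at the handler bci, so we re-enter the
// dispatch loop at 'run'.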
2798 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2799 goto run; 2800 } 2801 if (log_is_enabled(Info, exceptions)) { 2802 ResourceMark rm; 2803 stringStream tempst; 2804 tempst.print("interpreter method <%s>\n" 2805 " at bci %d, unwinding for thread " INTPTR_FORMAT, 2806 METHOD->print_value_string(), 2807 (int)(istate->bcp() - METHOD->code_base()), 2808 p2i(THREAD)); 2809 Exceptions::log_exception(except_oop, tempst); 2810 } 2811 // for AbortVMOnException flag 2812 Exceptions::debug_check_abort(except_oop); 2813 2814 // No handler in this activation, unwind and try again 2815 THREAD->set_pending_exception(except_oop(), NULL, 0); 2816 goto handle_return; 2817 } // handle_exception: 2818 2819 // Return from an interpreter invocation with the result of the interpretation 2820 // on the top of the Java Stack (or a pending exception) 2821 2822 handle_Pop_Frame: { 2823 2824 // We don't really do anything special here except we must be aware 2825 // that we can get here without ever locking the method (if sync). 2826 // Also we skip the notification of the exit. 2827 2828 istate->set_msg(popping_frame); 2829 // Clear pending so while the pop is in process 2830 // we don't start another one if a call_vm is done. 2831 THREAD->clr_pop_frame_pending(); 2832 // Let interpreter (only) see the we're in the process of popping a frame 2833 THREAD->set_pop_frame_in_process(); 2834 2835 goto handle_return; 2836 2837 } // handle_Pop_Frame 2838 2839 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2840 // given by the invoker of the early return. 2841 handle_Early_Return: { 2842 2843 istate->set_msg(early_return); 2844 2845 // Clear expression stack. 2846 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2847 2848 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2849 2850 // Push the value to be returned. 2851 switch (istate->method()->result_type()) { 2852 case T_BOOLEAN: 2853 case T_SHORT: 2854 case T_BYTE: 2855 case T_CHAR: 2856 case T_INT: 2857 SET_STACK_INT(ts->earlyret_value().i, 0); 2858 MORE_STACK(1); 2859 break; 2860 case T_LONG: 2861 SET_STACK_LONG(ts->earlyret_value().j, 1); 2862 MORE_STACK(2); 2863 break; 2864 case T_FLOAT: 2865 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2866 MORE_STACK(1); 2867 break; 2868 case T_DOUBLE: 2869 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2870 MORE_STACK(2); 2871 break; 2872 case T_ARRAY: 2873 case T_OBJECT: 2874 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2875 MORE_STACK(1); 2876 break; 2877 } 2878 2879 ts->clr_earlyret_value(); 2880 ts->set_earlyret_oop(NULL); 2881 ts->clr_earlyret_pending(); 2882 2883 // Fall through to handle_return. 2884 2885 } // handle_Early_Return 2886 2887 handle_return: { 2888 // A storestore barrier is required to order initialization of 2889 // final fields with publishing the reference to the object that 2890 // holds the field. Without the barrier the value of final fields 2891 // can be observed to change. 
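// Illustrative example: for
//   class C { final int x; C() { x = 42; } }
// a constructor returning through here must not let the store to x float past
// the store that later publishes the new C to another thread, otherwise that
// thread could read x == 0 through the published reference.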
2892 OrderAccess::storestore(); 2893 2894 DECACHE_STATE(); 2895 2896 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2897 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2898 Handle original_exception(THREAD, THREAD->pending_exception()); 2899 Handle illegal_state_oop(THREAD, NULL); 2900 2901 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2902 // in any following VM entries from freeing our live handles, but illegal_state_oop 2903 // isn't really allocated yet and so doesn't become live until later and 2904 // in unpredictable places. Instead we must protect the places where we enter the 2905 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2906 // a NULL oop in it and then overwrite the oop later as needed. Unfortunately 2907 // this isn't possible. 2908 2909 THREAD->clear_pending_exception(); 2910 2911 // 2912 // As far as we are concerned we have returned. If we have a pending exception 2913 // that will be returned as this invocation's result. However if we get any 2914 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 2915 // will be our final result (i.e. monitor exception trumps a pending exception). 2916 // 2917 2918 // If we never locked the method (or really passed the point where we would have), 2919 // there is no need to unlock it (or look for other monitors), since that 2920 // could not have happened. 2921 2922 if (THREAD->do_not_unlock()) { 2923 2924 // Never locked, reset the flag now because obviously any caller must 2925 // have passed their point of locking for us to have gotten here. 2926 2927 THREAD->clr_do_not_unlock(); 2928 } else { 2929 // At this point we consider that we have returned. We now check that the 2930 // locks were properly block structured. If we find that they were not 2931 // used properly we will return with an illegal monitor exception. 2932 // The exception is checked by the caller not the callee since this 2933 // checking is considered to be part of the invocation and therefore 2934 // in the caller's scope (JVM spec 8.13). 2935 // 2936 // Another weird thing to watch for is if the method was locked 2937 // recursively and then not exited properly. This means we must 2938 // examine all the entries in reverse time (and stack) order and 2939 // unlock as we find them. If we find the method monitor before 2940 // we are at the initial entry then we should throw an exception. 2941 // It is not clear that the template-based interpreter does this 2942 // correctly. 2943 2944 BasicObjectLock* base = istate->monitor_base(); 2945 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 2946 bool method_unlock_needed = METHOD->is_synchronized(); 2947 // We know the initial monitor was used for the method, so don't check that 2948 // slot in the loop. 2949 if (method_unlock_needed) base--; 2950 2951 // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
2952 while (end < base) { 2953 oop lockee = end->obj(); 2954 if (lockee != NULL) { 2955 BasicLock* lock = end->lock(); 2956 markOop header = lock->displaced_header(); 2957 end->set_obj(NULL); 2958 2959 if (!lockee->mark()->has_bias_pattern()) { 2960 // If it isn't recursive we either must swap old header or call the runtime 2961 if (header != NULL) { 2962 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 2963 // restore object for the slow case 2964 end->set_obj(lockee); 2965 { 2966 // Prevent any HandleMarkCleaner from freeing our live handles 2967 HandleMark __hm(THREAD); 2968 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 2969 } 2970 } 2971 } 2972 } 2973 // One error is plenty 2974 if (illegal_state_oop() == NULL && !suppress_error) { 2975 { 2976 // Prevent any HandleMarkCleaner from freeing our live handles 2977 HandleMark __hm(THREAD); 2978 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2979 } 2980 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2981 illegal_state_oop = THREAD->pending_exception(); 2982 THREAD->clear_pending_exception(); 2983 } 2984 } 2985 end++; 2986 } 2987 // Unlock the method if needed 2988 if (method_unlock_needed) { 2989 if (base->obj() == NULL) { 2990 // The method is already unlocked this is not good. 2991 if (illegal_state_oop() == NULL && !suppress_error) { 2992 { 2993 // Prevent any HandleMarkCleaner from freeing our live handles 2994 HandleMark __hm(THREAD); 2995 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2996 } 2997 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2998 illegal_state_oop = THREAD->pending_exception(); 2999 THREAD->clear_pending_exception(); 3000 } 3001 } else { 3002 // 3003 // The initial monitor is always used for the method 3004 // However if that slot is no longer the oop for the method it was unlocked 3005 // and reused by something that wasn't unlocked! 3006 // 3007 // deopt can come in with rcvr dead because c2 knows 3008 // its value is preserved in the monitor. So we can't use locals[0] at all 3009 // and must use first monitor slot. 3010 // 3011 oop rcvr = base->obj(); 3012 if (rcvr == NULL) { 3013 if (!suppress_error) { 3014 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3015 illegal_state_oop = THREAD->pending_exception(); 3016 THREAD->clear_pending_exception(); 3017 } 3018 } else if (UseHeavyMonitors) { 3019 { 3020 // Prevent any HandleMarkCleaner from freeing our live handles. 
3021 HandleMark __hm(THREAD); 3022 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3023 } 3024 if (THREAD->has_pending_exception()) { 3025 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3026 THREAD->clear_pending_exception(); 3027 } 3028 } else { 3029 BasicLock* lock = base->lock(); 3030 markOop header = lock->displaced_header(); 3031 base->set_obj(NULL); 3032 3033 if (!rcvr->mark()->has_bias_pattern()) { 3034 base->set_obj(NULL); 3035 // If it isn't recursive we either must swap old header or call the runtime 3036 if (header != NULL) { 3037 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { 3038 // restore object for the slow case 3039 base->set_obj(rcvr); 3040 { 3041 // Prevent any HandleMarkCleaner from freeing our live handles 3042 HandleMark __hm(THREAD); 3043 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3044 } 3045 if (THREAD->has_pending_exception()) { 3046 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3047 THREAD->clear_pending_exception(); 3048 } 3049 } 3050 } 3051 } 3052 } 3053 } 3054 } 3055 } 3056 // Clear the do_not_unlock flag now. 3057 THREAD->clr_do_not_unlock(); 3058 3059 // 3060 // Notify jvmti/jvmdi 3061 // 3062 // NOTE: we do not notify a method_exit if we have a pending exception, 3063 // including an exception we generate for unlocking checks. In the former 3064 // case, JVMDI has already been notified by our call for the exception handler 3065 // and in both cases as far as JVMDI is concerned we have already returned. 3066 // If we notify it again JVMDI will be all confused about how many frames 3067 // are still on the stack (4340444). 3068 // 3069 // NOTE Further! It turns out the the JVMTI spec in fact expects to see 3070 // method_exit events whenever we leave an activation unless it was done 3071 // for popframe. This is nothing like jvmdi. However we are passing the 3072 // tests at the moment (apparently because they are jvmdi based) so rather 3073 // than change this code and possibly fail tests we will leave it alone 3074 // (with this note) in anticipation of changing the vm and the tests 3075 // simultaneously. 3076 3077 3078 // 3079 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3080 3081 3082 3083 #ifdef VM_JVMTI 3084 if (_jvmti_interp_events) { 3085 // Whenever JVMTI puts a thread in interp_only_mode, method 3086 // entry/exit events are sent for that thread to track stack depth. 3087 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3088 { 3089 // Prevent any HandleMarkCleaner from freeing our live handles 3090 HandleMark __hm(THREAD); 3091 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3092 } 3093 } 3094 } 3095 #endif /* VM_JVMTI */ 3096 3097 // 3098 // See if we are returning any exception 3099 // A pending exception that was pending prior to a possible popping frame 3100 // overrides the popping frame. 3101 // 3102 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3103 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3104 // Inform the frame manager we have no result. 
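// The exception itself travels in the thread's pending-exception slot; the
// throwing_exception message only tells the frame manager to unwind rather
// than look for a return value.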
3105 istate->set_msg(throwing_exception); 3106 if (illegal_state_oop() != NULL) 3107 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3108 else 3109 THREAD->set_pending_exception(original_exception(), NULL, 0); 3110 UPDATE_PC_AND_RETURN(0); 3111 } 3112 3113 if (istate->msg() == popping_frame) { 3114 // Make it simpler on the assembly code and set the message for the frame pop. 3115 // returns 3116 if (istate->prev() == NULL) { 3117 // We must be returning to a deoptimized frame (because popframe only happens between 3118 // two interpreted frames). We need to save the current arguments in C heap so that 3119 // the deoptimized frame when it restarts can copy the arguments to its expression 3120 // stack and re-execute the call. We also have to notify deoptimization that this 3121 // has occurred and to pick the preserved args copy them to the deoptimized frame's 3122 // java expression stack. Yuck. 3123 // 3124 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3125 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3126 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3127 } 3128 } else { 3129 istate->set_msg(return_from_method); 3130 } 3131 3132 // Normal return 3133 // Advance the pc and return to frame manager 3134 UPDATE_PC_AND_RETURN(1); 3135 } /* handle_return: */ 3136 3137 // This is really a fatal error return 3138 3139 finish: 3140 DECACHE_TOS(); 3141 DECACHE_PC(); 3142 3143 return; 3144 } 3145 3146 /* 3147 * All the code following this point is only produced once and is not present 3148 * in the JVMTI version of the interpreter 3149 */ 3150 3151 #ifndef VM_JVMTI 3152 3153 // This constructor should only be used to contruct the object to signal 3154 // interpreter initialization. All other instances should be created by 3155 // the frame manager. 3156 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3157 if (msg != initialize) ShouldNotReachHere(); 3158 _msg = msg; 3159 _self_link = this; 3160 _prev_link = NULL; 3161 } 3162 3163 // Inline static functions for Java Stack and Local manipulation 3164 3165 // The implementations are platform dependent. We have to worry about alignment 3166 // issues on some machines which can change on the same platform depending on 3167 // whether it is an LP64 machine also. 
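// Indexing convention used by the accessors below: the expression stack grows
// down from istate->stack_base(), offset -1 names the slot on top of the stack
// and offset 0 the first free slot, so e.g. stack_int(tos, -1) reads the int on
// top.  expr_index_at(-offset) maps that logical offset to a slot index from 'tos'.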
3168 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3169 return (address) tos[Interpreter::expr_index_at(-offset)]; 3170 } 3171 3172 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3173 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3174 } 3175 3176 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3177 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3178 } 3179 3180 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3181 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3182 } 3183 3184 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3185 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3186 } 3187 3188 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3189 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3190 } 3191 3192 // only used for value types 3193 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3194 int offset) { 3195 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3196 } 3197 3198 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3199 int offset) { 3200 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3201 } 3202 3203 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3204 int offset) { 3205 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3206 } 3207 3208 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3209 int offset) { 3210 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3211 } 3212 3213 // needs to be platform dep for the 32 bit platforms. 3214 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3215 int offset) { 3216 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3217 } 3218 3219 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3220 address addr, int offset) { 3221 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3222 ((VMJavaVal64*)addr)->d); 3223 } 3224 3225 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3226 int offset) { 3227 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3228 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3229 } 3230 3231 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3232 address addr, int offset) { 3233 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3234 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3235 ((VMJavaVal64*)addr)->l; 3236 } 3237 3238 // Locals 3239 3240 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3241 return (address)locals[Interpreter::local_index_at(-offset)]; 3242 } 3243 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3244 return (jint)locals[Interpreter::local_index_at(-offset)]; 3245 } 3246 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3247 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3248 } 3249 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3250 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3251 } 3252 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3253 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3254 } 3255 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3256 return 
    ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of the locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
    (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}

void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}
void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down: ..., v2, v1 -> ..., v1, v2, v1 */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down: ..., v3, v2, v1 -> ..., v1, v3, v2, v1 */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down: ..., v3, v2, v1 -> ..., v2, v1, v3, v2, v1 */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down: ..., v4, v3, v2, v1 -> ..., v2, v1, v4, v3, v2, v1 */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}


void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap the top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy the -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store the saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:          return("no_request");
     case BytecodeInterpreter::initialize:          return("initialize");
     // status messages to the C++ interpreter
     case BytecodeInterpreter::method_entry:        return("method_entry");
     case BytecodeInterpreter::method_resume:       return("method_resume");
     case BytecodeInterpreter::got_monitors:        return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:   return("rethrow_exception");
     // requests to the frame manager from the C++ interpreter
     case BytecodeInterpreter::call_method:         return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:       return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:       return("popping_frame");
     case BytecodeInterpreter::do_osr:              return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:        return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:       return("deopt_resume2");
     default:                                       return("BAD MSG");
  }
}

void BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t)
                this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

// Prints the interpreter state whose address is passed in 'arg'. Intended to be
// called by hand from a native debugger, hence the extern "C" (unmangled) name.
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP
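// Illustrative sketch only (not part of the original sources): the platform frame manager
// drives the interpreter through the message protocol whose request values are listed in
// C_msg() above. The entry-point and accessor names used here (run(), runWithChecks()) are
// assumptions made for this sketch; only msg()/set_msg() and the message values themselves
// appear in the code above.
#if 0
  BytecodeInterpreter::run(istate);                // runWithChecks(istate) in the VM_JVMTI build
  switch (istate->msg()) {
  case BytecodeInterpreter::call_method:
    // push a new interpreter frame for the callee recorded in _result._to_call and re-enter
    break;
  case BytecodeInterpreter::return_from_method:
    // pop this frame and hand the result back to the caller
    break;
  case BytecodeInterpreter::more_monitors:
    // grow the monitor area, then resume the method with got_monitors
    break;
  case BytecodeInterpreter::throwing_exception:
    // unwind; the pending exception propagates to the caller's frame
    break;
  default:
    break;
  }
#endif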