/*
 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/access.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

// no precompiled headers
#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE
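
/*
 * For example, with USELABELS in effect CASE(_iadd) expands to the GCC label
 * "opc_iadd", and the next bytecode is reached with a computed goto through
 * the dispatch_table set up in BytecodeInterpreter::run() (see the DISPATCH
 * and CONTINUE macros below). Without USELABELS the same handlers become
 * "case Bytecodes::_iadd:" arms of a single switch inside a while loop, and
 * CONTINUE falls back to an ordinary "continue".
 */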

/*
  Interpreter safepoint: it is expected that the interpreter will have no
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT \
    if ( SafepointSynchronize::is_synchronizing()) { \
        { \
          /* zap freed handles rather than GC'ing them */ \
          HandleMarkCleaner __hmc(THREAD); \
        } \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
    DECACHE_STATE(); \
    SET_LAST_JAVA_FRAME(); \
    { \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
       ThreadInVMfromJava trans(THREAD); \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
    } \
    RESET_LAST_JAVA_FRAME(); \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap) \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
{ \
    BytecodeCounter::_counter_value++; \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) { \
      CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0, \
                                                       topOfStack[Interpreter::expr_index_at(1)], \
                                                       topOfStack[Interpreter::expr_index_at(2)]), \
                                                       handle_exception); \
    } \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result, if we call at_single_stepping_point() we refetch the opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY() \
{ \
    if (_jvmti_interp_events) { \
      if (JvmtiExport::should_post_single_step()) { \
        DECACHE_STATE(); \
        SET_LAST_JAVA_FRAME(); \
        ThreadInVMfromJava trans(THREAD); \
        JvmtiExport::at_single_stepping_point(THREAD, \
                                              istate->method(), \
                                              pc); \
        RESET_LAST_JAVA_FRAME(); \
        CACHE_STATE(); \
        if (THREAD->pop_frame_pending() && \
            !THREAD->pop_frame_in_process()) { \
          goto handle_Pop_Frame; \
        } \
        if (THREAD->jvmti_thread_state() && \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return; \
        } \
        opcode = *pc; \
      } \
    } \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif

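/*
 * Typical use of the error macros above, as seen in the opcode handlers
 * further down (e.g. the integer divide case):
 *
 *   VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(),
 *                 "/ by zero", note_div0Check_trap);
 *
 * This decaches the interpreter state, transitions into the VM to record the
 * trap and construct the exception, and then jumps to handle_exception.
 */
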
/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE { \
        opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE { \
        opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        continue; \
    }
#else
#define CONTINUE { \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        continue; \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; opcode = *pc; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        DISPATCH(opcode); \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; opcode = *pc; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; opcode = *pc; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
        pc += opsize; MORE_STACK(stack); \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) { \
        pc += opsize; \
        DO_UPDATE_INSTRUCTION_COUNT(opcode); \
        DEBUGGER_SINGLE_STEP_NOTIFY(); \
        goto do_continue; \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method, save the adjusted pc and return to the frame manager
#define UPDATE_PC_AND_RETURN(opsize) \
   DECACHE_TOS(); \
   istate->set_bcp(pc+opsize); \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res) \
  res = METHOD->method_counters(); \
  if (res == NULL) { \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc) \
    if ((skip) <= 0) { \
      MethodCounters* mcs; \
      GET_METHOD_COUNTERS(mcs); \
      if (UseLoopCounter) { \
        bool do_OSR = UseOnStackReplacement; \
        mcs->backedge_counter()->increment(); \
        if (ProfileInterpreter) { \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
          /* Check for overflow against MDO count. */ \
          do_OSR = do_OSR \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
            /* When ProfileInterpreter is on, the backedge_count comes */ \
            /* from the methodDataOop, which value does not get reset on */ \
            /* the call to frequency_counter_overflow(). To avoid */ \
            /* excessive calls to the overflow routine while the method is */ \
            /* being compiled, add a second test to make sure the overflow */ \
            /* function is called only once every overflow_frequency. */ \
            && (!(mdo_last_branch_taken_count & 1023)); \
        } else { \
          /* check for overflow of backedge counter */ \
          do_OSR = do_OSR \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        } \
        if (do_OSR) { \
          nmethod* osr_nmethod; \
          OSR_REQUEST(osr_nmethod, branch_pc); \
          if (osr_nmethod != NULL && osr_nmethod->is_in_use()) { \
            intptr_t* buf; \
            /* Call OSR migration with last java frame only, no checks. */ \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
            istate->set_msg(do_osr); \
            istate->set_osr_buf((address)buf); \
            istate->set_osr_entry(osr_nmethod->osr_entry()); \
            return; \
          } \
        } \
      }  /* UseCompiler ... */ \
      SAFEPOINT; \
    }

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_) \
        if ((obj_) == NULL) { \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        } \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE() \
        CACHE_TOS(); \
        CACHE_PC(); \
        CACHE_CP(); \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func) \
        DECACHE_STATE(); \
        SET_LAST_JAVA_FRAME(); \
        func; \
        RESET_LAST_JAVA_FRAME(); \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func) \
        CALL_VM_NAKED_LJF(func) \
        if (THREAD->pop_frame_pending() && \
            !THREAD->pop_frame_in_process()) { \
          goto handle_Pop_Frame; \
        } \
        if (THREAD->jvmti_thread_state() && \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return; \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) { \
          CALL_VM_NOCHECK(func); \
          if (THREAD->has_pending_exception()) goto label; \
        }

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
#ifndef SHARK
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate
  // save a copy so we can verify it.
  interpreterState orig = istate;
#endif
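
  // The locals declared below cache the hot fields of *istate in registers.
  // Any call into the VM can move the expression stack or install a new bcp,
  // so the CALL_VM family of macros writes these back with DECACHE_STATE()
  // before the call and refreshes them with CACHE_STATE() afterwards.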

  register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address          pc = istate->bcp();
  register jubyte opcode;
  register intptr_t*        locals = istate->locals();
  register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*      THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif

#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,

/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,

/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,

/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,

/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,

/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,

/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,

/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,

/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn, &&opc_lreturn,  &&opc_freturn,   &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xD0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xD4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xD8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xDC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xE0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xE4 */ &&opc_default,     &&opc_default,        &&opc_fast_aldc,    &&opc_fast_aldc_w,
/* 0xE8 */ &&opc_return_register_finalizer,
           &&opc_invokehandle,&&opc_default,        &&opc_default,
/* 0xEC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xF0 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xF4 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xF8 */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,
/* 0xFC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */

#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif
// #define HACK
#ifdef HACK
  bool interesting = false;
#endif // HACK

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
            topOfStack >= istate->stack_limit() &&
            topOfStack < istate->stack_base(),
            "Stack top out of range");

#ifdef CC_INTERP_PROFILE
  // MethodData's last branch taken count.
  uint mdo_last_branch_taken_count = 0;
#else
  const uint mdo_last_branch_taken_count = 0;
#endif

  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
#if COMPILER2_OR_JVMCI
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
#endif
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
          // We no longer retry on a counter overflow.
        }
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("entering: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack),
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK

      // Lock method if synchronized.
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);

          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }

        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }

    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      if (_compiling) {
        // Set MDX back to the ProfileData of the invoke bytecode that will be
        // restarted.
        SET_MDX(NULL);
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      THREAD->clr_pop_frame_in_process();
      goto run;
    }

    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack) ,
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }
      if (THREAD->jvmti_thread_state() &&
          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
        goto handle_Early_Return;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result; all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending)
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete we can't advance because there are no more opcodes
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // dereferencing lockee ought to provoke an implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value) \
      CASE(opcode): \
          SET_STACK_ ## const_type(value, 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,       -1);
          OPC_CONST_n(_iconst_0,    INT,        0);
          OPC_CONST_n(_iconst_1,    INT,        1);
          OPC_CONST_n(_iconst_2,    INT,        2);
          OPC_CONST_n(_iconst_3,    INT,        3);
          OPC_CONST_n(_iconst_4,    INT,        4);
          OPC_CONST_n(_iconst_5,    INT,        5);
          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind) \
      CASE(_##opcname): \
      { \
          SET_STACK_ ## kind(VM##key##Const##value(), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
      }
         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
         OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
         OPC_CONST2_n(lconst_0, Zero, long, LONG);
         OPC_CONST2_n(lconst_1, One,  long, LONG);

         /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num) \
      CASE(_aload_##num): \
          VERIFY_OOP(LOCALS_OBJECT(num)); \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
          \
      CASE(_iload_##num): \
      CASE(_fload_##num): \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
          \
      CASE(_lload_##num): \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
      CASE(_dload_##num): \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];

          // Wide and its sub-bytecode are counted as separate instructions. If we
          // don't account for this here, the bytecode trace skips the next bytecode.
          DO_UPDATE_INSTRUCTION_COUNT(opcode);

          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // Profile ret.
                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
                  // Now, update the pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
          }
      }


#undef  OPC_STORE_n
#define OPC_STORE_n(num) \
      CASE(_astore_##num): \
          astore(topOfStack, -1, locals, num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_istore_##num): \
      CASE(_fstore_##num): \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num) \
      CASE(_dstore_##num): \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      CASE(_lstore_##num): \
          SET_LOCALS_LONG(STACK_LONG(-1), num); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):               /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):              /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):    /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):    /* insert top word three down  */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):   /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):   /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {        /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }

          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test) \
      CASE(_i##opcname): \
          if (test && (STACK_INT(-1) == 0)) { \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero", note_div0Check_trap); \
          } \
          SET_STACK_INT(VMint##opname(STACK_INT(-2), \
                                      STACK_INT(-1)), \
                                      -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_l##opcname): \
      { \
          if (test) { \
            jlong l1 = STACK_LONG(-1); \
            if (VMlongEqz(l1)) { \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero", note_div0Check_trap); \
            } \
          } \
          /* First long at (-1,-2) next long at (-3,-4) */ \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
                                        STACK_LONG(-1)), \
                                        -3); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname) \
      CASE(_d##opcname): { \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
                                            STACK_DOUBLE(-1)), \
                                            -3); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
      } \
      CASE(_f##opcname): \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
                                          STACK_FLOAT(-1)), \
                                          -2); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


     OPC_FLOAT_BINARY(add, Add);
     OPC_FLOAT_BINARY(sub, Sub);
     OPC_FLOAT_BINARY(mul, Mul);
     OPC_FLOAT_BINARY(div, Div);
     OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname) \
      CASE(_i##opcname): \
         SET_STACK_INT(VMint##opname(STACK_INT(-2), \
                                     STACK_INT(-1)), \
                                     -2); \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      CASE(_l##opcname): \
      { \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
                                       STACK_INT(-1)), \
                                       -2); \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);

     /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

     /* negate the value on the top of the stack */

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):   /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):  /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):  /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):  /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      /* comparison operators */

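      /* In the branch macros below, a taken branch uses the signed 16-bit
       * offset that follows the opcode, while an untaken branch advances by
       * 3, the length of an if<cond>/if_icmp<cond>/if_acmp<cond> instruction.
       * A non-positive skip therefore marks a backward branch, which is why
       * DO_BACKEDGE_CHECKS only acts when skip <= 0.
       */
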
#define COMPARISON_OP(name, comparison) \
      CASE(_if_icmp##name): { \
          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -2); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      } \
      CASE(_if##name): { \
          const bool cmp = (STACK_INT(-1) comparison 0); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define COMPARISON_OP2(name, comparison) \
      COMPARISON_OP(name, comparison) \
      CASE(_if_acmp##name): { \
          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
          int skip = cmp \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -2); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define NULL_COMPARISON_NOT_OP(name) \
      CASE(_if##name): { \
          const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }

#define NULL_COMPARISON_OP(name) \
      CASE(_if##name): { \
          const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
          int skip = cmp \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
          address branch_pc = pc; \
          /* Profile branch. */ \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
          UPDATE_PC_AND_TOS(skip, -1); \
          DO_BACKEDGE_CHECKS(skip, branch_pc); \
          CONTINUE; \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);

      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          if (((uint32_t) key > (uint32_t)(high - low))) {
            key = -1;
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          // Remember index.
          int      index = -1;
          int      newindex = 0;
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              index = newindex;
              break;
            }
            newindex += 1;
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                        STACK_FLOAT(-1),
                                        (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }


      /* Return from a method */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {

          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
//        arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
#define ARRAY_INTRO(arrayOff) \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
      jint     index  = STACK_INT(arrayOff + 1); \
      char message[jintAsStringSize]; \
      CHECK_NULL(arrObj); \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
          sprintf(message, "%d", index); \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message, note_rangeCheck_trap); \
      }

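      /* For example, ARRAY_INTRO(-2) as used by the loads below expands to a
       * null check on the array reference at stack slot -2 plus a bounds
       * check of the index at slot -1, throwing NullPointerException or
       * ArrayIndexOutOfBoundsException (with the offending index as the
       * message) before the element is touched.
       */
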
These handle conversion from < 32-bit types */ 1682 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1683 { \ 1684 ARRAY_INTRO(-2); \ 1685 (void)extra; \ 1686 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1687 -2); \ 1688 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1689 } 1690 1691 /* 64-bit loads */ 1692 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1693 { \ 1694 ARRAY_INTRO(-2); \ 1695 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1696 (void)extra; \ 1697 UPDATE_PC_AND_CONTINUE(1); \ 1698 } 1699 1700 CASE(_iaload): 1701 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1702 CASE(_faload): 1703 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1704 CASE(_aaload): { 1705 ARRAY_INTRO(-2); 1706 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1707 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1708 } 1709 CASE(_baload): 1710 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1711 CASE(_caload): 1712 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1713 CASE(_saload): 1714 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1715 CASE(_laload): 1716 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1717 CASE(_daload): 1718 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1719 1720 /* 32-bit stores. These handle conversion to < 32-bit types */ 1721 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1722 { \ 1723 ARRAY_INTRO(-3); \ 1724 (void)extra; \ 1725 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1726 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1727 } 1728 1729 /* 64-bit stores */ 1730 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1731 { \ 1732 ARRAY_INTRO(-4); \ 1733 (void)extra; \ 1734 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1735 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1736 } 1737 1738 CASE(_iastore): 1739 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1740 CASE(_fastore): 1741 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1742 /* 1743 * This one looks different because of the assignability check 1744 */ 1745 CASE(_aastore): { 1746 oop rhsObject = STACK_OBJECT(-1); 1747 VERIFY_OOP(rhsObject); 1748 ARRAY_INTRO( -3); 1749 // arrObj, index are set 1750 if (rhsObject != NULL) { 1751 /* Check assignability of rhsObject into arrObj */ 1752 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1753 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1754 // 1755 // Check for compatibilty. This check must not GC!! 1756 // Seems way more expensive now that we must dispatch 1757 // 1758 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1759 // Decrement counter if subtype check failed. 1760 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1761 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1762 } 1763 // Profile checkcast with null_seen and receiver. 1764 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1765 } else { 1766 // Profile checkcast with null_seen and receiver. 
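        // Note: a null reference is assignable to any reference array type, so no
        // subtype check is needed on this path and only the null_seen bit of the
        // profile is recorded. For example, given
        //   Object[] a = new String[1];
        // the store a[0] = null always succeeds, whereas a[0] = new Object()
        // takes the ArrayStoreException path above.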
1767 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1768 } 1769 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1770 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1771 } 1772 CASE(_bastore): { 1773 ARRAY_INTRO(-3); 1774 int item = STACK_INT(-1); 1775 // if it is a T_BOOLEAN array, mask the stored value to 0/1 1776 if (arrObj->klass() == Universe::boolArrayKlassObj()) { 1777 item &= 1; 1778 } else { 1779 assert(arrObj->klass() == Universe::byteArrayKlassObj(), 1780 "should be byte array otherwise"); 1781 } 1782 ((typeArrayOop)arrObj)->byte_at_put(index, item); 1783 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1784 } 1785 CASE(_castore): 1786 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1787 CASE(_sastore): 1788 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1789 CASE(_lastore): 1790 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1791 CASE(_dastore): 1792 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1793 1794 CASE(_arraylength): 1795 { 1796 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1797 CHECK_NULL(ary); 1798 SET_STACK_INT(ary->length(), -1); 1799 UPDATE_PC_AND_CONTINUE(1); 1800 } 1801 1802 /* monitorenter and monitorexit for locking/unlocking an object */ 1803 1804 CASE(_monitorenter): { 1805 oop lockee = STACK_OBJECT(-1); 1806 // derefing's lockee ought to provoke implicit null check 1807 CHECK_NULL(lockee); 1808 // find a free monitor or one already allocated for this object 1809 // if we find a matching object then we need a new monitor 1810 // since this is recursive enter 1811 BasicObjectLock* limit = istate->monitor_base(); 1812 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1813 BasicObjectLock* entry = NULL; 1814 while (most_recent != limit ) { 1815 if (most_recent->obj() == NULL) entry = most_recent; 1816 else if (most_recent->obj() == lockee) break; 1817 most_recent++; 1818 } 1819 if (entry != NULL) { 1820 entry->set_obj(lockee); 1821 int success = false; 1822 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1823 1824 markOop mark = lockee->mark(); 1825 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1826 // implies UseBiasedLocking 1827 if (mark->has_bias_pattern()) { 1828 uintptr_t thread_ident; 1829 uintptr_t anticipated_bias_locking_value; 1830 thread_ident = (uintptr_t)istate->thread(); 1831 anticipated_bias_locking_value = 1832 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1833 ~((uintptr_t) markOopDesc::age_mask_in_place); 1834 1835 if (anticipated_bias_locking_value == 0) { 1836 // already biased towards this thread, nothing to do 1837 if (PrintBiasedLockingStatistics) { 1838 (* BiasedLocking::biased_lock_entry_count_addr())++; 1839 } 1840 success = true; 1841 } 1842 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1843 // try revoke bias 1844 markOop header = lockee->klass()->prototype_header(); 1845 if (hash != markOopDesc::no_hash) { 1846 header = header->copy_set_hash(hash); 1847 } 1848 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1849 if (PrintBiasedLockingStatistics) 1850 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1851 } 1852 } 1853 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1854 // try rebias 1855 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1856 if (hash != markOopDesc::no_hash) { 1857 new_header = new_header->copy_set_hash(hash); 1858 } 1859 if (Atomic::cmpxchg_ptr((void*)new_header, 
lockee->mark_addr(), mark) == mark) { 1860 if (PrintBiasedLockingStatistics) 1861 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1862 } 1863 else { 1864 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1865 } 1866 success = true; 1867 } 1868 else { 1869 // try to bias towards thread in case object is anonymously biased 1870 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1871 (uintptr_t)markOopDesc::age_mask_in_place | 1872 epoch_mask_in_place)); 1873 if (hash != markOopDesc::no_hash) { 1874 header = header->copy_set_hash(hash); 1875 } 1876 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1877 // debugging hint 1878 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1879 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1880 if (PrintBiasedLockingStatistics) 1881 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1882 } 1883 else { 1884 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1885 } 1886 success = true; 1887 } 1888 } 1889 1890 // traditional lightweight locking 1891 if (!success) { 1892 markOop displaced = lockee->mark()->set_unlocked(); 1893 entry->lock()->set_displaced_header(displaced); 1894 bool call_vm = UseHeavyMonitors; 1895 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1896 // Is it simple recursive case? 1897 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1898 entry->lock()->set_displaced_header(NULL); 1899 } else { 1900 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1901 } 1902 } 1903 } 1904 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1905 } else { 1906 istate->set_msg(more_monitors); 1907 UPDATE_PC_AND_RETURN(0); // Re-execute 1908 } 1909 } 1910 1911 CASE(_monitorexit): { 1912 oop lockee = STACK_OBJECT(-1); 1913 CHECK_NULL(lockee); 1914 // derefing's lockee ought to provoke implicit null check 1915 // find our monitor slot 1916 BasicObjectLock* limit = istate->monitor_base(); 1917 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1918 while (most_recent != limit ) { 1919 if ((most_recent)->obj() == lockee) { 1920 BasicLock* lock = most_recent->lock(); 1921 markOop header = lock->displaced_header(); 1922 most_recent->set_obj(NULL); 1923 if (!lockee->mark()->has_bias_pattern()) { 1924 bool call_vm = UseHeavyMonitors; 1925 // If it isn't recursive we either must swap old header or call the runtime 1926 if (header != NULL || call_vm) { 1927 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1928 // restore object for the slow case 1929 most_recent->set_obj(lockee); 1930 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1931 } 1932 } 1933 } 1934 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1935 } 1936 most_recent++; 1937 } 1938 // Need to throw illegal monitor state exception 1939 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1940 ShouldNotReachHere(); 1941 } 1942 1943 /* All of the non-quick opcodes. */ 1944 1945 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1946 * constant pool index in the instruction. 1947 */ 1948 CASE(_getfield): 1949 CASE(_getstatic): 1950 { 1951 u2 index; 1952 ConstantPoolCacheEntry* cache; 1953 index = Bytes::get_native_u2(pc+1); 1954 1955 // QQQ Need to make this as inlined as possible. 
Probably need to 1956 // split all the bytecode cases out so c++ compiler has a chance 1957 // for constant prop to fold everything possible away. 1958 1959 cache = cp->entry_at(index); 1960 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1961 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 1962 handle_exception); 1963 cache = cp->entry_at(index); 1964 } 1965 1966 #ifdef VM_JVMTI 1967 if (_jvmti_interp_events) { 1968 int *count_addr; 1969 oop obj; 1970 // Check to see if a field modification watch has been set 1971 // before we take the time to call into the VM. 1972 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1973 if ( *count_addr > 0 ) { 1974 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1975 obj = (oop)NULL; 1976 } else { 1977 obj = (oop) STACK_OBJECT(-1); 1978 VERIFY_OOP(obj); 1979 } 1980 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1981 obj, 1982 cache), 1983 handle_exception); 1984 } 1985 } 1986 #endif /* VM_JVMTI */ 1987 1988 oop obj; 1989 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1990 Klass* k = cache->f1_as_klass(); 1991 obj = k->java_mirror(); 1992 MORE_STACK(1); // Assume single slot push 1993 } else { 1994 obj = (oop) STACK_OBJECT(-1); 1995 CHECK_NULL(obj); 1996 } 1997 1998 // 1999 // Now store the result on the stack 2000 // 2001 TosState tos_type = cache->flag_state(); 2002 int field_offset = cache->f2_as_index(); 2003 if (cache->is_volatile()) { 2004 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2005 OrderAccess::fence(); 2006 } 2007 if (tos_type == atos) { 2008 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 2009 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 2010 } else if (tos_type == itos) { 2011 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 2012 } else if (tos_type == ltos) { 2013 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 2014 MORE_STACK(1); 2015 } else if (tos_type == btos || tos_type == ztos) { 2016 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 2017 } else if (tos_type == ctos) { 2018 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 2019 } else if (tos_type == stos) { 2020 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 2021 } else if (tos_type == ftos) { 2022 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 2023 } else { 2024 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 2025 MORE_STACK(1); 2026 } 2027 } else { 2028 if (tos_type == atos) { 2029 VERIFY_OOP(obj->obj_field(field_offset)); 2030 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2031 } else if (tos_type == itos) { 2032 SET_STACK_INT(obj->int_field(field_offset), -1); 2033 } else if (tos_type == ltos) { 2034 SET_STACK_LONG(obj->long_field(field_offset), 0); 2035 MORE_STACK(1); 2036 } else if (tos_type == btos || tos_type == ztos) { 2037 SET_STACK_INT(obj->byte_field(field_offset), -1); 2038 } else if (tos_type == ctos) { 2039 SET_STACK_INT(obj->char_field(field_offset), -1); 2040 } else if (tos_type == stos) { 2041 SET_STACK_INT(obj->short_field(field_offset), -1); 2042 } else if (tos_type == ftos) { 2043 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2044 } else { 2045 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2046 MORE_STACK(1); 2047 } 2048 } 2049 2050 UPDATE_PC_AND_CONTINUE(3); 2051 } 2052 2053 CASE(_putfield): 2054 CASE(_putstatic): 2055 { 2056 u2 index = Bytes::get_native_u2(pc+1); 2057 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2058 if 
(!cache->is_resolved((Bytecodes::Code)opcode)) { 2059 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2060 handle_exception); 2061 cache = cp->entry_at(index); 2062 } 2063 2064 #ifdef VM_JVMTI 2065 if (_jvmti_interp_events) { 2066 int *count_addr; 2067 oop obj; 2068 // Check to see if a field modification watch has been set 2069 // before we take the time to call into the VM. 2070 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2071 if ( *count_addr > 0 ) { 2072 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2073 obj = (oop)NULL; 2074 } 2075 else { 2076 if (cache->is_long() || cache->is_double()) { 2077 obj = (oop) STACK_OBJECT(-3); 2078 } else { 2079 obj = (oop) STACK_OBJECT(-2); 2080 } 2081 VERIFY_OOP(obj); 2082 } 2083 2084 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2085 obj, 2086 cache, 2087 (jvalue *)STACK_SLOT(-1)), 2088 handle_exception); 2089 } 2090 } 2091 #endif /* VM_JVMTI */ 2092 2093 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2094 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2095 2096 oop obj; 2097 int count; 2098 TosState tos_type = cache->flag_state(); 2099 2100 count = -1; 2101 if (tos_type == ltos || tos_type == dtos) { 2102 --count; 2103 } 2104 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2105 Klass* k = cache->f1_as_klass(); 2106 obj = k->java_mirror(); 2107 } else { 2108 --count; 2109 obj = (oop) STACK_OBJECT(count); 2110 CHECK_NULL(obj); 2111 } 2112 2113 // 2114 // Now store the result 2115 // 2116 int field_offset = cache->f2_as_index(); 2117 if (cache->is_volatile()) { 2118 if (tos_type == itos) { 2119 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2120 } else if (tos_type == atos) { 2121 VERIFY_OOP(STACK_OBJECT(-1)); 2122 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2123 } else if (tos_type == btos) { 2124 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2125 } else if (tos_type == ztos) { 2126 int bool_field = STACK_INT(-1); // only store LSB 2127 obj->release_byte_field_put(field_offset, (bool_field & 1)); 2128 } else if (tos_type == ltos) { 2129 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2130 } else if (tos_type == ctos) { 2131 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2132 } else if (tos_type == stos) { 2133 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2134 } else if (tos_type == ftos) { 2135 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2136 } else { 2137 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2138 } 2139 OrderAccess::storeload(); 2140 } else { 2141 if (tos_type == itos) { 2142 obj->int_field_put(field_offset, STACK_INT(-1)); 2143 } else if (tos_type == atos) { 2144 VERIFY_OOP(STACK_OBJECT(-1)); 2145 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2146 } else if (tos_type == btos) { 2147 obj->byte_field_put(field_offset, STACK_INT(-1)); 2148 } else if (tos_type == ztos) { 2149 int bool_field = STACK_INT(-1); // only store LSB 2150 obj->byte_field_put(field_offset, (bool_field & 1)); 2151 } else if (tos_type == ltos) { 2152 obj->long_field_put(field_offset, STACK_LONG(-1)); 2153 } else if (tos_type == ctos) { 2154 obj->char_field_put(field_offset, STACK_INT(-1)); 2155 } else if (tos_type == stos) { 2156 obj->short_field_put(field_offset, STACK_INT(-1)); 2157 } else if (tos_type == ftos) { 2158 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2159 } else { 
2160 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2161 } 2162 } 2163 2164 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2165 } 2166 2167 CASE(_new): { 2168 u2 index = Bytes::get_Java_u2(pc+1); 2169 ConstantPool* constants = istate->method()->constants(); 2170 if (!constants->tag_at(index).is_unresolved_klass()) { 2171 // Make sure klass is initialized and doesn't have a finalizer 2172 Klass* entry = constants->slot_at(index).get_klass(); 2173 InstanceKlass* ik = InstanceKlass::cast(entry); 2174 if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2175 size_t obj_size = ik->size_helper(); 2176 oop result = NULL; 2177 // If the TLAB isn't pre-zeroed then we'll have to do it 2178 bool need_zero = !ZeroTLAB; 2179 if (UseTLAB) { 2180 result = (oop) THREAD->tlab().allocate(obj_size); 2181 } 2182 // Disable non-TLAB-based fast-path, because profiling requires that all 2183 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2184 // returns NULL. 2185 #ifndef CC_INTERP_PROFILE 2186 if (result == NULL) { 2187 need_zero = true; 2188 // Try allocate in shared eden 2189 retry: 2190 HeapWord* compare_to = *Universe::heap()->top_addr(); 2191 HeapWord* new_top = compare_to + obj_size; 2192 if (new_top <= *Universe::heap()->end_addr()) { 2193 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2194 goto retry; 2195 } 2196 result = (oop) compare_to; 2197 } 2198 } 2199 #endif 2200 if (result != NULL) { 2201 // Initialize object (if nonzero size and need) and then the header 2202 if (need_zero ) { 2203 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2204 obj_size -= sizeof(oopDesc) / oopSize; 2205 if (obj_size > 0 ) { 2206 memset(to_zero, 0, obj_size * HeapWordSize); 2207 } 2208 } 2209 if (UseBiasedLocking) { 2210 result->set_mark(ik->prototype_header()); 2211 } else { 2212 result->set_mark(markOopDesc::prototype()); 2213 } 2214 result->set_klass_gap(0); 2215 result->set_klass(ik); 2216 // Must prevent reordering of stores for object initialization 2217 // with stores that publish the new object. 2218 OrderAccess::storestore(); 2219 SET_STACK_OBJECT(result, 0); 2220 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2221 } 2222 } 2223 } 2224 // Slow case allocation 2225 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2226 handle_exception); 2227 // Must prevent reordering of stores for object initialization 2228 // with stores that publish the new object. 2229 OrderAccess::storestore(); 2230 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2231 THREAD->set_vm_result(NULL); 2232 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2233 } 2234 CASE(_anewarray): { 2235 u2 index = Bytes::get_Java_u2(pc+1); 2236 jint size = STACK_INT(-1); 2237 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2238 handle_exception); 2239 // Must prevent reordering of stores for object initialization 2240 // with stores that publish the new object. 2241 OrderAccess::storestore(); 2242 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2243 THREAD->set_vm_result(NULL); 2244 UPDATE_PC_AND_CONTINUE(3); 2245 } 2246 CASE(_multianewarray): { 2247 jint dims = *(pc+3); 2248 jint size = STACK_INT(-1); 2249 // stack grows down, dimensions are up! 
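      // Illustrative note: for "new int[2][3]" the compiler pushes the two dimension
      // counts in source order (2, then 3) and dims == 2, so the counts occupy the
      // top expression-stack slots. dimarray (set up below) addresses those counts so
      // InterpreterRuntime::multianewarray can read all dims sizes; the TOS adjustment
      // of -(dims-1) afterwards pops the counts and leaves a single slot holding the
      // new array reference.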
2250 jint *dimarray = 2251 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2252 Interpreter::stackElementWords-1]; 2253 // adjust pointer to start of stack element 2254 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2255 handle_exception); 2256 // Must prevent reordering of stores for object initialization 2257 // with stores that publish the new object. 2258 OrderAccess::storestore(); 2259 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2260 THREAD->set_vm_result(NULL); 2261 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2262 } 2263 CASE(_checkcast): 2264 if (STACK_OBJECT(-1) != NULL) { 2265 VERIFY_OOP(STACK_OBJECT(-1)); 2266 u2 index = Bytes::get_Java_u2(pc+1); 2267 // Constant pool may have actual klass or unresolved klass. If it is 2268 // unresolved we must resolve it. 2269 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2270 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2271 } 2272 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2273 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2274 // 2275 // Check for compatibility. This check must not GC!! 2276 // Seems way more expensive now that we must dispatch. 2277 // 2278 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2279 // Decrement counter at checkcast. 2280 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2281 ResourceMark rm(THREAD); 2282 char* message = SharedRuntime::generate_class_cast_message( 2283 objKlass, klassOf); 2284 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2285 } 2286 // Profile checkcast with null_seen and receiver. 2287 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2288 } else { 2289 // Profile checkcast with null_seen and receiver. 2290 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2291 } 2292 UPDATE_PC_AND_CONTINUE(3); 2293 2294 CASE(_instanceof): 2295 if (STACK_OBJECT(-1) == NULL) { 2296 SET_STACK_INT(0, -1); 2297 // Profile instanceof with null_seen and receiver. 2298 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2299 } else { 2300 VERIFY_OOP(STACK_OBJECT(-1)); 2301 u2 index = Bytes::get_Java_u2(pc+1); 2302 // Constant pool may have actual klass or unresolved klass. If it is 2303 // unresolved we must resolve it. 2304 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2305 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2306 } 2307 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); 2308 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2309 // 2310 // Check for compatibility. This check must not GC!! 2311 // Seems way more expensive now that we must dispatch. 2312 // 2313 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2314 SET_STACK_INT(1, -1); 2315 } else { 2316 SET_STACK_INT(0, -1); 2317 // Decrement counter at instanceof. 2318 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2319 } 2320 // Profile instanceof with null_seen and receiver.
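        // Note: unlike checkcast, which lets a null reference through unchanged,
        // instanceof pushed 0 for null in the branch above. For a non-null receiver
        // the result follows the usual subtype rule; for example, a String receiver
        // tested against CharSequence leaves 1 on the stack, while testing it
        // against Integer leaves 0.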
2321 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2322 } 2323 UPDATE_PC_AND_CONTINUE(3); 2324 2325 CASE(_ldc_w): 2326 CASE(_ldc): 2327 { 2328 u2 index; 2329 bool wide = false; 2330 int incr = 2; // frequent case 2331 if (opcode == Bytecodes::_ldc) { 2332 index = pc[1]; 2333 } else { 2334 index = Bytes::get_Java_u2(pc+1); 2335 incr = 3; 2336 wide = true; 2337 } 2338 2339 ConstantPool* constants = METHOD->constants(); 2340 switch (constants->tag_at(index).value()) { 2341 case JVM_CONSTANT_Integer: 2342 SET_STACK_INT(constants->int_at(index), 0); 2343 break; 2344 2345 case JVM_CONSTANT_Float: 2346 SET_STACK_FLOAT(constants->float_at(index), 0); 2347 break; 2348 2349 case JVM_CONSTANT_String: 2350 { 2351 oop result = constants->resolved_references()->obj_at(index); 2352 if (result == NULL) { 2353 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2354 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2355 THREAD->set_vm_result(NULL); 2356 } else { 2357 VERIFY_OOP(result); 2358 SET_STACK_OBJECT(result, 0); 2359 } 2360 break; 2361 } 2362 2363 case JVM_CONSTANT_Class: 2364 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2365 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2366 break; 2367 2368 case JVM_CONSTANT_UnresolvedClass: 2369 case JVM_CONSTANT_UnresolvedClassInError: 2370 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2371 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2372 THREAD->set_vm_result(NULL); 2373 break; 2374 2375 default: ShouldNotReachHere(); 2376 } 2377 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2378 } 2379 2380 CASE(_ldc2_w): 2381 { 2382 u2 index = Bytes::get_Java_u2(pc+1); 2383 2384 ConstantPool* constants = METHOD->constants(); 2385 switch (constants->tag_at(index).value()) { 2386 2387 case JVM_CONSTANT_Long: 2388 SET_STACK_LONG(constants->long_at(index), 1); 2389 break; 2390 2391 case JVM_CONSTANT_Double: 2392 SET_STACK_DOUBLE(constants->double_at(index), 1); 2393 break; 2394 default: ShouldNotReachHere(); 2395 } 2396 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2397 } 2398 2399 CASE(_fast_aldc_w): 2400 CASE(_fast_aldc): { 2401 u2 index; 2402 int incr; 2403 if (opcode == Bytecodes::_fast_aldc) { 2404 index = pc[1]; 2405 incr = 2; 2406 } else { 2407 index = Bytes::get_native_u2(pc+1); 2408 incr = 3; 2409 } 2410 2411 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2412 // This kind of CP cache entry does not need to match the flags byte, because 2413 // there is a 1-1 relation between bytecode type and CP entry type. 2414 ConstantPool* constants = METHOD->constants(); 2415 oop result = constants->resolved_references()->obj_at(index); 2416 if (result == NULL) { 2417 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2418 handle_exception); 2419 result = THREAD->vm_result(); 2420 } 2421 2422 VERIFY_OOP(result); 2423 SET_STACK_OBJECT(result, 0); 2424 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2425 } 2426 2427 CASE(_invokedynamic): { 2428 2429 u4 index = Bytes::get_native_u4(pc+1); 2430 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2431 2432 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2433 // This kind of CP cache entry does not need to match the flags byte, because 2434 // there is a 1-1 relation between bytecode type and CP entry type. 2435 if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { 2436 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2437 handle_exception); 2438 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2439 } 2440 2441 Method* method = cache->f1_as_method(); 2442 if (VerifyOops) method->verify(); 2443 2444 if (cache->has_appendix()) { 2445 ConstantPool* constants = METHOD->constants(); 2446 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2447 MORE_STACK(1); 2448 } 2449 2450 istate->set_msg(call_method); 2451 istate->set_callee(method); 2452 istate->set_callee_entry_point(method->from_interpreted_entry()); 2453 istate->set_bcp_advance(5); 2454 2455 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2456 BI_PROFILE_UPDATE_CALL(); 2457 2458 UPDATE_PC_AND_RETURN(0); // I'll be back... 2459 } 2460 2461 CASE(_invokehandle): { 2462 2463 u2 index = Bytes::get_native_u2(pc+1); 2464 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2465 2466 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2467 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2468 handle_exception); 2469 cache = cp->entry_at(index); 2470 } 2471 2472 Method* method = cache->f1_as_method(); 2473 if (VerifyOops) method->verify(); 2474 2475 if (cache->has_appendix()) { 2476 ConstantPool* constants = METHOD->constants(); 2477 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2478 MORE_STACK(1); 2479 } 2480 2481 istate->set_msg(call_method); 2482 istate->set_callee(method); 2483 istate->set_callee_entry_point(method->from_interpreted_entry()); 2484 istate->set_bcp_advance(3); 2485 2486 // Invokehandle has got a call counter, just like a final call -> increment! 2487 BI_PROFILE_UPDATE_FINALCALL(); 2488 2489 UPDATE_PC_AND_RETURN(0); // I'll be back... 2490 } 2491 2492 CASE(_invokeinterface): { 2493 u2 index = Bytes::get_native_u2(pc+1); 2494 2495 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2496 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2497 2498 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2499 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2500 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2501 handle_exception); 2502 cache = cp->entry_at(index); 2503 } 2504 2505 istate->set_msg(call_method); 2506 2507 // Special case of invokeinterface called for virtual method of 2508 // java.lang.Object. See cpCacheOop.cpp for details. 2509 // This code isn't produced by javac, but could be produced by 2510 // another compliant java compiler. 2511 if (cache->is_forced_virtual()) { 2512 Method* callee; 2513 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2514 if (cache->is_vfinal()) { 2515 callee = cache->f2_as_vfinal_method(); 2516 // Profile 'special case of invokeinterface' final call. 2517 BI_PROFILE_UPDATE_FINALCALL(); 2518 } else { 2519 // Get receiver. 2520 int parms = cache->parameter_size(); 2521 // Same comments as invokevirtual apply here. 2522 oop rcvr = STACK_OBJECT(-parms); 2523 VERIFY_OOP(rcvr); 2524 Klass* rcvrKlass = rcvr->klass(); 2525 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2526 // Profile 'special case of invokeinterface' virtual call. 
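            // Note: this is the "forced virtual" case, i.e. an invokeinterface that
            // resolved to a public non-final method of java.lang.Object (for example
            // hashCode() or toString() called through an interface-typed receiver).
            // Such calls are dispatched through the receiver's vtable, exactly like
            // invokevirtual, instead of the itable walk performed further below for
            // ordinary interface calls.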
2527 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2528 } 2529 istate->set_callee(callee); 2530 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2531 #ifdef VM_JVMTI 2532 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2533 istate->set_callee_entry_point(callee->interpreter_entry()); 2534 } 2535 #endif /* VM_JVMTI */ 2536 istate->set_bcp_advance(5); 2537 UPDATE_PC_AND_RETURN(0); // I'll be back... 2538 } 2539 2540 // this could definitely be cleaned up QQQ 2541 Method* callee; 2542 Klass* iclass = cache->f1_as_klass(); 2543 // InstanceKlass* interface = (InstanceKlass*) iclass; 2544 // get receiver 2545 int parms = cache->parameter_size(); 2546 oop rcvr = STACK_OBJECT(-parms); 2547 CHECK_NULL(rcvr); 2548 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2549 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2550 int i; 2551 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2552 if (ki->interface_klass() == iclass) break; 2553 } 2554 // If the interface isn't found, this class doesn't implement this 2555 // interface. The link resolver checks this but only for the first 2556 // time this interface is called. 2557 if (i == int2->itable_length()) { 2558 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2559 } 2560 int mindex = cache->f2_as_index(); 2561 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2562 callee = im[mindex].method(); 2563 if (callee == NULL) { 2564 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2565 } 2566 2567 // Profile virtual call. 2568 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2569 2570 istate->set_callee(callee); 2571 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2572 #ifdef VM_JVMTI 2573 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2574 istate->set_callee_entry_point(callee->interpreter_entry()); 2575 } 2576 #endif /* VM_JVMTI */ 2577 istate->set_bcp_advance(5); 2578 UPDATE_PC_AND_RETURN(0); // I'll be back... 2579 } 2580 2581 CASE(_invokevirtual): 2582 CASE(_invokespecial): 2583 CASE(_invokestatic): { 2584 u2 index = Bytes::get_native_u2(pc+1); 2585 2586 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2587 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2588 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2589 2590 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2591 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2592 handle_exception); 2593 cache = cp->entry_at(index); 2594 } 2595 2596 istate->set_msg(call_method); 2597 { 2598 Method* callee; 2599 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2600 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2601 if (cache->is_vfinal()) { 2602 callee = cache->f2_as_vfinal_method(); 2603 // Profile final call. 
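          // Note: is_vfinal() means resolution proved the target can never be
          // overridden (a final method, or any method of a final class such as
          // java.lang.String), so the Method* cached in the f2 slot is used directly
          // and no per-receiver vtable lookup is required.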
2604 BI_PROFILE_UPDATE_FINALCALL(); 2605 } else { 2606 // get receiver 2607 int parms = cache->parameter_size(); 2608 // this works but needs a resourcemark and seems to create a vtable on every call: 2609 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2610 // 2611 // this fails with an assert 2612 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2613 // but this works 2614 oop rcvr = STACK_OBJECT(-parms); 2615 VERIFY_OOP(rcvr); 2616 Klass* rcvrKlass = rcvr->klass(); 2617 /* 2618 Executing this code in java.lang.String: 2619 public String(char value[]) { 2620 this.count = value.length; 2621 this.value = (char[])value.clone(); 2622 } 2623 2624 a find on rcvr->klass() reports: 2625 {type array char}{type array class} 2626 - klass: {other class} 2627 2628 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2629 because rcvr->klass()->is_instance_klass() == 0 2630 However it seems to have a vtable in the right location. Huh? 2631 Because vtables have the same offset for ArrayKlass and InstanceKlass. 2632 */ 2633 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2634 // Profile virtual call. 2635 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2636 } 2637 } else { 2638 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2639 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2640 } 2641 callee = cache->f1_as_method(); 2642 2643 // Profile call. 2644 BI_PROFILE_UPDATE_CALL(); 2645 } 2646 2647 istate->set_callee(callee); 2648 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2649 #ifdef VM_JVMTI 2650 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2651 istate->set_callee_entry_point(callee->interpreter_entry()); 2652 } 2653 #endif /* VM_JVMTI */ 2654 istate->set_bcp_advance(3); 2655 UPDATE_PC_AND_RETURN(0); // I'll be back... 2656 } 2657 } 2658 2659 /* Allocate memory for a new java object. */ 2660 2661 CASE(_newarray): { 2662 BasicType atype = (BasicType) *(pc+1); 2663 jint size = STACK_INT(-1); 2664 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2665 handle_exception); 2666 // Must prevent reordering of stores for object initialization 2667 // with stores that publish the new object. 2668 OrderAccess::storestore(); 2669 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2670 THREAD->set_vm_result(NULL); 2671 2672 UPDATE_PC_AND_CONTINUE(2); 2673 } 2674 2675 /* Throw an exception. */ 2676 2677 CASE(_athrow): { 2678 oop except_oop = STACK_OBJECT(-1); 2679 CHECK_NULL(except_oop); 2680 // set pending_exception so we use common code 2681 THREAD->set_pending_exception(except_oop, NULL, 0); 2682 goto handle_exception; 2683 } 2684 2685 /* goto and jsr. They are exactly the same except jsr pushes 2686 * the address of the next instruction first. 2687 */ 2688 2689 CASE(_jsr): { 2690 /* push bytecode index on stack */ 2691 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2692 MORE_STACK(1); 2693 /* FALL THROUGH */ 2694 } 2695 2696 CASE(_goto): 2697 { 2698 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2699 // Profile jump. 
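      // Note: offset is the signed 16-bit branch displacement taken from the two
      // operand bytes and is relative to the address of this goto opcode (branch_pc),
      // so a loop's closing branch has a negative offset and DO_BACKEDGE_CHECKS below
      // is meant to do its backedge (safepoint/OSR) bookkeeping for such backward
      // branches. Also note that _jsr above falls through into this code after
      // pushing its return address.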
2700 BI_PROFILE_UPDATE_JUMP(); 2701 address branch_pc = pc; 2702 UPDATE_PC(offset); 2703 DO_BACKEDGE_CHECKS(offset, branch_pc); 2704 CONTINUE; 2705 } 2706 2707 CASE(_jsr_w): { 2708 /* push return address on the stack */ 2709 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2710 MORE_STACK(1); 2711 /* FALL THROUGH */ 2712 } 2713 2714 CASE(_goto_w): 2715 { 2716 int32_t offset = Bytes::get_Java_u4(pc + 1); 2717 // Profile jump. 2718 BI_PROFILE_UPDATE_JUMP(); 2719 address branch_pc = pc; 2720 UPDATE_PC(offset); 2721 DO_BACKEDGE_CHECKS(offset, branch_pc); 2722 CONTINUE; 2723 } 2724 2725 /* return from a jsr or jsr_w */ 2726 2727 CASE(_ret): { 2728 // Profile ret. 2729 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2730 // Now, update the pc. 2731 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2732 UPDATE_PC_AND_CONTINUE(0); 2733 } 2734 2735 /* debugger breakpoint */ 2736 2737 CASE(_breakpoint): { 2738 Bytecodes::Code original_bytecode; 2739 DECACHE_STATE(); 2740 SET_LAST_JAVA_FRAME(); 2741 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2742 METHOD, pc); 2743 RESET_LAST_JAVA_FRAME(); 2744 CACHE_STATE(); 2745 if (THREAD->has_pending_exception()) goto handle_exception; 2746 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2747 handle_exception); 2748 2749 opcode = (jubyte)original_bytecode; 2750 goto opcode_switch; 2751 } 2752 2753 DEFAULT: 2754 fatal("Unimplemented opcode %d = %s", opcode, 2755 Bytecodes::name((Bytecodes::Code)opcode)); 2756 goto finish; 2757 2758 } /* switch(opc) */ 2759 2760 2761 #ifdef USELABELS 2762 check_for_exception: 2763 #endif 2764 { 2765 if (!THREAD->has_pending_exception()) { 2766 CONTINUE; 2767 } 2768 /* We will be gcsafe soon, so flush our state. */ 2769 DECACHE_PC(); 2770 goto handle_exception; 2771 } 2772 do_continue: ; 2773 2774 } /* while (1) interpreter loop */ 2775 2776 2777 // An exception exists in the thread state see whether this activation can handle it 2778 handle_exception: { 2779 2780 HandleMarkCleaner __hmc(THREAD); 2781 Handle except_oop(THREAD, THREAD->pending_exception()); 2782 // Prevent any subsequent HandleMarkCleaner in the VM 2783 // from freeing the except_oop handle. 2784 HandleMark __hm(THREAD); 2785 2786 THREAD->clear_pending_exception(); 2787 assert(except_oop(), "No exception to process"); 2788 intptr_t continuation_bci; 2789 // expression stack is emptied 2790 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2791 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2792 handle_exception); 2793 2794 except_oop = THREAD->vm_result(); 2795 THREAD->set_vm_result(NULL); 2796 if (continuation_bci >= 0) { 2797 // Place exception on top of stack 2798 SET_STACK_OBJECT(except_oop(), 0); 2799 MORE_STACK(1); 2800 pc = METHOD->code_base() + continuation_bci; 2801 if (log_is_enabled(Info, exceptions)) { 2802 ResourceMark rm(THREAD); 2803 stringStream tempst; 2804 tempst.print("interpreter method <%s>\n" 2805 " at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2806 METHOD->print_value_string(), 2807 (int)(istate->bcp() - METHOD->code_base()), 2808 (int)continuation_bci, p2i(THREAD)); 2809 Exceptions::log_exception(except_oop, tempst); 2810 } 2811 // for AbortVMOnException flag 2812 Exceptions::debug_check_abort(except_oop); 2813 2814 // Update profiling data. 
2815 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2816 goto run; 2817 } 2818 if (log_is_enabled(Info, exceptions)) { 2819 ResourceMark rm; 2820 stringStream tempst; 2821 tempst.print("interpreter method <%s>\n" 2822 " at bci %d, unwinding for thread " INTPTR_FORMAT, 2823 METHOD->print_value_string(), 2824 (int)(istate->bcp() - METHOD->code_base()), 2825 p2i(THREAD)); 2826 Exceptions::log_exception(except_oop, tempst); 2827 } 2828 // for AbortVMOnException flag 2829 Exceptions::debug_check_abort(except_oop); 2830 2831 // No handler in this activation, unwind and try again 2832 THREAD->set_pending_exception(except_oop(), NULL, 0); 2833 goto handle_return; 2834 } // handle_exception: 2835 2836 // Return from an interpreter invocation with the result of the interpretation 2837 // on the top of the Java Stack (or a pending exception) 2838 2839 handle_Pop_Frame: { 2840 2841 // We don't really do anything special here except that we must be aware 2842 // that we can get here without ever having locked the method (if it is synchronized). 2843 // Also we skip the notification of the exit. 2844 2845 istate->set_msg(popping_frame); 2846 // Clear pending so that, while the pop is in process, 2847 // we don't start another one if a call_vm is done. 2848 THREAD->clr_pop_frame_pending(); 2849 // Let the interpreter (only) see that we're in the process of popping a frame 2850 THREAD->set_pop_frame_in_process(); 2851 2852 goto handle_return; 2853 2854 } // handle_Pop_Frame 2855 2856 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2857 // given by the invoker of the early return. 2858 handle_Early_Return: { 2859 2860 istate->set_msg(early_return); 2861 2862 // Clear expression stack. 2863 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2864 2865 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2866 2867 // Push the value to be returned. 2868 switch (istate->method()->result_type()) { 2869 case T_BOOLEAN: 2870 case T_SHORT: 2871 case T_BYTE: 2872 case T_CHAR: 2873 case T_INT: 2874 SET_STACK_INT(ts->earlyret_value().i, 0); 2875 MORE_STACK(1); 2876 break; 2877 case T_LONG: 2878 SET_STACK_LONG(ts->earlyret_value().j, 1); 2879 MORE_STACK(2); 2880 break; 2881 case T_FLOAT: 2882 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2883 MORE_STACK(1); 2884 break; 2885 case T_DOUBLE: 2886 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2887 MORE_STACK(2); 2888 break; 2889 case T_ARRAY: 2890 case T_OBJECT: 2891 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2892 MORE_STACK(1); 2893 break; 2894 } 2895 2896 ts->clr_earlyret_value(); 2897 ts->set_earlyret_oop(NULL); 2898 ts->clr_earlyret_pending(); 2899 2900 // Fall through to handle_return. 2901 2902 } // handle_Early_Return 2903 2904 handle_return: { 2905 // A storestore barrier is required to order initialization of 2906 // final fields with publishing the reference to the object that 2907 // holds the field. Without the barrier the value of final fields 2908 // can be observed to change.
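  // For illustration: given
  //   class C { final int x; C() { x = 42; } }
  // and a publication such as "shared = new C();", the storestore barrier below keeps
  // the store to x ordered before any later store from this thread that publishes the
  // reference, so the final field could not otherwise be observed as its default 0.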
2909 OrderAccess::storestore(); 2910 2911 DECACHE_STATE(); 2912 2913 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2914 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2915 Handle original_exception(THREAD, THREAD->pending_exception()); 2916 Handle illegal_state_oop(THREAD, NULL); 2917 2918 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2919 // in any following VM entries from freeing our live handles, but illegal_state_oop 2920 // isn't really allocated yet and so doesn't become live until later and 2921 // in unpredictable places. Instead we must protect the places where we enter the 2922 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2923 // a NULL oop in it and then overwrite the oop later as needed. Unfortunately 2924 // this isn't possible. 2925 2926 THREAD->clear_pending_exception(); 2927 2928 // 2929 // As far as we are concerned we have returned. If we have a pending exception, 2930 // that will be returned as this invocation's result. However, if we get any 2931 // exception(s) while checking monitor state, one of those IllegalMonitorStateExceptions 2932 // will be our final result (i.e. a monitor exception trumps a pending exception). 2933 // 2934 2935 // If we never locked the method (or really passed the point where we would have), 2936 // there is no need to unlock it (or look for other monitors), since that 2937 // could not have happened. 2938 2939 if (THREAD->do_not_unlock()) { 2940 2941 // Never locked, reset the flag now because obviously any caller must 2942 // have passed their point of locking for us to have gotten here. 2943 2944 THREAD->clr_do_not_unlock(); 2945 } else { 2946 // At this point we consider that we have returned. We now check that the 2947 // locks were properly block structured. If we find that they were not 2948 // used properly we will return with an illegal monitor exception. 2949 // The exception is checked by the caller, not the callee, since this 2950 // checking is considered to be part of the invocation and therefore 2951 // in the caller's scope (JVM spec 8.13). 2952 // 2953 // Another weird thing to watch for is if the method was locked 2954 // recursively and then not exited properly. This means we must 2955 // examine all the entries in reverse time (and stack) order and 2956 // unlock as we find them. If we find the method monitor before 2957 // we are at the initial entry then we should throw an exception. 2958 // It is not clear that the template-based interpreter does this 2959 // correctly. 2960 2961 BasicObjectLock* base = istate->monitor_base(); 2962 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 2963 bool method_unlock_needed = METHOD->is_synchronized(); 2964 // We know the initial monitor was used for the method, so don't check that 2965 // slot in the loop. 2966 if (method_unlock_needed) base--; 2967 2968 // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
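      // For example, hand-crafted (non-javac) bytecode could execute monitorenter on
      // some object and then return without the matching monitorexit; the loop below
      // finds the still-locked slot (end->obj() != NULL), unlocks it, and records an
      // IllegalMonitorStateException as the result unless errors are being suppressed
      // for a popframe/early-return.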
2969 while (end < base) { 2970 oop lockee = end->obj(); 2971 if (lockee != NULL) { 2972 BasicLock* lock = end->lock(); 2973 markOop header = lock->displaced_header(); 2974 end->set_obj(NULL); 2975 2976 if (!lockee->mark()->has_bias_pattern()) { 2977 // If it isn't recursive we either must swap old header or call the runtime 2978 if (header != NULL) { 2979 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 2980 // restore object for the slow case 2981 end->set_obj(lockee); 2982 { 2983 // Prevent any HandleMarkCleaner from freeing our live handles 2984 HandleMark __hm(THREAD); 2985 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 2986 } 2987 } 2988 } 2989 } 2990 // One error is plenty 2991 if (illegal_state_oop() == NULL && !suppress_error) { 2992 { 2993 // Prevent any HandleMarkCleaner from freeing our live handles 2994 HandleMark __hm(THREAD); 2995 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2996 } 2997 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2998 illegal_state_oop = THREAD->pending_exception(); 2999 THREAD->clear_pending_exception(); 3000 } 3001 } 3002 end++; 3003 } 3004 // Unlock the method if needed 3005 if (method_unlock_needed) { 3006 if (base->obj() == NULL) { 3007 // The method is already unlocked this is not good. 3008 if (illegal_state_oop() == NULL && !suppress_error) { 3009 { 3010 // Prevent any HandleMarkCleaner from freeing our live handles 3011 HandleMark __hm(THREAD); 3012 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 3013 } 3014 assert(THREAD->has_pending_exception(), "Lost our exception!"); 3015 illegal_state_oop = THREAD->pending_exception(); 3016 THREAD->clear_pending_exception(); 3017 } 3018 } else { 3019 // 3020 // The initial monitor is always used for the method 3021 // However if that slot is no longer the oop for the method it was unlocked 3022 // and reused by something that wasn't unlocked! 3023 // 3024 // deopt can come in with rcvr dead because c2 knows 3025 // its value is preserved in the monitor. So we can't use locals[0] at all 3026 // and must use first monitor slot. 3027 // 3028 oop rcvr = base->obj(); 3029 if (rcvr == NULL) { 3030 if (!suppress_error) { 3031 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3032 illegal_state_oop = THREAD->pending_exception(); 3033 THREAD->clear_pending_exception(); 3034 } 3035 } else if (UseHeavyMonitors) { 3036 { 3037 // Prevent any HandleMarkCleaner from freeing our live handles. 
3038 HandleMark __hm(THREAD); 3039 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3040 } 3041 if (THREAD->has_pending_exception()) { 3042 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3043 THREAD->clear_pending_exception(); 3044 } 3045 } else { 3046 BasicLock* lock = base->lock(); 3047 markOop header = lock->displaced_header(); 3048 base->set_obj(NULL); 3049 3050 if (!rcvr->mark()->has_bias_pattern()) { 3051 base->set_obj(NULL); 3052 // If it isn't recursive we either must swap old header or call the runtime 3053 if (header != NULL) { 3054 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { 3055 // restore object for the slow case 3056 base->set_obj(rcvr); 3057 { 3058 // Prevent any HandleMarkCleaner from freeing our live handles 3059 HandleMark __hm(THREAD); 3060 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3061 } 3062 if (THREAD->has_pending_exception()) { 3063 if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); 3064 THREAD->clear_pending_exception(); 3065 } 3066 } 3067 } 3068 } 3069 } 3070 } 3071 } 3072 } 3073 // Clear the do_not_unlock flag now. 3074 THREAD->clr_do_not_unlock(); 3075 3076 // 3077 // Notify jvmti/jvmdi 3078 // 3079 // NOTE: we do not notify a method_exit if we have a pending exception, 3080 // including an exception we generate for unlocking checks. In the former 3081 // case, JVMDI has already been notified by our call for the exception handler 3082 // and in both cases as far as JVMDI is concerned we have already returned. 3083 // If we notify it again, JVMDI will be all confused about how many frames 3084 // are still on the stack (4340444). 3085 // 3086 // NOTE Further! It turns out that the JVMTI spec in fact expects to see 3087 // method_exit events whenever we leave an activation unless it was done 3088 // for popframe. This is nothing like jvmdi. However we are passing the 3089 // tests at the moment (apparently because they are jvmdi based) so rather 3090 // than change this code and possibly fail tests we will leave it alone 3091 // (with this note) in anticipation of changing the vm and the tests 3092 // simultaneously. 3093 3094 3095 // 3096 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3097 3098 3099 3100 #ifdef VM_JVMTI 3101 if (_jvmti_interp_events) { 3102 // Whenever JVMTI puts a thread in interp_only_mode, method 3103 // entry/exit events are sent for that thread to track stack depth. 3104 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3105 { 3106 // Prevent any HandleMarkCleaner from freeing our live handles 3107 HandleMark __hm(THREAD); 3108 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3109 } 3110 } 3111 } 3112 #endif /* VM_JVMTI */ 3113 3114 // 3115 // See if we are returning any exception. 3116 // A pending exception that was pending prior to a possible popping frame 3117 // overrides the popping frame. 3118 // 3119 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3120 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3121 // Inform the frame manager we have no result.
3122 istate->set_msg(throwing_exception); 3123 if (illegal_state_oop() != NULL) 3124 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3125 else 3126 THREAD->set_pending_exception(original_exception(), NULL, 0); 3127 UPDATE_PC_AND_RETURN(0); 3128 } 3129 3130 if (istate->msg() == popping_frame) { 3131 // Make it simpler on the assembly code and set the message for the frame pop. 3132 // returns 3133 if (istate->prev() == NULL) { 3134 // We must be returning to a deoptimized frame (because popframe only happens between 3135 // two interpreted frames). We need to save the current arguments in C heap so that 3136 // the deoptimized frame when it restarts can copy the arguments to its expression 3137 // stack and re-execute the call. We also have to notify deoptimization that this 3138 // has occurred and have it pick up the preserved args and copy them to the deoptimized frame's 3139 // java expression stack. Yuck. 3140 // 3141 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3142 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3143 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3144 } 3145 } else { 3146 istate->set_msg(return_from_method); 3147 } 3148 3149 // Normal return 3150 // Advance the pc and return to frame manager 3151 UPDATE_PC_AND_RETURN(1); 3152 } /* handle_return: */ 3153 3154 // This is really a fatal error return 3155 3156 finish: 3157 DECACHE_TOS(); 3158 DECACHE_PC(); 3159 3160 return; 3161 } 3162 3163 /* 3164 * All the code following this point is only produced once and is not present 3165 * in the JVMTI version of the interpreter 3166 */ 3167 3168 #ifndef VM_JVMTI 3169 3170 // This constructor should only be used to construct the object to signal 3171 // interpreter initialization. All other instances should be created by 3172 // the frame manager. 3173 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3174 if (msg != initialize) ShouldNotReachHere(); 3175 _msg = msg; 3176 _self_link = this; 3177 _prev_link = NULL; 3178 } 3179 3180 // Inline static functions for Java Stack and Local manipulation 3181 3182 // The implementations are platform dependent. We have to worry about alignment 3183 // issues on some machines, which can change on the same platform depending on 3184 // whether it is an LP64 machine as well.
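// Note on conventions: these helpers mirror the STACK_* / LOCALS_* macros used in the
// interpreter loop above. Offsets are logical slot numbers relative to tos or locals
// (offset -1 names the top of the expression stack), and Interpreter::expr_index_at()
// / local_index_at() translate them into machine-dependent word indices. For example,
// stack_int(tos, -1) reads the slot the loop addresses as STACK_INT(-1), and the long
// setters below additionally scribble a marker value (0xdeedbeeb) into the adjacent
// slot of the two-slot long value.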
3185 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3186 return (address) tos[Interpreter::expr_index_at(-offset)]; 3187 } 3188 3189 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3190 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3191 } 3192 3193 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3194 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3195 } 3196 3197 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3198 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3199 } 3200 3201 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3202 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3203 } 3204 3205 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3206 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3207 } 3208 3209 // only used for value types 3210 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3211 int offset) { 3212 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3213 } 3214 3215 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3216 int offset) { 3217 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3218 } 3219 3220 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3221 int offset) { 3222 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3223 } 3224 3225 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3226 int offset) { 3227 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3228 } 3229 3230 // needs to be platform dep for the 32 bit platforms. 3231 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3232 int offset) { 3233 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3234 } 3235 3236 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3237 address addr, int offset) { 3238 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3239 ((VMJavaVal64*)addr)->d); 3240 } 3241 3242 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3243 int offset) { 3244 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3245 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3246 } 3247 3248 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3249 address addr, int offset) { 3250 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3251 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3252 ((VMJavaVal64*)addr)->l; 3253 } 3254 3255 // Locals 3256 3257 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3258 return (address)locals[Interpreter::local_index_at(-offset)]; 3259 } 3260 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3261 return (jint)locals[Interpreter::local_index_at(-offset)]; 3262 } 3263 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3264 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3265 } 3266 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3267 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3268 } 3269 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3270 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3271 } 3272 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3273 return 
// Returns the address of locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                   address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                   jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                   jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                   oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                   jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                   jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                   address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                   address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}
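// Stack transitions implemented by the dup family below (per the JVM
// specification; the rightmost value is the top of the operand stack).
// Added as orientation, not part of the original commentary:
//   dup     : ..., v1             ->  ..., v1, v1
//   dup2    : ..., v2, v1         ->  ..., v2, v1, v2, v1
//   dup_x1  : ..., v2, v1         ->  ..., v1, v2, v1
//   dup_x2  : ..., v3, v2, v1     ->  ..., v1, v3, v2, v1
//   dup2_x1 : ..., v3, v2, v1     ->  ..., v2, v1, v3, v2, v1
//   dup2_x2 : ..., v4, v3, v2, v1 ->  ..., v2, v1, v4, v3, v2, v1
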
void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}
void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}


void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

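// Added commentary (assumption, not original): PI below appears to exist so
// that an interpreter state object can be dumped from a native debugger by
// passing its raw address, e.g. something like 'call PI(<istate address>)'
// from a gdb session.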
extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // JVMTI
#endif // CC_INTERP