/*
 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"

// no precompiled headers
#ifdef CC_INTERP

/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step thru bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif

#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif

/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE

/*
  Interpreter safepoint: it is expected that the interpreter will have no
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT                                                                 \
    if ( SafepointSynchronize::is_synchronizing()) {                              \
        {                                                                         \
          /* zap freed handles rather than GC'ing them */                         \
          HandleMarkCleaner __hmc(THREAD);                                        \
        }                                                                         \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
    }

/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                             \
    DECACHE_STATE();                                                              \
    SET_LAST_JAVA_FRAME();                                                        \
    {                                                                             \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI());          \
       ThreadInVMfromJava trans(THREAD);                                          \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);             \
    }                                                                             \
    RESET_LAST_JAVA_FRAME();                                                      \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap)                                     \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                                 \
    goto handle_exception;

#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)InterpreterRuntime::trace_bytecode(THREAD, 0,                                    \
                                        topOfStack[Interpreter::expr_index_at(1)],                   \
                                        topOfStack[Interpreter::expr_index_at(2)]),                  \
                                        handle_exception);                                           \
    }                                                                                                \
}
#endif

#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result if we call at_single_stepping_point() we refetch opcode
   to get the current opcode. This will override any other prefetching
   that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
{                                                                                \
    if (_jvmti_interp_events) {                                                  \
      if (JvmtiExport::should_post_single_step()) {                              \
        DECACHE_STATE();                                                         \
        SET_LAST_JAVA_FRAME();                                                   \
        ThreadInVMfromJava trans(THREAD);                                        \
        JvmtiExport::at_single_stepping_point(THREAD,                            \
                                              istate->method(),                  \
                                              pc);                               \
        RESET_LAST_JAVA_FRAME();                                                 \
        CACHE_STATE();                                                           \
        if (THREAD->pop_frame_pending() &&                                       \
            !THREAD->pop_frame_in_process()) {                                   \
          goto handle_Pop_Frame;                                                 \
        }                                                                        \
        if (THREAD->jvmti_thread_state() &&                                      \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) {               \
          goto handle_Early_Return;                                              \
        }                                                                        \
        opcode = *pc;                                                            \
      }                                                                          \
    }                                                                            \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif

/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif


#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */

// About to call a new method: save the adjusted pc and return to the frame manager.
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;


#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res)                                                                 \
  res = METHOD->method_counters();                                                               \
  if (res == NULL) {                                                                             \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception);  \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange.
// The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the sum (scaled) of invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp


#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                         \
    if ((skip) <= 0) {                                                              \
      MethodCounters* mcs;                                                          \
      GET_METHOD_COUNTERS(mcs);                                                     \
      if (UseLoopCounter) {                                                         \
        bool do_OSR = UseOnStackReplacement;                                        \
        mcs->backedge_counter()->increment();                                       \
        if (ProfileInterpreter) {                                                   \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);                   \
          /* Check for overflow against MDO count. */                               \
          do_OSR = do_OSR                                                           \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
            /* When ProfileInterpreter is on, the backedge_count comes     */       \
            /* from the methodDataOop, which value does not get reset on   */       \
            /* the call to frequency_counter_overflow(). To avoid          */       \
            /* excessive calls to the overflow routine while the method is */       \
            /* being compiled, add a second test to make sure the overflow */       \
            /* function is called only once every overflow_frequency.      */       \
            && (!(mdo_last_branch_taken_count & 1023));                             \
        } else {                                                                    \
          /* check for overflow of backedge counter */                              \
          do_OSR = do_OSR                                                           \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        }                                                                           \
        if (do_OSR) {                                                               \
          nmethod* osr_nmethod;                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                      \
          if (osr_nmethod != NULL && osr_nmethod->is_in_use()) {                    \
            intptr_t* buf;                                                          \
            /* Call OSR migration with last java frame only, no checks. */          \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));      \
            istate->set_msg(do_osr);                                                \
            istate->set_osr_buf((address)buf);                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                        \
            return;                                                                 \
          }                                                                         \
        }                                                                           \
      }  /* UseCompiler ... */                                                      \
      SAFEPOINT;                                                                    \
    }

/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))

/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                                         \
        if ((obj_) == NULL) {                                                                    \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
        }                                                                                        \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val)          (((uintptr_t)(val) + 3) & ~3)

// Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func)                                    \
        DECACHE_STATE();                                           \
        SET_LAST_JAVA_FRAME();                                     \
        func;                                                      \
        RESET_LAST_JAVA_FRAME();                                   \
        CACHE_STATE();

// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func)                                      \
        CALL_VM_NAKED_LJF(func)                                    \
        if (THREAD->pop_frame_pending() &&                         \
            !THREAD->pop_frame_in_process()) {                     \
          goto handle_Pop_Frame;                                   \
        }                                                          \
        if (THREAD->jvmti_thread_state() &&                        \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return;                                \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                                     \
          CALL_VM_NOCHECK(func);                                   \
          if (THREAD->has_pending_exception()) goto label;         \
        }

/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * The runWithChecks is used if JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif

  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler to test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)

#ifdef ASSERT
  if (istate->_msg != initialize) {
    assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate,
  // so save a copy so we can verify it.
491 interpreterState orig = istate; 492 #endif 493 494 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ 495 register address pc = istate->bcp(); 496 register jubyte opcode; 497 register intptr_t* locals = istate->locals(); 498 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() 499 #ifdef LOTS_OF_REGS 500 register JavaThread* THREAD = istate->thread(); 501 #else 502 #undef THREAD 503 #define THREAD istate->thread() 504 #endif 505 506 #ifdef USELABELS 507 const static void* const opclabels_data[256] = { 508 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, 509 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, 510 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, 511 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1, 512 513 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w, 514 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload, 515 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1, 516 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1, 517 518 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1, 519 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1, 520 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1, 521 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload, 522 523 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload, 524 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore, 525 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0, 526 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0, 527 528 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0, 529 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0, 530 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0, 531 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore, 532 533 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore, 534 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop, 535 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2, 536 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap, 537 538 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd, 539 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub, 540 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul, 541 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv, 542 543 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem, 544 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg, 545 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr, 546 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land, 547 548 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor, 549 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d, 550 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i, 551 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l, 552 553 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s, 554 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl, 555 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt, 556 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq, 557 558 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt, 559 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto, 560 /* 0xA8 */ &&opc_jsr, &&opc_ret, 
&&opc_tableswitch,&&opc_lookupswitch, 561 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn, 562 563 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic, 564 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, 565 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, 566 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, 567 568 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, 569 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, 570 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default, 571 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 572 573 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 574 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 575 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 576 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 577 578 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 579 /* 0xE4 */ &&opc_default, &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, 580 /* 0xE8 */ &&opc_return_register_finalizer, 581 &&opc_invokehandle, &&opc_default, &&opc_default, 582 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 583 584 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 585 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 586 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, 587 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default 588 }; 589 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0]; 590 #endif /* USELABELS */ 591 592 #ifdef ASSERT 593 // this will trigger a VERIFY_OOP on entry 594 if (istate->msg() != initialize && ! METHOD->is_static()) { 595 oop rcvr = LOCALS_OBJECT(0); 596 VERIFY_OOP(rcvr); 597 } 598 #endif 599 // #define HACK 600 #ifdef HACK 601 bool interesting = false; 602 #endif // HACK 603 604 /* QQQ this should be a stack method so we don't know actual direction */ 605 guarantee(istate->msg() == initialize || 606 topOfStack >= istate->stack_limit() && 607 topOfStack < istate->stack_base(), 608 "Stack top out of range"); 609 610 #ifdef CC_INTERP_PROFILE 611 // MethodData's last branch taken count. 612 uint mdo_last_branch_taken_count = 0; 613 #else 614 const uint mdo_last_branch_taken_count = 0; 615 #endif 616 617 switch (istate->msg()) { 618 case initialize: { 619 if (initialized++) ShouldNotReachHere(); // Only one initialize call. 620 _compiling = (UseCompiler || CountCompiledCalls); 621 #ifdef VM_JVMTI 622 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); 623 #endif 624 return; 625 } 626 break; 627 case method_entry: { 628 THREAD->set_do_not_unlock(); 629 // count invocations 630 assert(initialized, "Interpreter not initialized"); 631 if (_compiling) { 632 MethodCounters* mcs; 633 GET_METHOD_COUNTERS(mcs); 634 #if COMPILER2_OR_JVMCI 635 if (ProfileInterpreter) { 636 METHOD->increment_interpreter_invocation_count(THREAD); 637 } 638 #endif 639 mcs->invocation_counter()->increment(); 640 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) { 641 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); 642 // We no longer retry on a counter overflow. 643 } 644 // Get or create profile data. 
Check for pending (async) exceptions. 645 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 646 SAFEPOINT; 647 } 648 649 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 650 // initialize 651 os::breakpoint(); 652 } 653 654 #ifdef HACK 655 { 656 ResourceMark rm; 657 char *method_name = istate->method()->name_and_sig_as_C_string(); 658 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 659 tty->print_cr("entering: depth %d bci: %d", 660 (istate->_stack_base - istate->_stack), 661 istate->_bcp - istate->_method->code_base()); 662 interesting = true; 663 } 664 } 665 #endif // HACK 666 667 // Lock method if synchronized. 668 if (METHOD->is_synchronized()) { 669 // oop rcvr = locals[0].j.r; 670 oop rcvr; 671 if (METHOD->is_static()) { 672 rcvr = METHOD->constants()->pool_holder()->java_mirror(); 673 } else { 674 rcvr = LOCALS_OBJECT(0); 675 VERIFY_OOP(rcvr); 676 } 677 // The initial monitor is ours for the taking. 678 // Monitor not filled in frame manager any longer as this caused race condition with biased locking. 679 BasicObjectLock* mon = &istate->monitor_base()[-1]; 680 mon->set_obj(rcvr); 681 bool success = false; 682 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 683 markOop mark = rcvr->mark(); 684 intptr_t hash = (intptr_t) markOopDesc::no_hash; 685 // Implies UseBiasedLocking. 686 if (mark->has_bias_pattern()) { 687 uintptr_t thread_ident; 688 uintptr_t anticipated_bias_locking_value; 689 thread_ident = (uintptr_t)istate->thread(); 690 anticipated_bias_locking_value = 691 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 692 ~((uintptr_t) markOopDesc::age_mask_in_place); 693 694 if (anticipated_bias_locking_value == 0) { 695 // Already biased towards this thread, nothing to do. 696 if (PrintBiasedLockingStatistics) { 697 (* BiasedLocking::biased_lock_entry_count_addr())++; 698 } 699 success = true; 700 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 701 // Try to revoke bias. 702 markOop header = rcvr->klass()->prototype_header(); 703 if (hash != markOopDesc::no_hash) { 704 header = header->copy_set_hash(hash); 705 } 706 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) { 707 if (PrintBiasedLockingStatistics) 708 (*BiasedLocking::revoked_lock_entry_count_addr())++; 709 } 710 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { 711 // Try to rebias. 712 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident); 713 if (hash != markOopDesc::no_hash) { 714 new_header = new_header->copy_set_hash(hash); 715 } 716 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) { 717 if (PrintBiasedLockingStatistics) { 718 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 719 } 720 } else { 721 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 722 } 723 success = true; 724 } else { 725 // Try to bias towards thread in case object is anonymously biased. 726 markOop header = (markOop) ((uintptr_t) mark & 727 ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 728 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 729 if (hash != markOopDesc::no_hash) { 730 header = header->copy_set_hash(hash); 731 } 732 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 733 // Debugging hint. 
734 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 735 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) { 736 if (PrintBiasedLockingStatistics) { 737 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 738 } 739 } else { 740 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 741 } 742 success = true; 743 } 744 } 745 746 // Traditional lightweight locking. 747 if (!success) { 748 markOop displaced = rcvr->mark()->set_unlocked(); 749 mon->lock()->set_displaced_header(displaced); 750 bool call_vm = UseHeavyMonitors; 751 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { 752 // Is it simple recursive case? 753 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 754 mon->lock()->set_displaced_header(NULL); 755 } else { 756 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); 757 } 758 } 759 } 760 } 761 THREAD->clr_do_not_unlock(); 762 763 // Notify jvmti 764 #ifdef VM_JVMTI 765 if (_jvmti_interp_events) { 766 // Whenever JVMTI puts a thread in interp_only_mode, method 767 // entry/exit events are sent for that thread to track stack depth. 768 if (THREAD->is_interp_only_mode()) { 769 CALL_VM(InterpreterRuntime::post_method_entry(THREAD), 770 handle_exception); 771 } 772 } 773 #endif /* VM_JVMTI */ 774 775 goto run; 776 } 777 778 case popping_frame: { 779 // returned from a java call to pop the frame, restart the call 780 // clear the message so we don't confuse ourselves later 781 assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); 782 istate->set_msg(no_request); 783 if (_compiling) { 784 // Set MDX back to the ProfileData of the invoke bytecode that will be 785 // restarted. 786 SET_MDX(NULL); 787 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 788 } 789 THREAD->clr_pop_frame_in_process(); 790 goto run; 791 } 792 793 case method_resume: { 794 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { 795 // resume 796 os::breakpoint(); 797 } 798 #ifdef HACK 799 { 800 ResourceMark rm; 801 char *method_name = istate->method()->name_and_sig_as_C_string(); 802 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { 803 tty->print_cr("resume: depth %d bci: %d", 804 (istate->_stack_base - istate->_stack) , 805 istate->_bcp - istate->_method->code_base()); 806 interesting = true; 807 } 808 } 809 #endif // HACK 810 // returned from a java call, continue executing. 811 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { 812 goto handle_Pop_Frame; 813 } 814 if (THREAD->jvmti_thread_state() && 815 THREAD->jvmti_thread_state()->is_earlyret_pending()) { 816 goto handle_Early_Return; 817 } 818 819 if (THREAD->has_pending_exception()) goto handle_exception; 820 // Update the pc by the saved amount of the invoke bytecode size 821 UPDATE_PC(istate->bcp_advance()); 822 823 if (_compiling) { 824 // Get or create profile data. Check for pending (async) exceptions. 825 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 826 } 827 goto run; 828 } 829 830 case deopt_resume2: { 831 // Returned from an opcode that will reexecute. Deopt was 832 // a result of a PopFrame request. 833 // 834 835 if (_compiling) { 836 // Get or create profile data. Check for pending (async) exceptions. 837 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 838 } 839 goto run; 840 } 841 842 case deopt_resume: { 843 // Returned from an opcode that has completed. 
The stack has 844 // the result all we need to do is skip across the bytecode 845 // and continue (assuming there is no exception pending) 846 // 847 // compute continuation length 848 // 849 // Note: it is possible to deopt at a return_register_finalizer opcode 850 // because this requires entering the vm to do the registering. While the 851 // opcode is complete we can't advance because there are no more opcodes 852 // much like trying to deopt at a poll return. In that has we simply 853 // get out of here 854 // 855 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) { 856 // this will do the right thing even if an exception is pending. 857 goto handle_return; 858 } 859 UPDATE_PC(Bytecodes::length_at(METHOD, pc)); 860 if (THREAD->has_pending_exception()) goto handle_exception; 861 862 if (_compiling) { 863 // Get or create profile data. Check for pending (async) exceptions. 864 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); 865 } 866 goto run; 867 } 868 case got_monitors: { 869 // continue locking now that we have a monitor to use 870 // we expect to find newly allocated monitor at the "top" of the monitor stack. 871 oop lockee = STACK_OBJECT(-1); 872 VERIFY_OOP(lockee); 873 // derefing's lockee ought to provoke implicit null check 874 // find a free monitor 875 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); 876 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); 877 entry->set_obj(lockee); 878 bool success = false; 879 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 880 881 markOop mark = lockee->mark(); 882 intptr_t hash = (intptr_t) markOopDesc::no_hash; 883 // implies UseBiasedLocking 884 if (mark->has_bias_pattern()) { 885 uintptr_t thread_ident; 886 uintptr_t anticipated_bias_locking_value; 887 thread_ident = (uintptr_t)istate->thread(); 888 anticipated_bias_locking_value = 889 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 890 ~((uintptr_t) markOopDesc::age_mask_in_place); 891 892 if (anticipated_bias_locking_value == 0) { 893 // already biased towards this thread, nothing to do 894 if (PrintBiasedLockingStatistics) { 895 (* BiasedLocking::biased_lock_entry_count_addr())++; 896 } 897 success = true; 898 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 899 // try revoke bias 900 markOop header = lockee->klass()->prototype_header(); 901 if (hash != markOopDesc::no_hash) { 902 header = header->copy_set_hash(hash); 903 } 904 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 905 if (PrintBiasedLockingStatistics) { 906 (*BiasedLocking::revoked_lock_entry_count_addr())++; 907 } 908 } 909 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 910 // try rebias 911 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 912 if (hash != markOopDesc::no_hash) { 913 new_header = new_header->copy_set_hash(hash); 914 } 915 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { 916 if (PrintBiasedLockingStatistics) { 917 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 918 } 919 } else { 920 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 921 } 922 success = true; 923 } else { 924 // try to bias towards thread in case object is anonymously biased 925 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 926 
(uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); 927 if (hash != markOopDesc::no_hash) { 928 header = header->copy_set_hash(hash); 929 } 930 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 931 // debugging hint 932 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 933 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 934 if (PrintBiasedLockingStatistics) { 935 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 936 } 937 } else { 938 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 939 } 940 success = true; 941 } 942 } 943 944 // traditional lightweight locking 945 if (!success) { 946 markOop displaced = lockee->mark()->set_unlocked(); 947 entry->lock()->set_displaced_header(displaced); 948 bool call_vm = UseHeavyMonitors; 949 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 950 // Is it simple recursive case? 951 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 952 entry->lock()->set_displaced_header(NULL); 953 } else { 954 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 955 } 956 } 957 } 958 UPDATE_PC_AND_TOS(1, -1); 959 goto run; 960 } 961 default: { 962 fatal("Unexpected message from frame manager"); 963 } 964 } 965 966 run: 967 968 DO_UPDATE_INSTRUCTION_COUNT(*pc) 969 DEBUGGER_SINGLE_STEP_NOTIFY(); 970 #ifdef PREFETCH_OPCCODE 971 opcode = *pc; /* prefetch first opcode */ 972 #endif 973 974 #ifndef USELABELS 975 while (1) 976 #endif 977 { 978 #ifndef PREFETCH_OPCCODE 979 opcode = *pc; 980 #endif 981 // Seems like this happens twice per opcode. At worst this is only 982 // need at entry to the loop. 983 // DEBUGGER_SINGLE_STEP_NOTIFY(); 984 /* Using this labels avoids double breakpoints when quickening and 985 * when returing from transition frames. 986 */ 987 opcode_switch: 988 assert(istate == orig, "Corrupted istate"); 989 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */ 990 assert(topOfStack >= istate->stack_limit(), "Stack overrun"); 991 assert(topOfStack < istate->stack_base(), "Stack underrun"); 992 993 #ifdef USELABELS 994 DISPATCH(opcode); 995 #else 996 switch (opcode) 997 #endif 998 { 999 CASE(_nop): 1000 UPDATE_PC_AND_CONTINUE(1); 1001 1002 /* Push miscellaneous constants onto the stack. 
*/ 1003 1004 CASE(_aconst_null): 1005 SET_STACK_OBJECT(NULL, 0); 1006 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1007 1008 #undef OPC_CONST_n 1009 #define OPC_CONST_n(opcode, const_type, value) \ 1010 CASE(opcode): \ 1011 SET_STACK_ ## const_type(value, 0); \ 1012 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1013 1014 OPC_CONST_n(_iconst_m1, INT, -1); 1015 OPC_CONST_n(_iconst_0, INT, 0); 1016 OPC_CONST_n(_iconst_1, INT, 1); 1017 OPC_CONST_n(_iconst_2, INT, 2); 1018 OPC_CONST_n(_iconst_3, INT, 3); 1019 OPC_CONST_n(_iconst_4, INT, 4); 1020 OPC_CONST_n(_iconst_5, INT, 5); 1021 OPC_CONST_n(_fconst_0, FLOAT, 0.0); 1022 OPC_CONST_n(_fconst_1, FLOAT, 1.0); 1023 OPC_CONST_n(_fconst_2, FLOAT, 2.0); 1024 1025 #undef OPC_CONST2_n 1026 #define OPC_CONST2_n(opcname, value, key, kind) \ 1027 CASE(_##opcname): \ 1028 { \ 1029 SET_STACK_ ## kind(VM##key##Const##value(), 1); \ 1030 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1031 } 1032 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE); 1033 OPC_CONST2_n(dconst_1, One, double, DOUBLE); 1034 OPC_CONST2_n(lconst_0, Zero, long, LONG); 1035 OPC_CONST2_n(lconst_1, One, long, LONG); 1036 1037 /* Load constant from constant pool: */ 1038 1039 /* Push a 1-byte signed integer value onto the stack. */ 1040 CASE(_bipush): 1041 SET_STACK_INT((jbyte)(pc[1]), 0); 1042 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1043 1044 /* Push a 2-byte signed integer constant onto the stack. */ 1045 CASE(_sipush): 1046 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0); 1047 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 1048 1049 /* load from local variable */ 1050 1051 CASE(_aload): 1052 VERIFY_OOP(LOCALS_OBJECT(pc[1])); 1053 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0); 1054 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1055 1056 CASE(_iload): 1057 CASE(_fload): 1058 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0); 1059 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); 1060 1061 CASE(_lload): 1062 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1); 1063 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1064 1065 CASE(_dload): 1066 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1); 1067 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); 1068 1069 #undef OPC_LOAD_n 1070 #define OPC_LOAD_n(num) \ 1071 CASE(_aload_##num): \ 1072 VERIFY_OOP(LOCALS_OBJECT(num)); \ 1073 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \ 1074 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1075 \ 1076 CASE(_iload_##num): \ 1077 CASE(_fload_##num): \ 1078 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \ 1079 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ 1080 \ 1081 CASE(_lload_##num): \ 1082 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \ 1083 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ 1084 CASE(_dload_##num): \ 1085 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \ 1086 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1087 1088 OPC_LOAD_n(0); 1089 OPC_LOAD_n(1); 1090 OPC_LOAD_n(2); 1091 OPC_LOAD_n(3); 1092 1093 /* store to a local variable */ 1094 1095 CASE(_astore): 1096 astore(topOfStack, -1, locals, pc[1]); 1097 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1098 1099 CASE(_istore): 1100 CASE(_fstore): 1101 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]); 1102 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); 1103 1104 CASE(_lstore): 1105 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]); 1106 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1107 1108 CASE(_dstore): 1109 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]); 1110 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); 1111 1112 CASE(_wide): { 1113 uint16_t reg = Bytes::get_Java_u2(pc + 2); 1114 1115 opcode = pc[1]; 1116 1117 // Wide and it's sub-bytecode are counted as separate instructions. 
If we 1118 // don't account for this here, the bytecode trace skips the next bytecode. 1119 DO_UPDATE_INSTRUCTION_COUNT(opcode); 1120 1121 switch(opcode) { 1122 case Bytecodes::_aload: 1123 VERIFY_OOP(LOCALS_OBJECT(reg)); 1124 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); 1125 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1126 1127 case Bytecodes::_iload: 1128 case Bytecodes::_fload: 1129 SET_STACK_SLOT(LOCALS_SLOT(reg), 0); 1130 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); 1131 1132 case Bytecodes::_lload: 1133 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1134 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1135 1136 case Bytecodes::_dload: 1137 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1); 1138 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); 1139 1140 case Bytecodes::_astore: 1141 astore(topOfStack, -1, locals, reg); 1142 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1143 1144 case Bytecodes::_istore: 1145 case Bytecodes::_fstore: 1146 SET_LOCALS_SLOT(STACK_SLOT(-1), reg); 1147 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); 1148 1149 case Bytecodes::_lstore: 1150 SET_LOCALS_LONG(STACK_LONG(-1), reg); 1151 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1152 1153 case Bytecodes::_dstore: 1154 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg); 1155 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); 1156 1157 case Bytecodes::_iinc: { 1158 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); 1159 // Be nice to see what this generates.... QQQ 1160 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); 1161 UPDATE_PC_AND_CONTINUE(6); 1162 } 1163 case Bytecodes::_ret: 1164 // Profile ret. 1165 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg)))); 1166 // Now, update the pc. 1167 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); 1168 UPDATE_PC_AND_CONTINUE(0); 1169 default: 1170 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap); 1171 } 1172 } 1173 1174 1175 #undef OPC_STORE_n 1176 #define OPC_STORE_n(num) \ 1177 CASE(_astore_##num): \ 1178 astore(topOfStack, -1, locals, num); \ 1179 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1180 CASE(_istore_##num): \ 1181 CASE(_fstore_##num): \ 1182 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \ 1183 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1184 1185 OPC_STORE_n(0); 1186 OPC_STORE_n(1); 1187 OPC_STORE_n(2); 1188 OPC_STORE_n(3); 1189 1190 #undef OPC_DSTORE_n 1191 #define OPC_DSTORE_n(num) \ 1192 CASE(_dstore_##num): \ 1193 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \ 1194 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1195 CASE(_lstore_##num): \ 1196 SET_LOCALS_LONG(STACK_LONG(-1), num); \ 1197 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1198 1199 OPC_DSTORE_n(0); 1200 OPC_DSTORE_n(1); 1201 OPC_DSTORE_n(2); 1202 OPC_DSTORE_n(3); 1203 1204 /* stack pop, dup, and insert opcodes */ 1205 1206 1207 CASE(_pop): /* Discard the top item on the stack */ 1208 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1209 1210 1211 CASE(_pop2): /* Discard the top 2 items on the stack */ 1212 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); 1213 1214 1215 CASE(_dup): /* Duplicate the top item on the stack */ 1216 dup(topOfStack); 1217 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1218 1219 CASE(_dup2): /* Duplicate the top 2 items on the stack */ 1220 dup2(topOfStack); 1221 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1222 1223 CASE(_dup_x1): /* insert top word two down */ 1224 dup_x1(topOfStack); 1225 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1226 1227 CASE(_dup_x2): /* insert top word three down */ 1228 dup_x2(topOfStack); 1229 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1230 1231 CASE(_dup2_x1): /* insert top 2 slots three down */ 1232 dup2_x1(topOfStack); 1233 
UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1234 1235 CASE(_dup2_x2): /* insert top 2 slots four down */ 1236 dup2_x2(topOfStack); 1237 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1238 1239 CASE(_swap): { /* swap top two elements on the stack */ 1240 swap(topOfStack); 1241 UPDATE_PC_AND_CONTINUE(1); 1242 } 1243 1244 /* Perform various binary integer operations */ 1245 1246 #undef OPC_INT_BINARY 1247 #define OPC_INT_BINARY(opcname, opname, test) \ 1248 CASE(_i##opcname): \ 1249 if (test && (STACK_INT(-1) == 0)) { \ 1250 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1251 "/ by zero", note_div0Check_trap); \ 1252 } \ 1253 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1254 STACK_INT(-1)), \ 1255 -2); \ 1256 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1257 CASE(_l##opcname): \ 1258 { \ 1259 if (test) { \ 1260 jlong l1 = STACK_LONG(-1); \ 1261 if (VMlongEqz(l1)) { \ 1262 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ 1263 "/ by long zero", note_div0Check_trap); \ 1264 } \ 1265 } \ 1266 /* First long at (-1,-2) next long at (-3,-4) */ \ 1267 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ 1268 STACK_LONG(-1)), \ 1269 -3); \ 1270 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1271 } 1272 1273 OPC_INT_BINARY(add, Add, 0); 1274 OPC_INT_BINARY(sub, Sub, 0); 1275 OPC_INT_BINARY(mul, Mul, 0); 1276 OPC_INT_BINARY(and, And, 0); 1277 OPC_INT_BINARY(or, Or, 0); 1278 OPC_INT_BINARY(xor, Xor, 0); 1279 OPC_INT_BINARY(div, Div, 1); 1280 OPC_INT_BINARY(rem, Rem, 1); 1281 1282 1283 /* Perform various binary floating number operations */ 1284 /* On some machine/platforms/compilers div zero check can be implicit */ 1285 1286 #undef OPC_FLOAT_BINARY 1287 #define OPC_FLOAT_BINARY(opcname, opname) \ 1288 CASE(_d##opcname): { \ 1289 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \ 1290 STACK_DOUBLE(-1)), \ 1291 -3); \ 1292 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \ 1293 } \ 1294 CASE(_f##opcname): \ 1295 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \ 1296 STACK_FLOAT(-1)), \ 1297 -2); \ 1298 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1299 1300 1301 OPC_FLOAT_BINARY(add, Add); 1302 OPC_FLOAT_BINARY(sub, Sub); 1303 OPC_FLOAT_BINARY(mul, Mul); 1304 OPC_FLOAT_BINARY(div, Div); 1305 OPC_FLOAT_BINARY(rem, Rem); 1306 1307 /* Shift operations 1308 * Shift left int and long: ishl, lshl 1309 * Logical shift right int and long w/zero extension: iushr, lushr 1310 * Arithmetic shift right int and long w/sign extension: ishr, lshr 1311 */ 1312 1313 #undef OPC_SHIFT_BINARY 1314 #define OPC_SHIFT_BINARY(opcname, opname) \ 1315 CASE(_i##opcname): \ 1316 SET_STACK_INT(VMint##opname(STACK_INT(-2), \ 1317 STACK_INT(-1)), \ 1318 -2); \ 1319 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1320 CASE(_l##opcname): \ 1321 { \ 1322 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \ 1323 STACK_INT(-1)), \ 1324 -2); \ 1325 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1326 } 1327 1328 OPC_SHIFT_BINARY(shl, Shl); 1329 OPC_SHIFT_BINARY(shr, Shr); 1330 OPC_SHIFT_BINARY(ushr, Ushr); 1331 1332 /* Increment local variable by constant */ 1333 CASE(_iinc): 1334 { 1335 // locals[pc[1]].j.i += (jbyte)(pc[2]); 1336 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]); 1337 UPDATE_PC_AND_CONTINUE(3); 1338 } 1339 1340 /* negate the value on the top of the stack */ 1341 1342 CASE(_ineg): 1343 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1); 1344 UPDATE_PC_AND_CONTINUE(1); 1345 1346 CASE(_fneg): 1347 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1); 1348 UPDATE_PC_AND_CONTINUE(1); 1349 1350 CASE(_lneg): 1351 { 1352 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1); 1353 
UPDATE_PC_AND_CONTINUE(1); 1354 } 1355 1356 CASE(_dneg): 1357 { 1358 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1); 1359 UPDATE_PC_AND_CONTINUE(1); 1360 } 1361 1362 /* Conversion operations */ 1363 1364 CASE(_i2f): /* convert top of stack int to float */ 1365 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1); 1366 UPDATE_PC_AND_CONTINUE(1); 1367 1368 CASE(_i2l): /* convert top of stack int to long */ 1369 { 1370 // this is ugly QQQ 1371 jlong r = VMint2Long(STACK_INT(-1)); 1372 MORE_STACK(-1); // Pop 1373 SET_STACK_LONG(r, 1); 1374 1375 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1376 } 1377 1378 CASE(_i2d): /* convert top of stack int to double */ 1379 { 1380 // this is ugly QQQ (why cast to jlong?? ) 1381 jdouble r = (jlong)STACK_INT(-1); 1382 MORE_STACK(-1); // Pop 1383 SET_STACK_DOUBLE(r, 1); 1384 1385 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1386 } 1387 1388 CASE(_l2i): /* convert top of stack long to int */ 1389 { 1390 jint r = VMlong2Int(STACK_LONG(-1)); 1391 MORE_STACK(-2); // Pop 1392 SET_STACK_INT(r, 0); 1393 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1394 } 1395 1396 CASE(_l2f): /* convert top of stack long to float */ 1397 { 1398 jlong r = STACK_LONG(-1); 1399 MORE_STACK(-2); // Pop 1400 SET_STACK_FLOAT(VMlong2Float(r), 0); 1401 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1402 } 1403 1404 CASE(_l2d): /* convert top of stack long to double */ 1405 { 1406 jlong r = STACK_LONG(-1); 1407 MORE_STACK(-2); // Pop 1408 SET_STACK_DOUBLE(VMlong2Double(r), 1); 1409 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1410 } 1411 1412 CASE(_f2i): /* Convert top of stack float to int */ 1413 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1); 1414 UPDATE_PC_AND_CONTINUE(1); 1415 1416 CASE(_f2l): /* convert top of stack float to long */ 1417 { 1418 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1)); 1419 MORE_STACK(-1); // POP 1420 SET_STACK_LONG(r, 1); 1421 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1422 } 1423 1424 CASE(_f2d): /* convert top of stack float to double */ 1425 { 1426 jfloat f; 1427 jdouble r; 1428 f = STACK_FLOAT(-1); 1429 r = (jdouble) f; 1430 MORE_STACK(-1); // POP 1431 SET_STACK_DOUBLE(r, 1); 1432 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1433 } 1434 1435 CASE(_d2i): /* convert top of stack double to int */ 1436 { 1437 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1)); 1438 MORE_STACK(-2); 1439 SET_STACK_INT(r1, 0); 1440 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1441 } 1442 1443 CASE(_d2f): /* convert top of stack double to float */ 1444 { 1445 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1)); 1446 MORE_STACK(-2); 1447 SET_STACK_FLOAT(r1, 0); 1448 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1449 } 1450 1451 CASE(_d2l): /* convert top of stack double to long */ 1452 { 1453 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1)); 1454 MORE_STACK(-2); 1455 SET_STACK_LONG(r1, 1); 1456 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); 1457 } 1458 1459 CASE(_i2b): 1460 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1); 1461 UPDATE_PC_AND_CONTINUE(1); 1462 1463 CASE(_i2c): 1464 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1); 1465 UPDATE_PC_AND_CONTINUE(1); 1466 1467 CASE(_i2s): 1468 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1); 1469 UPDATE_PC_AND_CONTINUE(1); 1470 1471 /* comparison operators */ 1472 1473 1474 #define COMPARISON_OP(name, comparison) \ 1475 CASE(_if_icmp##name): { \ 1476 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \ 1477 int skip = cmp \ 1478 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1479 address branch_pc = pc; \ 1480 /* Profile branch. 
*/ \ 1481 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1482 UPDATE_PC_AND_TOS(skip, -2); \ 1483 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1484 CONTINUE; \ 1485 } \ 1486 CASE(_if##name): { \ 1487 const bool cmp = (STACK_INT(-1) comparison 0); \ 1488 int skip = cmp \ 1489 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1490 address branch_pc = pc; \ 1491 /* Profile branch. */ \ 1492 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1493 UPDATE_PC_AND_TOS(skip, -1); \ 1494 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1495 CONTINUE; \ 1496 } 1497 1498 #define COMPARISON_OP2(name, comparison) \ 1499 COMPARISON_OP(name, comparison) \ 1500 CASE(_if_acmp##name): { \ 1501 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \ 1502 int skip = cmp \ 1503 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1504 address branch_pc = pc; \ 1505 /* Profile branch. */ \ 1506 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1507 UPDATE_PC_AND_TOS(skip, -2); \ 1508 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1509 CONTINUE; \ 1510 } 1511 1512 #define NULL_COMPARISON_NOT_OP(name) \ 1513 CASE(_if##name): { \ 1514 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \ 1515 int skip = cmp \ 1516 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1517 address branch_pc = pc; \ 1518 /* Profile branch. */ \ 1519 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1520 UPDATE_PC_AND_TOS(skip, -1); \ 1521 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1522 CONTINUE; \ 1523 } 1524 1525 #define NULL_COMPARISON_OP(name) \ 1526 CASE(_if##name): { \ 1527 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \ 1528 int skip = cmp \ 1529 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \ 1530 address branch_pc = pc; \ 1531 /* Profile branch. */ \ 1532 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \ 1533 UPDATE_PC_AND_TOS(skip, -1); \ 1534 DO_BACKEDGE_CHECKS(skip, branch_pc); \ 1535 CONTINUE; \ 1536 } 1537 COMPARISON_OP(lt, <); 1538 COMPARISON_OP(gt, >); 1539 COMPARISON_OP(le, <=); 1540 COMPARISON_OP(ge, >=); 1541 COMPARISON_OP2(eq, ==); /* include ref comparison */ 1542 COMPARISON_OP2(ne, !=); /* include ref comparison */ 1543 NULL_COMPARISON_OP(null); 1544 NULL_COMPARISON_NOT_OP(nonnull); 1545 1546 /* Goto pc at specified offset in switch table. */ 1547 1548 CASE(_tableswitch): { 1549 jint* lpc = (jint*)VMalignWordUp(pc+1); 1550 int32_t key = STACK_INT(-1); 1551 int32_t low = Bytes::get_Java_u4((address)&lpc[1]); 1552 int32_t high = Bytes::get_Java_u4((address)&lpc[2]); 1553 int32_t skip; 1554 key -= low; 1555 if (((uint32_t) key > (uint32_t)(high - low))) { 1556 key = -1; 1557 skip = Bytes::get_Java_u4((address)&lpc[0]); 1558 } else { 1559 skip = Bytes::get_Java_u4((address)&lpc[key + 3]); 1560 } 1561 // Profile switch. 1562 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key); 1563 // Does this really need a full backedge check (osr)? 1564 address branch_pc = pc; 1565 UPDATE_PC_AND_TOS(skip, -1); 1566 DO_BACKEDGE_CHECKS(skip, branch_pc); 1567 CONTINUE; 1568 } 1569 1570 /* Goto pc whose table entry matches specified key. */ 1571 1572 CASE(_lookupswitch): { 1573 jint* lpc = (jint*)VMalignWordUp(pc+1); 1574 int32_t key = STACK_INT(-1); 1575 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */ 1576 // Remember index. 1577 int index = -1; 1578 int newindex = 0; 1579 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]); 1580 while (--npairs >= 0) { 1581 lpc += 2; 1582 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) { 1583 skip = Bytes::get_Java_u4((address)&lpc[1]); 1584 index = newindex; 1585 break; 1586 } 1587 newindex += 1; 1588 } 1589 // Profile switch. 
1590 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index); 1591 address branch_pc = pc; 1592 UPDATE_PC_AND_TOS(skip, -1); 1593 DO_BACKEDGE_CHECKS(skip, branch_pc); 1594 CONTINUE; 1595 } 1596 1597 CASE(_fcmpl): 1598 CASE(_fcmpg): 1599 { 1600 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2), 1601 STACK_FLOAT(-1), 1602 (opcode == Bytecodes::_fcmpl ? -1 : 1)), 1603 -2); 1604 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1605 } 1606 1607 CASE(_dcmpl): 1608 CASE(_dcmpg): 1609 { 1610 int r = VMdoubleCompare(STACK_DOUBLE(-3), 1611 STACK_DOUBLE(-1), 1612 (opcode == Bytecodes::_dcmpl ? -1 : 1)); 1613 MORE_STACK(-4); // Pop 1614 SET_STACK_INT(r, 0); 1615 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1616 } 1617 1618 CASE(_lcmp): 1619 { 1620 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1)); 1621 MORE_STACK(-4); 1622 SET_STACK_INT(r, 0); 1623 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); 1624 } 1625 1626 1627 /* Return from a method */ 1628 1629 CASE(_areturn): 1630 CASE(_ireturn): 1631 CASE(_freturn): 1632 { 1633 // Allow a safepoint before returning to frame manager. 1634 SAFEPOINT; 1635 1636 goto handle_return; 1637 } 1638 1639 CASE(_lreturn): 1640 CASE(_dreturn): 1641 { 1642 // Allow a safepoint before returning to frame manager. 1643 SAFEPOINT; 1644 goto handle_return; 1645 } 1646 1647 CASE(_return_register_finalizer): { 1648 1649 oop rcvr = LOCALS_OBJECT(0); 1650 VERIFY_OOP(rcvr); 1651 if (rcvr->klass()->has_finalizer()) { 1652 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception); 1653 } 1654 goto handle_return; 1655 } 1656 CASE(_return): { 1657 1658 // Allow a safepoint before returning to frame manager. 1659 SAFEPOINT; 1660 goto handle_return; 1661 } 1662 1663 /* Array access byte-codes */ 1664 1665 /* Every array access byte-code starts out like this */ 1666 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff); 1667 #define ARRAY_INTRO(arrayOff) \ 1668 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \ 1669 jint index = STACK_INT(arrayOff + 1); \ 1670 char message[jintAsStringSize]; \ 1671 CHECK_NULL(arrObj); \ 1672 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \ 1673 sprintf(message, "%d", index); \ 1674 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \ 1675 message, note_rangeCheck_trap); \ 1676 } 1677 1678 /* 32-bit loads. 
These handle conversion from < 32-bit types */ 1679 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 1680 { \ 1681 ARRAY_INTRO(-2); \ 1682 (void)extra; \ 1683 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 1684 -2); \ 1685 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 1686 } 1687 1688 /* 64-bit loads */ 1689 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \ 1690 { \ 1691 ARRAY_INTRO(-2); \ 1692 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 1693 (void)extra; \ 1694 UPDATE_PC_AND_CONTINUE(1); \ 1695 } 1696 1697 CASE(_iaload): 1698 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0); 1699 CASE(_faload): 1700 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1701 CASE(_aaload): { 1702 ARRAY_INTRO(-2); 1703 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2); 1704 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1705 } 1706 CASE(_baload): 1707 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0); 1708 CASE(_caload): 1709 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0); 1710 CASE(_saload): 1711 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0); 1712 CASE(_laload): 1713 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0); 1714 CASE(_daload): 1715 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1716 1717 /* 32-bit stores. These handle conversion to < 32-bit types */ 1718 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 1719 { \ 1720 ARRAY_INTRO(-3); \ 1721 (void)extra; \ 1722 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1723 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 1724 } 1725 1726 /* 64-bit stores */ 1727 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 1728 { \ 1729 ARRAY_INTRO(-4); \ 1730 (void)extra; \ 1731 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 1732 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 1733 } 1734 1735 CASE(_iastore): 1736 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0); 1737 CASE(_fastore): 1738 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0); 1739 /* 1740 * This one looks different because of the assignability check 1741 */ 1742 CASE(_aastore): { 1743 oop rhsObject = STACK_OBJECT(-1); 1744 VERIFY_OOP(rhsObject); 1745 ARRAY_INTRO( -3); 1746 // arrObj, index are set 1747 if (rhsObject != NULL) { 1748 /* Check assignability of rhsObject into arrObj */ 1749 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass) 1750 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX 1751 // 1752 // Check for compatibilty. This check must not GC!! 1753 // Seems way more expensive now that we must dispatch 1754 // 1755 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is... 1756 // Decrement counter if subtype check failed. 1757 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass); 1758 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap); 1759 } 1760 // Profile checkcast with null_seen and receiver. 1761 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass); 1762 } else { 1763 // Profile checkcast with null_seen and receiver. 
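// Storing null into an object array is always legal, so no subtype check is
// needed on this path; only the null_seen bit is recorded.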
1764 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 1765 } 1766 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject); 1767 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1768 } 1769 CASE(_bastore): { 1770 ARRAY_INTRO(-3); 1771 int item = STACK_INT(-1); 1772 // if it is a T_BOOLEAN array, mask the stored value to 0/1 1773 if (arrObj->klass() == Universe::boolArrayKlassObj()) { 1774 item &= 1; 1775 } else { 1776 assert(arrObj->klass() == Universe::byteArrayKlassObj(), 1777 "should be byte array otherwise"); 1778 } 1779 ((typeArrayOop)arrObj)->byte_at_put(index, item); 1780 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); 1781 } 1782 CASE(_castore): 1783 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0); 1784 CASE(_sastore): 1785 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0); 1786 CASE(_lastore): 1787 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0); 1788 CASE(_dastore): 1789 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0); 1790 1791 CASE(_arraylength): 1792 { 1793 arrayOop ary = (arrayOop) STACK_OBJECT(-1); 1794 CHECK_NULL(ary); 1795 SET_STACK_INT(ary->length(), -1); 1796 UPDATE_PC_AND_CONTINUE(1); 1797 } 1798 1799 /* monitorenter and monitorexit for locking/unlocking an object */ 1800 1801 CASE(_monitorenter): { 1802 oop lockee = STACK_OBJECT(-1); 1803 // derefing's lockee ought to provoke implicit null check 1804 CHECK_NULL(lockee); 1805 // find a free monitor or one already allocated for this object 1806 // if we find a matching object then we need a new monitor 1807 // since this is recursive enter 1808 BasicObjectLock* limit = istate->monitor_base(); 1809 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1810 BasicObjectLock* entry = NULL; 1811 while (most_recent != limit ) { 1812 if (most_recent->obj() == NULL) entry = most_recent; 1813 else if (most_recent->obj() == lockee) break; 1814 most_recent++; 1815 } 1816 if (entry != NULL) { 1817 entry->set_obj(lockee); 1818 int success = false; 1819 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; 1820 1821 markOop mark = lockee->mark(); 1822 intptr_t hash = (intptr_t) markOopDesc::no_hash; 1823 // implies UseBiasedLocking 1824 if (mark->has_bias_pattern()) { 1825 uintptr_t thread_ident; 1826 uintptr_t anticipated_bias_locking_value; 1827 thread_ident = (uintptr_t)istate->thread(); 1828 anticipated_bias_locking_value = 1829 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & 1830 ~((uintptr_t) markOopDesc::age_mask_in_place); 1831 1832 if (anticipated_bias_locking_value == 0) { 1833 // already biased towards this thread, nothing to do 1834 if (PrintBiasedLockingStatistics) { 1835 (* BiasedLocking::biased_lock_entry_count_addr())++; 1836 } 1837 success = true; 1838 } 1839 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { 1840 // try revoke bias 1841 markOop header = lockee->klass()->prototype_header(); 1842 if (hash != markOopDesc::no_hash) { 1843 header = header->copy_set_hash(hash); 1844 } 1845 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { 1846 if (PrintBiasedLockingStatistics) 1847 (*BiasedLocking::revoked_lock_entry_count_addr())++; 1848 } 1849 } 1850 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { 1851 // try rebias 1852 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); 1853 if (hash != markOopDesc::no_hash) { 1854 new_header = new_header->copy_set_hash(hash); 1855 } 1856 if (Atomic::cmpxchg_ptr((void*)new_header, 
lockee->mark_addr(), mark) == mark) { 1857 if (PrintBiasedLockingStatistics) 1858 (* BiasedLocking::rebiased_lock_entry_count_addr())++; 1859 } 1860 else { 1861 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1862 } 1863 success = true; 1864 } 1865 else { 1866 // try to bias towards thread in case object is anonymously biased 1867 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | 1868 (uintptr_t)markOopDesc::age_mask_in_place | 1869 epoch_mask_in_place)); 1870 if (hash != markOopDesc::no_hash) { 1871 header = header->copy_set_hash(hash); 1872 } 1873 markOop new_header = (markOop) ((uintptr_t) header | thread_ident); 1874 // debugging hint 1875 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) 1876 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { 1877 if (PrintBiasedLockingStatistics) 1878 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; 1879 } 1880 else { 1881 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1882 } 1883 success = true; 1884 } 1885 } 1886 1887 // traditional lightweight locking 1888 if (!success) { 1889 markOop displaced = lockee->mark()->set_unlocked(); 1890 entry->lock()->set_displaced_header(displaced); 1891 bool call_vm = UseHeavyMonitors; 1892 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { 1893 // Is it simple recursive case? 1894 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { 1895 entry->lock()->set_displaced_header(NULL); 1896 } else { 1897 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); 1898 } 1899 } 1900 } 1901 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1902 } else { 1903 istate->set_msg(more_monitors); 1904 UPDATE_PC_AND_RETURN(0); // Re-execute 1905 } 1906 } 1907 1908 CASE(_monitorexit): { 1909 oop lockee = STACK_OBJECT(-1); 1910 CHECK_NULL(lockee); 1911 // derefing's lockee ought to provoke implicit null check 1912 // find our monitor slot 1913 BasicObjectLock* limit = istate->monitor_base(); 1914 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); 1915 while (most_recent != limit ) { 1916 if ((most_recent)->obj() == lockee) { 1917 BasicLock* lock = most_recent->lock(); 1918 markOop header = lock->displaced_header(); 1919 most_recent->set_obj(NULL); 1920 if (!lockee->mark()->has_bias_pattern()) { 1921 bool call_vm = UseHeavyMonitors; 1922 // If it isn't recursive we either must swap old header or call the runtime 1923 if (header != NULL || call_vm) { 1924 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 1925 // restore object for the slow case 1926 most_recent->set_obj(lockee); 1927 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception); 1928 } 1929 } 1930 } 1931 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); 1932 } 1933 most_recent++; 1934 } 1935 // Need to throw illegal monitor state exception 1936 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception); 1937 ShouldNotReachHere(); 1938 } 1939 1940 /* All of the non-quick opcodes. */ 1941 1942 /* -Set clobbersCpIndex true if the quickened opcode clobbers the 1943 * constant pool index in the instruction. 1944 */ 1945 CASE(_getfield): 1946 CASE(_getstatic): 1947 { 1948 u2 index; 1949 ConstantPoolCacheEntry* cache; 1950 index = Bytes::get_native_u2(pc+1); 1951 1952 // QQQ Need to make this as inlined as possible. 
Probably need to 1953 // split all the bytecode cases out so c++ compiler has a chance 1954 // for constant prop to fold everything possible away. 1955 1956 cache = cp->entry_at(index); 1957 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 1958 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 1959 handle_exception); 1960 cache = cp->entry_at(index); 1961 } 1962 1963 #ifdef VM_JVMTI 1964 if (_jvmti_interp_events) { 1965 int *count_addr; 1966 oop obj; 1967 // Check to see if a field modification watch has been set 1968 // before we take the time to call into the VM. 1969 count_addr = (int *)JvmtiExport::get_field_access_count_addr(); 1970 if ( *count_addr > 0 ) { 1971 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1972 obj = (oop)NULL; 1973 } else { 1974 obj = (oop) STACK_OBJECT(-1); 1975 VERIFY_OOP(obj); 1976 } 1977 CALL_VM(InterpreterRuntime::post_field_access(THREAD, 1978 obj, 1979 cache), 1980 handle_exception); 1981 } 1982 } 1983 #endif /* VM_JVMTI */ 1984 1985 oop obj; 1986 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { 1987 Klass* k = cache->f1_as_klass(); 1988 obj = k->java_mirror(); 1989 MORE_STACK(1); // Assume single slot push 1990 } else { 1991 obj = (oop) STACK_OBJECT(-1); 1992 CHECK_NULL(obj); 1993 } 1994 1995 // 1996 // Now store the result on the stack 1997 // 1998 TosState tos_type = cache->flag_state(); 1999 int field_offset = cache->f2_as_index(); 2000 if (cache->is_volatile()) { 2001 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2002 OrderAccess::fence(); 2003 } 2004 if (tos_type == atos) { 2005 VERIFY_OOP(obj->obj_field_acquire(field_offset)); 2006 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); 2007 } else if (tos_type == itos) { 2008 SET_STACK_INT(obj->int_field_acquire(field_offset), -1); 2009 } else if (tos_type == ltos) { 2010 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); 2011 MORE_STACK(1); 2012 } else if (tos_type == btos || tos_type == ztos) { 2013 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); 2014 } else if (tos_type == ctos) { 2015 SET_STACK_INT(obj->char_field_acquire(field_offset), -1); 2016 } else if (tos_type == stos) { 2017 SET_STACK_INT(obj->short_field_acquire(field_offset), -1); 2018 } else if (tos_type == ftos) { 2019 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); 2020 } else { 2021 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); 2022 MORE_STACK(1); 2023 } 2024 } else { 2025 if (tos_type == atos) { 2026 VERIFY_OOP(obj->obj_field(field_offset)); 2027 SET_STACK_OBJECT(obj->obj_field(field_offset), -1); 2028 } else if (tos_type == itos) { 2029 SET_STACK_INT(obj->int_field(field_offset), -1); 2030 } else if (tos_type == ltos) { 2031 SET_STACK_LONG(obj->long_field(field_offset), 0); 2032 MORE_STACK(1); 2033 } else if (tos_type == btos || tos_type == ztos) { 2034 SET_STACK_INT(obj->byte_field(field_offset), -1); 2035 } else if (tos_type == ctos) { 2036 SET_STACK_INT(obj->char_field(field_offset), -1); 2037 } else if (tos_type == stos) { 2038 SET_STACK_INT(obj->short_field(field_offset), -1); 2039 } else if (tos_type == ftos) { 2040 SET_STACK_FLOAT(obj->float_field(field_offset), -1); 2041 } else { 2042 SET_STACK_DOUBLE(obj->double_field(field_offset), 0); 2043 MORE_STACK(1); 2044 } 2045 } 2046 2047 UPDATE_PC_AND_CONTINUE(3); 2048 } 2049 2050 CASE(_putfield): 2051 CASE(_putstatic): 2052 { 2053 u2 index = Bytes::get_native_u2(pc+1); 2054 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2055 if 
(!cache->is_resolved((Bytecodes::Code)opcode)) { 2056 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2057 handle_exception); 2058 cache = cp->entry_at(index); 2059 } 2060 2061 #ifdef VM_JVMTI 2062 if (_jvmti_interp_events) { 2063 int *count_addr; 2064 oop obj; 2065 // Check to see if a field modification watch has been set 2066 // before we take the time to call into the VM. 2067 count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); 2068 if ( *count_addr > 0 ) { 2069 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2070 obj = (oop)NULL; 2071 } 2072 else { 2073 if (cache->is_long() || cache->is_double()) { 2074 obj = (oop) STACK_OBJECT(-3); 2075 } else { 2076 obj = (oop) STACK_OBJECT(-2); 2077 } 2078 VERIFY_OOP(obj); 2079 } 2080 2081 CALL_VM(InterpreterRuntime::post_field_modification(THREAD, 2082 obj, 2083 cache, 2084 (jvalue *)STACK_SLOT(-1)), 2085 handle_exception); 2086 } 2087 } 2088 #endif /* VM_JVMTI */ 2089 2090 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2091 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2092 2093 oop obj; 2094 int count; 2095 TosState tos_type = cache->flag_state(); 2096 2097 count = -1; 2098 if (tos_type == ltos || tos_type == dtos) { 2099 --count; 2100 } 2101 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { 2102 Klass* k = cache->f1_as_klass(); 2103 obj = k->java_mirror(); 2104 } else { 2105 --count; 2106 obj = (oop) STACK_OBJECT(count); 2107 CHECK_NULL(obj); 2108 } 2109 2110 // 2111 // Now store the result 2112 // 2113 int field_offset = cache->f2_as_index(); 2114 if (cache->is_volatile()) { 2115 if (tos_type == itos) { 2116 obj->release_int_field_put(field_offset, STACK_INT(-1)); 2117 } else if (tos_type == atos) { 2118 VERIFY_OOP(STACK_OBJECT(-1)); 2119 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); 2120 } else if (tos_type == btos) { 2121 obj->release_byte_field_put(field_offset, STACK_INT(-1)); 2122 } else if (tos_type == ztos) { 2123 int bool_field = STACK_INT(-1); // only store LSB 2124 obj->release_byte_field_put(field_offset, (bool_field & 1)); 2125 } else if (tos_type == ltos) { 2126 obj->release_long_field_put(field_offset, STACK_LONG(-1)); 2127 } else if (tos_type == ctos) { 2128 obj->release_char_field_put(field_offset, STACK_INT(-1)); 2129 } else if (tos_type == stos) { 2130 obj->release_short_field_put(field_offset, STACK_INT(-1)); 2131 } else if (tos_type == ftos) { 2132 obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); 2133 } else { 2134 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); 2135 } 2136 OrderAccess::storeload(); 2137 } else { 2138 if (tos_type == itos) { 2139 obj->int_field_put(field_offset, STACK_INT(-1)); 2140 } else if (tos_type == atos) { 2141 VERIFY_OOP(STACK_OBJECT(-1)); 2142 obj->obj_field_put(field_offset, STACK_OBJECT(-1)); 2143 } else if (tos_type == btos) { 2144 obj->byte_field_put(field_offset, STACK_INT(-1)); 2145 } else if (tos_type == ztos) { 2146 int bool_field = STACK_INT(-1); // only store LSB 2147 obj->byte_field_put(field_offset, (bool_field & 1)); 2148 } else if (tos_type == ltos) { 2149 obj->long_field_put(field_offset, STACK_LONG(-1)); 2150 } else if (tos_type == ctos) { 2151 obj->char_field_put(field_offset, STACK_INT(-1)); 2152 } else if (tos_type == stos) { 2153 obj->short_field_put(field_offset, STACK_INT(-1)); 2154 } else if (tos_type == ftos) { 2155 obj->float_field_put(field_offset, STACK_FLOAT(-1)); 2156 } else { 
2157 obj->double_field_put(field_offset, STACK_DOUBLE(-1)); 2158 } 2159 } 2160 2161 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); 2162 } 2163 2164 CASE(_new): { 2165 u2 index = Bytes::get_Java_u2(pc+1); 2166 ConstantPool* constants = istate->method()->constants(); 2167 if (!constants->tag_at(index).is_unresolved_klass()) { 2168 // Make sure klass is initialized and doesn't have a finalizer 2169 Klass* entry = constants->resolved_klass_at(index); 2170 InstanceKlass* ik = InstanceKlass::cast(entry); 2171 if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) { 2172 size_t obj_size = ik->size_helper(); 2173 oop result = NULL; 2174 // If the TLAB isn't pre-zeroed then we'll have to do it 2175 bool need_zero = !ZeroTLAB; 2176 if (UseTLAB) { 2177 result = (oop) THREAD->tlab().allocate(obj_size); 2178 } 2179 // Disable non-TLAB-based fast-path, because profiling requires that all 2180 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate 2181 // returns NULL. 2182 #ifndef CC_INTERP_PROFILE 2183 if (result == NULL) { 2184 need_zero = true; 2185 // Try allocate in shared eden 2186 retry: 2187 HeapWord* compare_to = *Universe::heap()->top_addr(); 2188 HeapWord* new_top = compare_to + obj_size; 2189 if (new_top <= *Universe::heap()->end_addr()) { 2190 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { 2191 goto retry; 2192 } 2193 result = (oop) compare_to; 2194 } 2195 } 2196 #endif 2197 if (result != NULL) { 2198 // Initialize object (if nonzero size and need) and then the header 2199 if (need_zero ) { 2200 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; 2201 obj_size -= sizeof(oopDesc) / oopSize; 2202 if (obj_size > 0 ) { 2203 memset(to_zero, 0, obj_size * HeapWordSize); 2204 } 2205 } 2206 if (UseBiasedLocking) { 2207 result->set_mark(ik->prototype_header()); 2208 } else { 2209 result->set_mark(markOopDesc::prototype()); 2210 } 2211 result->set_klass_gap(0); 2212 result->set_klass(ik); 2213 // Must prevent reordering of stores for object initialization 2214 // with stores that publish the new object. 2215 OrderAccess::storestore(); 2216 SET_STACK_OBJECT(result, 0); 2217 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2218 } 2219 } 2220 } 2221 // Slow case allocation 2222 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), 2223 handle_exception); 2224 // Must prevent reordering of stores for object initialization 2225 // with stores that publish the new object. 2226 OrderAccess::storestore(); 2227 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2228 THREAD->set_vm_result(NULL); 2229 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); 2230 } 2231 CASE(_anewarray): { 2232 u2 index = Bytes::get_Java_u2(pc+1); 2233 jint size = STACK_INT(-1); 2234 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), 2235 handle_exception); 2236 // Must prevent reordering of stores for object initialization 2237 // with stores that publish the new object. 2238 OrderAccess::storestore(); 2239 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2240 THREAD->set_vm_result(NULL); 2241 UPDATE_PC_AND_CONTINUE(3); 2242 } 2243 CASE(_multianewarray): { 2244 jint dims = *(pc+3); 2245 jint size = STACK_INT(-1); 2246 // stack grows down, dimensions are up! 
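// The 'dims' dimension counts sit in the top 'dims' expression stack slots. The
// stack grows down, so the pointer computed below addresses the deepest of those
// slots (the first dimension pushed), giving InterpreterRuntime::multianewarray a
// base from which it can reach all of the sizes.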
2247 jint *dimarray = 2248 (jint*)&topOfStack[dims * Interpreter::stackElementWords+ 2249 Interpreter::stackElementWords-1]; 2250 //adjust pointer to start of stack element 2251 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), 2252 handle_exception); 2253 // Must prevent reordering of stores for object initialization 2254 // with stores that publish the new object. 2255 OrderAccess::storestore(); 2256 SET_STACK_OBJECT(THREAD->vm_result(), -dims); 2257 THREAD->set_vm_result(NULL); 2258 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); 2259 } 2260 CASE(_checkcast): 2261 if (STACK_OBJECT(-1) != NULL) { 2262 VERIFY_OOP(STACK_OBJECT(-1)); 2263 u2 index = Bytes::get_Java_u2(pc+1); 2264 // Constant pool may have actual klass or unresolved klass. If it is 2265 // unresolved we must resolve it. 2266 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2267 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2268 } 2269 Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index); 2270 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx 2271 // 2272 // Check for compatibilty. This check must not GC!! 2273 // Seems way more expensive now that we must dispatch. 2274 // 2275 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) { 2276 // Decrement counter at checkcast. 2277 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2278 ResourceMark rm(THREAD); 2279 char* message = SharedRuntime::generate_class_cast_message( 2280 objKlass, klassOf); 2281 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap); 2282 } 2283 // Profile checkcast with null_seen and receiver. 2284 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass); 2285 } else { 2286 // Profile checkcast with null_seen and receiver. 2287 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL); 2288 } 2289 UPDATE_PC_AND_CONTINUE(3); 2290 2291 CASE(_instanceof): 2292 if (STACK_OBJECT(-1) == NULL) { 2293 SET_STACK_INT(0, -1); 2294 // Profile instanceof with null_seen and receiver. 2295 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL); 2296 } else { 2297 VERIFY_OOP(STACK_OBJECT(-1)); 2298 u2 index = Bytes::get_Java_u2(pc+1); 2299 // Constant pool may have actual klass or unresolved klass. If it is 2300 // unresolved we must resolve it. 2301 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { 2302 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); 2303 } 2304 Klass* klassOf = (Klass*) METHOD->constants()->resolved_klass_at(index); 2305 Klass* objKlass = STACK_OBJECT(-1)->klass(); 2306 // 2307 // Check for compatibilty. This check must not GC!! 2308 // Seems way more expensive now that we must dispatch. 2309 // 2310 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) { 2311 SET_STACK_INT(1, -1); 2312 } else { 2313 SET_STACK_INT(0, -1); 2314 // Decrement counter at checkcast. 2315 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass); 2316 } 2317 // Profile instanceof with null_seen and receiver. 
2318 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass); 2319 } 2320 UPDATE_PC_AND_CONTINUE(3); 2321 2322 CASE(_ldc_w): 2323 CASE(_ldc): 2324 { 2325 u2 index; 2326 bool wide = false; 2327 int incr = 2; // frequent case 2328 if (opcode == Bytecodes::_ldc) { 2329 index = pc[1]; 2330 } else { 2331 index = Bytes::get_Java_u2(pc+1); 2332 incr = 3; 2333 wide = true; 2334 } 2335 2336 ConstantPool* constants = METHOD->constants(); 2337 switch (constants->tag_at(index).value()) { 2338 case JVM_CONSTANT_Integer: 2339 SET_STACK_INT(constants->int_at(index), 0); 2340 break; 2341 2342 case JVM_CONSTANT_Float: 2343 SET_STACK_FLOAT(constants->float_at(index), 0); 2344 break; 2345 2346 case JVM_CONSTANT_String: 2347 { 2348 oop result = constants->resolved_references()->obj_at(index); 2349 if (result == NULL) { 2350 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); 2351 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2352 THREAD->set_vm_result(NULL); 2353 } else { 2354 VERIFY_OOP(result); 2355 SET_STACK_OBJECT(result, 0); 2356 } 2357 break; 2358 } 2359 2360 case JVM_CONSTANT_Class: 2361 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); 2362 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); 2363 break; 2364 2365 case JVM_CONSTANT_UnresolvedClass: 2366 case JVM_CONSTANT_UnresolvedClassInError: 2367 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); 2368 SET_STACK_OBJECT(THREAD->vm_result(), 0); 2369 THREAD->set_vm_result(NULL); 2370 break; 2371 2372 default: ShouldNotReachHere(); 2373 } 2374 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2375 } 2376 2377 CASE(_ldc2_w): 2378 { 2379 u2 index = Bytes::get_Java_u2(pc+1); 2380 2381 ConstantPool* constants = METHOD->constants(); 2382 switch (constants->tag_at(index).value()) { 2383 2384 case JVM_CONSTANT_Long: 2385 SET_STACK_LONG(constants->long_at(index), 1); 2386 break; 2387 2388 case JVM_CONSTANT_Double: 2389 SET_STACK_DOUBLE(constants->double_at(index), 1); 2390 break; 2391 default: ShouldNotReachHere(); 2392 } 2393 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); 2394 } 2395 2396 CASE(_fast_aldc_w): 2397 CASE(_fast_aldc): { 2398 u2 index; 2399 int incr; 2400 if (opcode == Bytecodes::_fast_aldc) { 2401 index = pc[1]; 2402 incr = 2; 2403 } else { 2404 index = Bytes::get_native_u2(pc+1); 2405 incr = 3; 2406 } 2407 2408 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) 2409 // This kind of CP cache entry does not need to match the flags byte, because 2410 // there is a 1-1 relation between bytecode type and CP entry type. 2411 ConstantPool* constants = METHOD->constants(); 2412 oop result = constants->resolved_references()->obj_at(index); 2413 if (result == NULL) { 2414 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), 2415 handle_exception); 2416 result = THREAD->vm_result(); 2417 } 2418 2419 VERIFY_OOP(result); 2420 SET_STACK_OBJECT(result, 0); 2421 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); 2422 } 2423 2424 CASE(_invokedynamic): { 2425 2426 u4 index = Bytes::get_native_u4(pc+1); 2427 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2428 2429 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) 2430 // This kind of CP cache entry does not need to match the flags byte, because 2431 // there is a 1-1 relation between bytecode type and CP entry type. 2432 if (! 
cache->is_resolved((Bytecodes::Code) opcode)) { 2433 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2434 handle_exception); 2435 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); 2436 } 2437 2438 Method* method = cache->f1_as_method(); 2439 if (VerifyOops) method->verify(); 2440 2441 if (cache->has_appendix()) { 2442 ConstantPool* constants = METHOD->constants(); 2443 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2444 MORE_STACK(1); 2445 } 2446 2447 istate->set_msg(call_method); 2448 istate->set_callee(method); 2449 istate->set_callee_entry_point(method->from_interpreted_entry()); 2450 istate->set_bcp_advance(5); 2451 2452 // Invokedynamic has got a call counter, just like an invokestatic -> increment! 2453 BI_PROFILE_UPDATE_CALL(); 2454 2455 UPDATE_PC_AND_RETURN(0); // I'll be back... 2456 } 2457 2458 CASE(_invokehandle): { 2459 2460 u2 index = Bytes::get_native_u2(pc+1); 2461 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2462 2463 if (! cache->is_resolved((Bytecodes::Code) opcode)) { 2464 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2465 handle_exception); 2466 cache = cp->entry_at(index); 2467 } 2468 2469 Method* method = cache->f1_as_method(); 2470 if (VerifyOops) method->verify(); 2471 2472 if (cache->has_appendix()) { 2473 ConstantPool* constants = METHOD->constants(); 2474 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); 2475 MORE_STACK(1); 2476 } 2477 2478 istate->set_msg(call_method); 2479 istate->set_callee(method); 2480 istate->set_callee_entry_point(method->from_interpreted_entry()); 2481 istate->set_bcp_advance(3); 2482 2483 // Invokehandle has got a call counter, just like a final call -> increment! 2484 BI_PROFILE_UPDATE_FINALCALL(); 2485 2486 UPDATE_PC_AND_RETURN(0); // I'll be back... 2487 } 2488 2489 CASE(_invokeinterface): { 2490 u2 index = Bytes::get_native_u2(pc+1); 2491 2492 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2493 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2494 2495 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2496 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2497 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2498 handle_exception); 2499 cache = cp->entry_at(index); 2500 } 2501 2502 istate->set_msg(call_method); 2503 2504 // Special case of invokeinterface called for virtual method of 2505 // java.lang.Object. See cpCacheOop.cpp for details. 2506 // This code isn't produced by javac, but could be produced by 2507 // another compliant java compiler. 2508 if (cache->is_forced_virtual()) { 2509 Method* callee; 2510 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2511 if (cache->is_vfinal()) { 2512 callee = cache->f2_as_vfinal_method(); 2513 // Profile 'special case of invokeinterface' final call. 2514 BI_PROFILE_UPDATE_FINALCALL(); 2515 } else { 2516 // Get receiver. 2517 int parms = cache->parameter_size(); 2518 // Same comments as invokevirtual apply here. 2519 oop rcvr = STACK_OBJECT(-parms); 2520 VERIFY_OOP(rcvr); 2521 Klass* rcvrKlass = rcvr->klass(); 2522 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2523 // Profile 'special case of invokeinterface' virtual call. 
2524 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2525 } 2526 istate->set_callee(callee); 2527 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2528 #ifdef VM_JVMTI 2529 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2530 istate->set_callee_entry_point(callee->interpreter_entry()); 2531 } 2532 #endif /* VM_JVMTI */ 2533 istate->set_bcp_advance(5); 2534 UPDATE_PC_AND_RETURN(0); // I'll be back... 2535 } 2536 2537 // this could definitely be cleaned up QQQ 2538 Method* callee; 2539 Klass* iclass = cache->f1_as_klass(); 2540 // InstanceKlass* interface = (InstanceKlass*) iclass; 2541 // get receiver 2542 int parms = cache->parameter_size(); 2543 oop rcvr = STACK_OBJECT(-parms); 2544 CHECK_NULL(rcvr); 2545 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); 2546 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); 2547 int i; 2548 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { 2549 if (ki->interface_klass() == iclass) break; 2550 } 2551 // If the interface isn't found, this class doesn't implement this 2552 // interface. The link resolver checks this but only for the first 2553 // time this interface is called. 2554 if (i == int2->itable_length()) { 2555 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap); 2556 } 2557 int mindex = cache->f2_as_index(); 2558 itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); 2559 callee = im[mindex].method(); 2560 if (callee == NULL) { 2561 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap); 2562 } 2563 2564 // Profile virtual call. 2565 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass()); 2566 2567 istate->set_callee(callee); 2568 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2569 #ifdef VM_JVMTI 2570 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2571 istate->set_callee_entry_point(callee->interpreter_entry()); 2572 } 2573 #endif /* VM_JVMTI */ 2574 istate->set_bcp_advance(5); 2575 UPDATE_PC_AND_RETURN(0); // I'll be back... 2576 } 2577 2578 CASE(_invokevirtual): 2579 CASE(_invokespecial): 2580 CASE(_invokestatic): { 2581 u2 index = Bytes::get_native_u2(pc+1); 2582 2583 ConstantPoolCacheEntry* cache = cp->entry_at(index); 2584 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases 2585 // out so c++ compiler has a chance for constant prop to fold everything possible away. 2586 2587 if (!cache->is_resolved((Bytecodes::Code)opcode)) { 2588 CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, (Bytecodes::Code)opcode), 2589 handle_exception); 2590 cache = cp->entry_at(index); 2591 } 2592 2593 istate->set_msg(call_method); 2594 { 2595 Method* callee; 2596 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { 2597 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2598 if (cache->is_vfinal()) { 2599 callee = cache->f2_as_vfinal_method(); 2600 // Profile final call. 
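// A vfinal cache entry means resolution bound the call to a single target method
// (f2 holds the Method* directly), so no vtable lookup is needed and the site is
// profiled as a final call rather than a virtual dispatch.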
2601 BI_PROFILE_UPDATE_FINALCALL(); 2602 } else { 2603 // get receiver 2604 int parms = cache->parameter_size(); 2605 // this works but needs a resourcemark and seems to create a vtable on every call: 2606 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); 2607 // 2608 // this fails with an assert 2609 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); 2610 // but this works 2611 oop rcvr = STACK_OBJECT(-parms); 2612 VERIFY_OOP(rcvr); 2613 Klass* rcvrKlass = rcvr->klass(); 2614 /* 2615 Executing this code in java.lang.String: 2616 public String(char value[]) { 2617 this.count = value.length; 2618 this.value = (char[])value.clone(); 2619 } 2620 2621 a find on rcvr->klass() reports: 2622 {type array char}{type array class} 2623 - klass: {other class} 2624 2625 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure 2626 because rcvr->klass()->is_instance_klass() == 0 2627 However it seems to have a vtable in the right location. Huh? 2628 Because vtables have the same offset for ArrayKlass and InstanceKlass. 2629 */ 2630 callee = (Method*) rcvrKlass->method_at_vtable(cache->f2_as_index()); 2631 // Profile virtual call. 2632 BI_PROFILE_UPDATE_VIRTUALCALL(rcvrKlass); 2633 } 2634 } else { 2635 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { 2636 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); 2637 } 2638 callee = cache->f1_as_method(); 2639 2640 // Profile call. 2641 BI_PROFILE_UPDATE_CALL(); 2642 } 2643 2644 istate->set_callee(callee); 2645 istate->set_callee_entry_point(callee->from_interpreted_entry()); 2646 #ifdef VM_JVMTI 2647 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { 2648 istate->set_callee_entry_point(callee->interpreter_entry()); 2649 } 2650 #endif /* VM_JVMTI */ 2651 istate->set_bcp_advance(3); 2652 UPDATE_PC_AND_RETURN(0); // I'll be back... 2653 } 2654 } 2655 2656 /* Allocate memory for a new java object. */ 2657 2658 CASE(_newarray): { 2659 BasicType atype = (BasicType) *(pc+1); 2660 jint size = STACK_INT(-1); 2661 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), 2662 handle_exception); 2663 // Must prevent reordering of stores for object initialization 2664 // with stores that publish the new object. 2665 OrderAccess::storestore(); 2666 SET_STACK_OBJECT(THREAD->vm_result(), -1); 2667 THREAD->set_vm_result(NULL); 2668 2669 UPDATE_PC_AND_CONTINUE(2); 2670 } 2671 2672 /* Throw an exception. */ 2673 2674 CASE(_athrow): { 2675 oop except_oop = STACK_OBJECT(-1); 2676 CHECK_NULL(except_oop); 2677 // set pending_exception so we use common code 2678 THREAD->set_pending_exception(except_oop, NULL, 0); 2679 goto handle_exception; 2680 } 2681 2682 /* goto and jsr. They are exactly the same except jsr pushes 2683 * the address of the next instruction first. 2684 */ 2685 2686 CASE(_jsr): { 2687 /* push bytecode index on stack */ 2688 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); 2689 MORE_STACK(1); 2690 /* FALL THROUGH */ 2691 } 2692 2693 CASE(_goto): 2694 { 2695 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); 2696 // Profile jump. 
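// Both _jsr and plain _goto end up here; _jsr merely pushes the return address
// first and falls through. DO_BACKEDGE_CHECKS below performs the backedge
// bookkeeping (counter updates and possible OSR) for backward branches.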
2697 BI_PROFILE_UPDATE_JUMP(); 2698 address branch_pc = pc; 2699 UPDATE_PC(offset); 2700 DO_BACKEDGE_CHECKS(offset, branch_pc); 2701 CONTINUE; 2702 } 2703 2704 CASE(_jsr_w): { 2705 /* push return address on the stack */ 2706 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); 2707 MORE_STACK(1); 2708 /* FALL THROUGH */ 2709 } 2710 2711 CASE(_goto_w): 2712 { 2713 int32_t offset = Bytes::get_Java_u4(pc + 1); 2714 // Profile jump. 2715 BI_PROFILE_UPDATE_JUMP(); 2716 address branch_pc = pc; 2717 UPDATE_PC(offset); 2718 DO_BACKEDGE_CHECKS(offset, branch_pc); 2719 CONTINUE; 2720 } 2721 2722 /* return from a jsr or jsr_w */ 2723 2724 CASE(_ret): { 2725 // Profile ret. 2726 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1])))); 2727 // Now, update the pc. 2728 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); 2729 UPDATE_PC_AND_CONTINUE(0); 2730 } 2731 2732 /* debugger breakpoint */ 2733 2734 CASE(_breakpoint): { 2735 Bytecodes::Code original_bytecode; 2736 DECACHE_STATE(); 2737 SET_LAST_JAVA_FRAME(); 2738 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, 2739 METHOD, pc); 2740 RESET_LAST_JAVA_FRAME(); 2741 CACHE_STATE(); 2742 if (THREAD->has_pending_exception()) goto handle_exception; 2743 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), 2744 handle_exception); 2745 2746 opcode = (jubyte)original_bytecode; 2747 goto opcode_switch; 2748 } 2749 2750 DEFAULT: 2751 fatal("Unimplemented opcode %d = %s", opcode, 2752 Bytecodes::name((Bytecodes::Code)opcode)); 2753 goto finish; 2754 2755 } /* switch(opc) */ 2756 2757 2758 #ifdef USELABELS 2759 check_for_exception: 2760 #endif 2761 { 2762 if (!THREAD->has_pending_exception()) { 2763 CONTINUE; 2764 } 2765 /* We will be gcsafe soon, so flush our state. */ 2766 DECACHE_PC(); 2767 goto handle_exception; 2768 } 2769 do_continue: ; 2770 2771 } /* while (1) interpreter loop */ 2772 2773 2774 // An exception exists in the thread state see whether this activation can handle it 2775 handle_exception: { 2776 2777 HandleMarkCleaner __hmc(THREAD); 2778 Handle except_oop(THREAD, THREAD->pending_exception()); 2779 // Prevent any subsequent HandleMarkCleaner in the VM 2780 // from freeing the except_oop handle. 2781 HandleMark __hm(THREAD); 2782 2783 THREAD->clear_pending_exception(); 2784 assert(except_oop(), "No exception to process"); 2785 intptr_t continuation_bci; 2786 // expression stack is emptied 2787 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2788 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), 2789 handle_exception); 2790 2791 except_oop = Handle(THREAD, THREAD->vm_result()); 2792 THREAD->set_vm_result(NULL); 2793 if (continuation_bci >= 0) { 2794 // Place exception on top of stack 2795 SET_STACK_OBJECT(except_oop(), 0); 2796 MORE_STACK(1); 2797 pc = METHOD->code_base() + continuation_bci; 2798 if (log_is_enabled(Info, exceptions)) { 2799 ResourceMark rm(THREAD); 2800 stringStream tempst; 2801 tempst.print("interpreter method <%s>\n" 2802 " at bci %d, continuing at %d for thread " INTPTR_FORMAT, 2803 METHOD->print_value_string(), 2804 (int)(istate->bcp() - METHOD->code_base()), 2805 (int)continuation_bci, p2i(THREAD)); 2806 Exceptions::log_exception(except_oop, tempst); 2807 } 2808 // for AbortVMOnException flag 2809 Exceptions::debug_check_abort(except_oop); 2810 2811 // Update profiling data. 
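// The pc was just redirected to the handler's bci, so the method data pointer has
// to be re-aligned to that bci before jumping back into the dispatch loop.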
2812 BI_PROFILE_ALIGN_TO_CURRENT_BCI(); 2813 goto run; 2814 } 2815 if (log_is_enabled(Info, exceptions)) { 2816 ResourceMark rm; 2817 stringStream tempst; 2818 tempst.print("interpreter method <%s>\n" 2819 " at bci %d, unwinding for thread " INTPTR_FORMAT, 2820 METHOD->print_value_string(), 2821 (int)(istate->bcp() - METHOD->code_base()), 2822 p2i(THREAD)); 2823 Exceptions::log_exception(except_oop, tempst); 2824 } 2825 // for AbortVMOnException flag 2826 Exceptions::debug_check_abort(except_oop); 2827 2828 // No handler in this activation, unwind and try again 2829 THREAD->set_pending_exception(except_oop(), NULL, 0); 2830 goto handle_return; 2831 } // handle_exception: 2832 2833 // Return from an interpreter invocation with the result of the interpretation 2834 // on the top of the Java Stack (or a pending exception) 2835 2836 handle_Pop_Frame: { 2837 2838 // We don't really do anything special here except we must be aware 2839 // that we can get here without ever locking the method (if sync). 2840 // Also we skip the notification of the exit. 2841 2842 istate->set_msg(popping_frame); 2843 // Clear pending so while the pop is in process 2844 // we don't start another one if a call_vm is done. 2845 THREAD->clr_pop_frame_pending(); 2846 // Let interpreter (only) see the we're in the process of popping a frame 2847 THREAD->set_pop_frame_in_process(); 2848 2849 goto handle_return; 2850 2851 } // handle_Pop_Frame 2852 2853 // ForceEarlyReturn ends a method, and returns to the caller with a return value 2854 // given by the invoker of the early return. 2855 handle_Early_Return: { 2856 2857 istate->set_msg(early_return); 2858 2859 // Clear expression stack. 2860 topOfStack = istate->stack_base() - Interpreter::stackElementWords; 2861 2862 JvmtiThreadState *ts = THREAD->jvmti_thread_state(); 2863 2864 // Push the value to be returned. 2865 switch (istate->method()->result_type()) { 2866 case T_BOOLEAN: 2867 case T_SHORT: 2868 case T_BYTE: 2869 case T_CHAR: 2870 case T_INT: 2871 SET_STACK_INT(ts->earlyret_value().i, 0); 2872 MORE_STACK(1); 2873 break; 2874 case T_LONG: 2875 SET_STACK_LONG(ts->earlyret_value().j, 1); 2876 MORE_STACK(2); 2877 break; 2878 case T_FLOAT: 2879 SET_STACK_FLOAT(ts->earlyret_value().f, 0); 2880 MORE_STACK(1); 2881 break; 2882 case T_DOUBLE: 2883 SET_STACK_DOUBLE(ts->earlyret_value().d, 1); 2884 MORE_STACK(2); 2885 break; 2886 case T_ARRAY: 2887 case T_OBJECT: 2888 SET_STACK_OBJECT(ts->earlyret_oop(), 0); 2889 MORE_STACK(1); 2890 break; 2891 } 2892 2893 ts->clr_earlyret_value(); 2894 ts->set_earlyret_oop(NULL); 2895 ts->clr_earlyret_pending(); 2896 2897 // Fall through to handle_return. 2898 2899 } // handle_Early_Return 2900 2901 handle_return: { 2902 // A storestore barrier is required to order initialization of 2903 // final fields with publishing the reference to the object that 2904 // holds the field. Without the barrier the value of final fields 2905 // can be observed to change. 
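// Illustrative example (not interpreter code): without this barrier a racy
// publication such as
//   shared = new Point(1, 2);   // Point has final fields x and y
// could allow another thread that reads 'shared' to observe x == 0, because the
// store publishing 'shared' might become visible before the stores that
// initialize the final fields.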
2906 OrderAccess::storestore(); 2907 2908 DECACHE_STATE(); 2909 2910 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; 2911 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; 2912 Handle original_exception(THREAD, THREAD->pending_exception()); 2913 Handle illegal_state_oop(THREAD, NULL); 2914 2915 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner 2916 // in any following VM entries from freeing our live handles, but illegal_state_oop 2917 // isn't really allocated yet and so doesn't become live until later and 2918 // in unpredicatable places. Instead we must protect the places where we enter the 2919 // VM. It would be much simpler (and safer) if we could allocate a real handle with 2920 // a NULL oop in it and then overwrite the oop later as needed. This isn't 2921 // unfortunately isn't possible. 2922 2923 THREAD->clear_pending_exception(); 2924 2925 // 2926 // As far as we are concerned we have returned. If we have a pending exception 2927 // that will be returned as this invocation's result. However if we get any 2928 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions 2929 // will be our final result (i.e. monitor exception trumps a pending exception). 2930 // 2931 2932 // If we never locked the method (or really passed the point where we would have), 2933 // there is no need to unlock it (or look for other monitors), since that 2934 // could not have happened. 2935 2936 if (THREAD->do_not_unlock()) { 2937 2938 // Never locked, reset the flag now because obviously any caller must 2939 // have passed their point of locking for us to have gotten here. 2940 2941 THREAD->clr_do_not_unlock(); 2942 } else { 2943 // At this point we consider that we have returned. We now check that the 2944 // locks were properly block structured. If we find that they were not 2945 // used properly we will return with an illegal monitor exception. 2946 // The exception is checked by the caller not the callee since this 2947 // checking is considered to be part of the invocation and therefore 2948 // in the callers scope (JVM spec 8.13). 2949 // 2950 // Another weird thing to watch for is if the method was locked 2951 // recursively and then not exited properly. This means we must 2952 // examine all the entries in reverse time(and stack) order and 2953 // unlock as we find them. If we find the method monitor before 2954 // we are at the initial entry then we should throw an exception. 2955 // It is not clear the template based interpreter does this 2956 // correctly 2957 2958 BasicObjectLock* base = istate->monitor_base(); 2959 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); 2960 bool method_unlock_needed = METHOD->is_synchronized(); 2961 // We know the initial monitor was used for the method don't check that 2962 // slot in the loop 2963 if (method_unlock_needed) base--; 2964 2965 // Check all the monitors to see they are unlocked. Install exception if found to be locked. 
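// Any slot that still holds an object at this point means the bytecodes did not
// use monitorenter/monitorexit in a block-structured way. Each such monitor is
// force-unlocked, and the first offender installs the IllegalMonitorStateException
// that becomes this invocation's result (unless errors are being suppressed for
// popframe / early return).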
2966 while (end < base) { 2967 oop lockee = end->obj(); 2968 if (lockee != NULL) { 2969 BasicLock* lock = end->lock(); 2970 markOop header = lock->displaced_header(); 2971 end->set_obj(NULL); 2972 2973 if (!lockee->mark()->has_bias_pattern()) { 2974 // If it isn't recursive we either must swap old header or call the runtime 2975 if (header != NULL) { 2976 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { 2977 // restore object for the slow case 2978 end->set_obj(lockee); 2979 { 2980 // Prevent any HandleMarkCleaner from freeing our live handles 2981 HandleMark __hm(THREAD); 2982 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); 2983 } 2984 } 2985 } 2986 } 2987 // One error is plenty 2988 if (illegal_state_oop() == NULL && !suppress_error) { 2989 { 2990 // Prevent any HandleMarkCleaner from freeing our live handles 2991 HandleMark __hm(THREAD); 2992 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 2993 } 2994 assert(THREAD->has_pending_exception(), "Lost our exception!"); 2995 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 2996 THREAD->clear_pending_exception(); 2997 } 2998 } 2999 end++; 3000 } 3001 // Unlock the method if needed 3002 if (method_unlock_needed) { 3003 if (base->obj() == NULL) { 3004 // The method is already unlocked this is not good. 3005 if (illegal_state_oop() == NULL && !suppress_error) { 3006 { 3007 // Prevent any HandleMarkCleaner from freeing our live handles 3008 HandleMark __hm(THREAD); 3009 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); 3010 } 3011 assert(THREAD->has_pending_exception(), "Lost our exception!"); 3012 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3013 THREAD->clear_pending_exception(); 3014 } 3015 } else { 3016 // 3017 // The initial monitor is always used for the method 3018 // However if that slot is no longer the oop for the method it was unlocked 3019 // and reused by something that wasn't unlocked! 3020 // 3021 // deopt can come in with rcvr dead because c2 knows 3022 // its value is preserved in the monitor. So we can't use locals[0] at all 3023 // and must use first monitor slot. 3024 // 3025 oop rcvr = base->obj(); 3026 if (rcvr == NULL) { 3027 if (!suppress_error) { 3028 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); 3029 illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3030 THREAD->clear_pending_exception(); 3031 } 3032 } else if (UseHeavyMonitors) { 3033 { 3034 // Prevent any HandleMarkCleaner from freeing our live handles. 
3035 HandleMark __hm(THREAD); 3036 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3037 } 3038 if (THREAD->has_pending_exception()) { 3039 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3040 THREAD->clear_pending_exception(); 3041 } 3042 } else { 3043 BasicLock* lock = base->lock(); 3044 markOop header = lock->displaced_header(); 3045 base->set_obj(NULL); 3046 3047 if (!rcvr->mark()->has_bias_pattern()) { 3048 base->set_obj(NULL); 3049 // If it isn't recursive we either must swap old header or call the runtime 3050 if (header != NULL) { 3051 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { 3052 // restore object for the slow case 3053 base->set_obj(rcvr); 3054 { 3055 // Prevent any HandleMarkCleaner from freeing our live handles 3056 HandleMark __hm(THREAD); 3057 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); 3058 } 3059 if (THREAD->has_pending_exception()) { 3060 if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception()); 3061 THREAD->clear_pending_exception(); 3062 } 3063 } 3064 } 3065 } 3066 } 3067 } 3068 } 3069 } 3070 // Clear the do_not_unlock flag now. 3071 THREAD->clr_do_not_unlock(); 3072 3073 // 3074 // Notify jvmti/jvmdi 3075 // 3076 // NOTE: we do not notify a method_exit if we have a pending exception, 3077 // including an exception we generate for unlocking checks. In the former 3078 // case, JVMDI has already been notified by our call for the exception handler 3079 // and in both cases as far as JVMDI is concerned we have already returned. 3080 // If we notify it again JVMDI will be all confused about how many frames 3081 // are still on the stack (4340444). 3082 // 3083 // NOTE Further! It turns out the the JVMTI spec in fact expects to see 3084 // method_exit events whenever we leave an activation unless it was done 3085 // for popframe. This is nothing like jvmdi. However we are passing the 3086 // tests at the moment (apparently because they are jvmdi based) so rather 3087 // than change this code and possibly fail tests we will leave it alone 3088 // (with this note) in anticipation of changing the vm and the tests 3089 // simultaneously. 3090 3091 3092 // 3093 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; 3094 3095 3096 3097 #ifdef VM_JVMTI 3098 if (_jvmti_interp_events) { 3099 // Whenever JVMTI puts a thread in interp_only_mode, method 3100 // entry/exit events are sent for that thread to track stack depth. 3101 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { 3102 { 3103 // Prevent any HandleMarkCleaner from freeing our live handles 3104 HandleMark __hm(THREAD); 3105 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); 3106 } 3107 } 3108 } 3109 #endif /* VM_JVMTI */ 3110 3111 // 3112 // See if we are returning any exception 3113 // A pending exception that was pending prior to a possible popping frame 3114 // overrides the popping frame. 3115 // 3116 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed"); 3117 if (illegal_state_oop() != NULL || original_exception() != NULL) { 3118 // Inform the frame manager we have no result. 
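// 'throwing_exception' tells the frame manager that this activation exits with a
// pending exception rather than a result on the Java stack.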
3119 istate->set_msg(throwing_exception); 3120 if (illegal_state_oop() != NULL) 3121 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); 3122 else 3123 THREAD->set_pending_exception(original_exception(), NULL, 0); 3124 UPDATE_PC_AND_RETURN(0); 3125 } 3126 3127 if (istate->msg() == popping_frame) { 3128 // Make it simpler on the assembly code and set the message for the frame pop. 3129 // returns 3130 if (istate->prev() == NULL) { 3131 // We must be returning to a deoptimized frame (because popframe only happens between 3132 // two interpreted frames). We need to save the current arguments in C heap so that 3133 // the deoptimized frame when it restarts can copy the arguments to its expression 3134 // stack and re-execute the call. We also have to notify deoptimization that this 3135 // has occurred and to pick up the preserved args and copy them to the deoptimized frame's 3136 // java expression stack. Yuck. 3137 // 3138 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), 3139 LOCALS_SLOT(METHOD->size_of_parameters() - 1)); 3140 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); 3141 } 3142 } else { 3143 istate->set_msg(return_from_method); 3144 } 3145 3146 // Normal return 3147 // Advance the pc and return to the frame manager 3148 UPDATE_PC_AND_RETURN(1); 3149 } /* handle_return: */ 3150 3151 // This is really a fatal error return 3152 3153 finish: 3154 DECACHE_TOS(); 3155 DECACHE_PC(); 3156 3157 return; 3158 } 3159 3160 /* 3161 * All the code following this point is only produced once and is not present 3162 * in the JVMTI version of the interpreter 3163 */ 3164 3165 #ifndef VM_JVMTI 3166 3167 // This constructor should only be used to construct the object to signal 3168 // interpreter initialization. All other instances should be created by 3169 // the frame manager. 3170 BytecodeInterpreter::BytecodeInterpreter(messages msg) { 3171 if (msg != initialize) ShouldNotReachHere(); 3172 _msg = msg; 3173 _self_link = this; 3174 _prev_link = NULL; 3175 } 3176 3177 // Inline static functions for Java Stack and Local manipulation 3178 3179 // The implementations are platform dependent. We have to worry about alignment 3180 // issues on some machines, which can differ on the same platform depending on 3181 // whether it is an LP64 machine.
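// A sketch of the convention used by the accessors below (assuming the STACK_*
// macros used in the dispatch loop above map straight onto these helpers):
// offsets are logical expression-stack offsets relative to tos, with -1 naming the
// top slot, and Interpreter::expr_index_at() translates them into physical slot
// indices, so only these helpers need to know which way the stack grows or how
// 64-bit values are laid out. For example,
//   jint v = BytecodeInterpreter::stack_int(topOfStack, -1);
// would read the int currently on top of the expression stack.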
3182 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { 3183 return (address) tos[Interpreter::expr_index_at(-offset)]; 3184 } 3185 3186 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { 3187 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); 3188 } 3189 3190 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { 3191 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); 3192 } 3193 3194 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { 3195 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]); 3196 } 3197 3198 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { 3199 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; 3200 } 3201 3202 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { 3203 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; 3204 } 3205 3206 // only used for value types 3207 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, 3208 int offset) { 3209 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3210 } 3211 3212 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, 3213 int offset) { 3214 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3215 } 3216 3217 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, 3218 int offset) { 3219 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3220 } 3221 3222 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, 3223 int offset) { 3224 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; 3225 } 3226 3227 // needs to be platform dep for the 32 bit platforms. 3228 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, 3229 int offset) { 3230 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; 3231 } 3232 3233 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, 3234 address addr, int offset) { 3235 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = 3236 ((VMJavaVal64*)addr)->d); 3237 } 3238 3239 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, 3240 int offset) { 3241 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3242 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; 3243 } 3244 3245 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, 3246 address addr, int offset) { 3247 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; 3248 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = 3249 ((VMJavaVal64*)addr)->l; 3250 } 3251 3252 // Locals 3253 3254 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { 3255 return (address)locals[Interpreter::local_index_at(-offset)]; 3256 } 3257 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { 3258 return (jint)locals[Interpreter::local_index_at(-offset)]; 3259 } 3260 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { 3261 return (jfloat)locals[Interpreter::local_index_at(-offset)]; 3262 } 3263 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { 3264 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]); 3265 } 3266 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { 3267 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; 3268 } 3269 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { 3270 return 
// Locals

address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
  return (address)locals[Interpreter::local_index_at(-offset)];
}
jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
  return (jint)locals[Interpreter::local_index_at(-offset)];
}
jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
  return (jfloat)locals[Interpreter::local_index_at(-offset)];
}
oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
}
jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
}
jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
  return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
}

// Returns the address of locals value.
address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}
address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
  return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
}

// Used for local value or returnAddress
void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
                                          address value, int offset) {
  *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_int(intptr_t *locals,
                                         jint value, int offset) {
  *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_float(intptr_t *locals,
                                           jfloat value, int offset) {
  *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_object(intptr_t *locals,
                                            oop value, int offset) {
  *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
}
void BytecodeInterpreter::set_locals_double(intptr_t *locals,
                                            jdouble value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
}
void BytecodeInterpreter::set_locals_long(intptr_t *locals,
                                          jlong value, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
}
void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
                                                      address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
}
void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
                                                    address addr, int offset) {
  ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
}

void BytecodeInterpreter::astore(intptr_t* tos,    int stack_offset,
                                 intptr_t* locals, int locals_offset) {
  intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
  locals[Interpreter::local_index_at(-locals_offset)] = value;
}


void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
                                          int to_offset) {
  tos[Interpreter::expr_index_at(-to_offset)] =
                      (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
}
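// Added commentary (hedged sketch of the intended stack effects, following the
// JVM specification for the dup* bytecodes; judging from the offsets used
// below, slot 0 is the first free slot above the current top of stack and
// slot -1 is the top element, with A the topmost value):
//
//   dup     : ..., A          -> ..., A, A
//   dup_x1  : ..., B, A       -> ..., A, B, A
//   dup_x2  : ..., C, B, A    -> ..., A, C, B, A
//   dup2    : ..., B, A       -> ..., B, A, B, A
//   dup2_x1 : ..., C, B, A    -> ..., B, A, C, B, A
//   dup2_x2 : ..., D, C, B, A -> ..., B, A, D, C, B, A
//
// Each helper realizes one of these shuffles with raw slot copies via
// copy_stack_slot(); the interpreter loop then adjusts the top-of-stack pointer.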
void BytecodeInterpreter::dup(intptr_t *tos) {
  copy_stack_slot(tos, -1, 0);
}
void BytecodeInterpreter::dup2(intptr_t *tos) {
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -1, 1);
}

void BytecodeInterpreter::dup_x1(intptr_t *tos) {
  /* insert top word two down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, 0, -2);
}

void BytecodeInterpreter::dup_x2(intptr_t *tos) {
  /* insert top word three down */
  copy_stack_slot(tos, -1, 0);
  copy_stack_slot(tos, -2, -1);
  copy_stack_slot(tos, -3, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
  /* insert top 2 slots three down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, 1, -2);
  copy_stack_slot(tos, 0, -3);
}
void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
  /* insert top 2 slots four down */
  copy_stack_slot(tos, -1, 1);
  copy_stack_slot(tos, -2, 0);
  copy_stack_slot(tos, -3, -1);
  copy_stack_slot(tos, -4, -2);
  copy_stack_slot(tos, 1, -3);
  copy_stack_slot(tos, 0, -4);
}


void BytecodeInterpreter::swap(intptr_t *tos) {
  // swap top two elements
  intptr_t val = tos[Interpreter::expr_index_at(1)];
  // Copy -2 entry to -1
  copy_stack_slot(tos, -2, -1);
  // Store saved -1 entry into -2
  tos[Interpreter::expr_index_at(2)] = val;
}
// --------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
  switch (msg) {
     case BytecodeInterpreter::no_request:  return("no_request");
     case BytecodeInterpreter::initialize:  return("initialize");
     // status message to C++ interpreter
     case BytecodeInterpreter::method_entry:  return("method_entry");
     case BytecodeInterpreter::method_resume:  return("method_resume");
     case BytecodeInterpreter::got_monitors:  return("got_monitors");
     case BytecodeInterpreter::rethrow_exception:  return("rethrow_exception");
     // requests to frame manager from C++ interpreter
     case BytecodeInterpreter::call_method:  return("call_method");
     case BytecodeInterpreter::return_from_method:  return("return_from_method");
     case BytecodeInterpreter::more_monitors:  return("more_monitors");
     case BytecodeInterpreter::throwing_exception:  return("throwing_exception");
     case BytecodeInterpreter::popping_frame:  return("popping_frame");
     case BytecodeInterpreter::do_osr:  return("do_osr");
     // deopt
     case BytecodeInterpreter::deopt_resume:  return("deopt_resume");
     case BytecodeInterpreter::deopt_resume2:  return("deopt_resume2");
     default: return("BAD MSG");
  }
}
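// Added commentary: print() below dumps the complete interpreter state, and
// the extern "C" PI() wrapper after it exists so the dump can be triggered
// from a native debugger. As a hedged usage example (exact syntax depends on
// the debugger), from gdb one might run, while stopped inside
// BytecodeInterpreter::run():
//
//   (gdb) call PI((uintptr_t) istate)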
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO) && defined(PPC)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}

extern "C" {
  void PI(uintptr_t arg) {
    ((BytecodeInterpreter*)arg)->print();
  }
}
#endif // PRODUCT

#endif // VM_JVMTI
#endif // CC_INTERP